diff --git a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go index b80efd453e6..0e84223f9ed 100644 --- a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go +++ b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go @@ -17,6 +17,7 @@ limitations under the License. package main import ( + "context" "flag" "fmt" "net" @@ -102,7 +103,7 @@ func main() { namespace := metav1.NamespaceSystem envNamespace := os.Getenv("NAMESPACE") if envNamespace != "" { - if _, err := client.CoreV1().Namespaces().Get(envNamespace, metav1.GetOptions{}); err != nil { + if _, err := client.CoreV1().Namespaces().Get(context.TODO(), envNamespace, metav1.GetOptions{}); err != nil { klog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err) } namespace = envNamespace @@ -117,7 +118,7 @@ func main() { // Look for endpoints associated with the Elasticsearch logging service. // First wait for the service to become available. for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) { - elasticsearch, err = client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{}) + elasticsearch, err = client.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) if err == nil { break } @@ -134,7 +135,7 @@ func main() { // Wait for some endpoints. count, _ := strconv.Atoi(os.Getenv("MINIMUM_MASTER_NODES")) for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) { - endpoints, err = client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) + endpoints, err = client.CoreV1().Endpoints(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) if err != nil { continue } diff --git a/cmd/kube-apiserver/app/testing/testserver.go b/cmd/kube-apiserver/app/testing/testserver.go index ea6bde3b323..8b8dce31411 100644 --- a/cmd/kube-apiserver/app/testing/testserver.go +++ b/cmd/kube-apiserver/app/testing/testserver.go @@ -231,7 +231,7 @@ func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, custo default: } - if _, err := client.CoreV1().Namespaces().Get("default", metav1.GetOptions{}); err != nil { + if _, err := client.CoreV1().Namespaces().Get(context.TODO(), "default", metav1.GetOptions{}); err != nil { if !errors.IsNotFound(err) { t.Logf("Unable to get default namespace: %v", err) } diff --git a/cmd/kubeadm/app/cmd/config.go b/cmd/kubeadm/app/cmd/config.go index ab502bb25df..59bc5f3dd60 100644 --- a/cmd/kubeadm/app/cmd/config.go +++ b/cmd/kubeadm/app/cmd/config.go @@ -18,6 +18,7 @@ package cmd import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -432,7 +433,7 @@ func NewCmdConfigUploadFromFlags(out io.Writer, kubeConfigFile *string) *cobra.C func RunConfigView(out io.Writer, client clientset.Interface) error { klog.V(1).Infoln("[config] getting the cluster configuration") - cfgConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(constants.KubeadmConfigConfigMap, metav1.GetOptions{}) + cfgConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), constants.KubeadmConfigConfigMap, metav1.GetOptions{}) if err != nil { return err } diff --git a/cmd/kubeadm/app/cmd/phases/join/kubelet.go b/cmd/kubeadm/app/cmd/phases/join/kubelet.go index a7e043a6849..cdb535a0a01 100644 --- a/cmd/kubeadm/app/cmd/phases/join/kubelet.go +++ b/cmd/kubeadm/app/cmd/phases/join/kubelet.go @@ -17,6 +17,7 @@ 
limitations under the License. package phases import ( + "context" "fmt" "os" @@ -142,7 +143,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) { // A new Node with the same name as an existing control-plane Node can cause undefined // behavior and ultimately control-plane failure. klog.V(1).Infof("[kubelet-start] Checking for an existing Node in the cluster with name %q and status %q", nodeName, v1.NodeReady) - node, err := bootstrapClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := bootstrapClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return errors.Wrapf(err, "cannot get Node %q", nodeName) } diff --git a/cmd/kubeadm/app/cmd/token.go b/cmd/kubeadm/app/cmd/token.go index 56abbf25ee2..265fe7e453d 100644 --- a/cmd/kubeadm/app/cmd/token.go +++ b/cmd/kubeadm/app/cmd/token.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "io" "os" @@ -380,7 +381,7 @@ func RunListTokens(out io.Writer, errW io.Writer, client clientset.Interface, pr } klog.V(1).Info("[token] retrieving list of bootstrap tokens") - secrets, err := client.CoreV1().Secrets(metav1.NamespaceSystem).List(listOptions) + secrets, err := client.CoreV1().Secrets(metav1.NamespaceSystem).List(context.TODO(), listOptions) if err != nil { return errors.Wrap(err, "failed to list bootstrap tokens") } @@ -430,7 +431,7 @@ func RunDeleteTokens(out io.Writer, client clientset.Interface, tokenIDsOrTokens tokenSecretName := bootstraputil.BootstrapTokenSecretName(tokenID) klog.V(1).Infof("[token] deleting token %q", tokenID) - if err := client.CoreV1().Secrets(metav1.NamespaceSystem).Delete(tokenSecretName, nil); err != nil { + if err := client.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), tokenSecretName, nil); err != nil { return errors.Wrapf(err, "failed to delete bootstrap token %q", tokenID) } fmt.Fprintf(out, "bootstrap token %q deleted\n", tokenID) diff --git a/cmd/kubeadm/app/discovery/file/file.go b/cmd/kubeadm/app/discovery/file/file.go index b450963cfd4..fac3e02c49b 100644 --- a/cmd/kubeadm/app/discovery/file/file.go +++ b/cmd/kubeadm/app/discovery/file/file.go @@ -17,6 +17,7 @@ limitations under the License. 
package file import ( + "context" "time" "github.com/pkg/errors" @@ -100,7 +101,7 @@ func ValidateConfigInfo(config *clientcmdapi.Config, clustername string, discove err = wait.Poll(constants.DiscoveryRetryInterval, discoveryTimeout, func() (bool, error) { var err error - clusterinfoCM, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + clusterinfoCM, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { if apierrors.IsForbidden(err) { // If the request is unauthorized, the cluster admin has not granted access to the cluster info configmap for unauthenticated users diff --git a/cmd/kubeadm/app/discovery/token/token.go b/cmd/kubeadm/app/discovery/token/token.go index 3dfb35903d2..dc9ae6b4e02 100644 --- a/cmd/kubeadm/app/discovery/token/token.go +++ b/cmd/kubeadm/app/discovery/token/token.go @@ -210,7 +210,7 @@ func getClusterInfo(client clientset.Interface, kubeconfig *clientcmdapi.Config, defer cancel() wait.JitterUntil(func() { - cm, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + cm, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { klog.V(1).Infof("[discovery] Failed to request cluster-info, will try again: %v", err) return diff --git a/cmd/kubeadm/app/phases/addons/dns/dns.go b/cmd/kubeadm/app/phases/addons/dns/dns.go index f4243334c1a..bffeda7fb04 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns.go @@ -17,6 +17,7 @@ limitations under the License. package dns import ( + "context" "encoding/json" "fmt" "net" @@ -58,7 +59,7 @@ const ( // DeployedDNSAddon returns the type of DNS addon currently deployed func DeployedDNSAddon(client clientset.Interface) (kubeadmapi.DNSAddOnType, string, error) { deploymentsClient := client.AppsV1().Deployments(metav1.NamespaceSystem) - deployments, err := deploymentsClient.List(metav1.ListOptions{LabelSelector: "k8s-app=kube-dns"}) + deployments, err := deploymentsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "k8s-app=kube-dns"}) if err != nil { return "", "", errors.Wrap(err, "couldn't retrieve DNS addon deployments") } @@ -84,7 +85,7 @@ func DeployedDNSAddon(client clientset.Interface) (kubeadmapi.DNSAddOnType, stri // deployedDNSReplicas returns the replica count for the current DNS deployment func deployedDNSReplicas(client clientset.Interface, replicas int32) (*int32, error) { deploymentsClient := client.AppsV1().Deployments(metav1.NamespaceSystem) - deployments, err := deploymentsClient.List(metav1.ListOptions{LabelSelector: "k8s-app=kube-dns"}) + deployments, err := deploymentsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "k8s-app=kube-dns"}) if err != nil { return &replicas, errors.Wrap(err, "couldn't retrieve DNS addon deployments") } @@ -209,7 +210,7 @@ func coreDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interfa } // Get the kube-dns ConfigMap for translation to equivalent CoreDNS Config. 
- kubeDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeDNSConfigMap, metav1.GetOptions{}) + kubeDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeDNSConfigMap, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return err } @@ -346,7 +347,7 @@ func createDNSService(dnsService *v1.Service, serviceBytes []byte, client client } // Can't use a generic apiclient helper func here as we have to tolerate more than AlreadyExists. - if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(dnsService); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(context.TODO(), dnsService); err != nil { // Ignore if the Service is invalid with this error message: // Service "kube-dns" is invalid: spec.clusterIP: Invalid value: "10.96.0.10": provided IP is already allocated @@ -354,7 +355,7 @@ func createDNSService(dnsService *v1.Service, serviceBytes []byte, client client return errors.Wrap(err, "unable to create a new DNS service") } - if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(dnsService); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(context.TODO(), dnsService); err != nil { return errors.Wrap(err, "unable to create/update the DNS service") } } @@ -373,7 +374,7 @@ func migrateCoreDNSCorefile(client clientset.Interface, cm *v1.ConfigMap, corefi // Take a copy of the Corefile data as `Corefile-backup` and update the ConfigMap // Also point the CoreDNS deployment to the `Corefile-backup` data. - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(&v1.ConfigMap{ + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmconstants.CoreDNSConfigMap, Namespace: metav1.NamespaceSystem, @@ -395,7 +396,7 @@ func migrateCoreDNSCorefile(client clientset.Interface, cm *v1.ConfigMap, corefi return errors.Wrap(err, "unable to migrate CoreDNS ConfigMap") } - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(&v1.ConfigMap{ + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmconstants.CoreDNSConfigMap, Namespace: metav1.NamespaceSystem, @@ -424,7 +425,7 @@ func migrateCoreDNSCorefile(client clientset.Interface, cm *v1.ConfigMap, corefi // GetCoreDNSInfo gets the current CoreDNS installed and the current Corefile Configuration of CoreDNS. 
func GetCoreDNSInfo(client clientset.Interface) (*v1.ConfigMap, string, string, error) { - coreDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNSConfigMap, metav1.GetOptions{}) + coreDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.CoreDNSConfigMap, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return nil, "", "", err } @@ -445,13 +446,13 @@ func GetCoreDNSInfo(client clientset.Interface) (*v1.ConfigMap, string, string, } func patchCoreDNSDeployment(client clientset.Interface, coreDNSCorefileName string) error { - dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNSDeploymentName, metav1.GetOptions{}) + dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.CoreDNSDeploymentName, metav1.GetOptions{}) if err != nil { return err } patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"volumes":[{"name": "config-volume", "configMap":{"name": "coredns", "items":[{"key": "%s", "path": "%s"}]}}]}}}}`, coreDNSCorefileName, coreDNSCorefileName) - if _, err := client.AppsV1().Deployments(dnsDeployment.ObjectMeta.Namespace).Patch(dnsDeployment.Name, types.StrategicMergePatchType, []byte(patch)); err != nil { + if _, err := client.AppsV1().Deployments(dnsDeployment.ObjectMeta.Namespace).Patch(context.TODO(), dnsDeployment.Name, types.StrategicMergePatchType, []byte(patch)); err != nil { return errors.Wrap(err, "unable to patch the CoreDNS deployment") } return nil diff --git a/cmd/kubeadm/app/phases/addons/dns/dns_test.go b/cmd/kubeadm/app/phases/addons/dns/dns_test.go index 7530ebfba1b..096f38b4f46 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns_test.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package dns import ( + "context" "strings" "testing" @@ -722,7 +723,7 @@ func TestCreateCoreDNSConfigMap(t *testing.T) { if err != nil { t.Fatalf("error creating the CoreDNS ConfigMap: %v", err) } - migratedConfigMap, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNSConfigMap, metav1.GetOptions{}) + migratedConfigMap, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.CoreDNSConfigMap, metav1.GetOptions{}) if !strings.EqualFold(migratedConfigMap.Data["Corefile"], tc.expectedCorefileData) { t.Fatalf("expected to get %v, but got %v", tc.expectedCorefileData, migratedConfigMap.Data["Corefile"]) } @@ -732,7 +733,7 @@ func TestCreateCoreDNSConfigMap(t *testing.T) { func createClientAndCoreDNSManifest(t *testing.T, corefile, coreDNSVersion string) *clientsetfake.Clientset { client := clientsetfake.NewSimpleClientset() - _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&v1.ConfigMap{ + _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmconstants.CoreDNSConfigMap, Namespace: metav1.NamespaceSystem, @@ -744,7 +745,7 @@ func createClientAndCoreDNSManifest(t *testing.T, corefile, coreDNSVersion strin if err != nil { t.Fatalf("error creating ConfigMap: %v", err) } - _, err = client.AppsV1().Deployments(metav1.NamespaceSystem).Create(&apps.Deployment{ + _, err = client.AppsV1().Deployments(metav1.NamespaceSystem).Create(context.TODO(), &apps.Deployment{ TypeMeta: metav1.TypeMeta{ Kind: "Deployment", APIVersion: "apps/v1", diff --git a/cmd/kubeadm/app/phases/bootstraptoken/node/token.go b/cmd/kubeadm/app/phases/bootstraptoken/node/token.go index 27d4cf3f79b..9a2dce93bd2 100644 --- a/cmd/kubeadm/app/phases/bootstraptoken/node/token.go +++ b/cmd/kubeadm/app/phases/bootstraptoken/node/token.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,7 +40,7 @@ func UpdateOrCreateTokens(client clientset.Interface, failIfExists bool, tokens for _, token := range tokens { secretName := bootstraputil.BootstrapTokenSecretName(token.Token.ID) - secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{}) + secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), secretName, metav1.GetOptions{}) if secret != nil && err == nil && failIfExists { return errors.Errorf("a token with id %q already exists", token.Token.ID) } diff --git a/cmd/kubeadm/app/phases/certs/renewal/apirenewer.go b/cmd/kubeadm/app/phases/certs/renewal/apirenewer.go index d095d0e0170..87f12475488 100644 --- a/cmd/kubeadm/app/phases/certs/renewal/apirenewer.go +++ b/cmd/kubeadm/app/phases/certs/renewal/apirenewer.go @@ -91,7 +91,7 @@ func (r *APIRenewer) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Sign }, } - req, err := r.client.CertificateSigningRequests().Create(k8sCSR) + req, err := r.client.CertificateSigningRequests().Create(context.TODO(), k8sCSR) if err != nil { return nil, nil, errors.Wrap(err, "couldn't create certificate signing request") } diff --git a/cmd/kubeadm/app/phases/copycerts/copycerts.go b/cmd/kubeadm/app/phases/copycerts/copycerts.go index d15033a48de..5ae88fdb458 100644 --- a/cmd/kubeadm/app/phases/copycerts/copycerts.go +++ b/cmd/kubeadm/app/phases/copycerts/copycerts.go @@ -17,6 +17,7 @@ limitations under the License. 
package copycerts import ( + "context" "encoding/hex" "fmt" "io/ioutil" @@ -159,7 +160,7 @@ func createRBAC(client clientset.Interface) error { func getSecretOwnerRef(client clientset.Interface, tokenID string) ([]metav1.OwnerReference, error) { secretName := bootstraputil.BootstrapTokenSecretName(tokenID) - secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{}) + secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { return nil, errors.Wrap(err, "error to get token reference") } @@ -259,7 +260,7 @@ func writeCertOrKey(certOrKeyPath string, certOrKeyData []byte) error { } func getSecret(client clientset.Interface) (*v1.Secret, error) { - secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(kubeadmconstants.KubeadmCertsSecret, metav1.GetOptions{}) + secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeadmCertsSecret, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { return nil, errors.Errorf("Secret %q was not found in the %q Namespace. This Secret might have expired. Please, run `kubeadm init phase upload-certs --upload-certs` on a control plane to generate a new one", kubeadmconstants.KubeadmCertsSecret, metav1.NamespaceSystem) diff --git a/cmd/kubeadm/app/phases/copycerts/copycerts_test.go b/cmd/kubeadm/app/phases/copycerts/copycerts_test.go index c0322be4591..be2032aab6a 100644 --- a/cmd/kubeadm/app/phases/copycerts/copycerts_test.go +++ b/cmd/kubeadm/app/phases/copycerts/copycerts_test.go @@ -17,6 +17,7 @@ limitations under the License. package copycerts import ( + "context" "encoding/hex" "io/ioutil" "os" @@ -180,7 +181,7 @@ func TestUploadCerts(t *testing.T) { if err != nil { t.Fatalf("error decoding key: %v", err) } - secretMap, err := cs.CoreV1().Secrets(metav1.NamespaceSystem).Get(kubeadmconstants.KubeadmCertsSecret, metav1.GetOptions{}) + secretMap, err := cs.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeadmCertsSecret, metav1.GetOptions{}) if err != nil { t.Fatalf("could not fetch secret: %v", err) } diff --git a/cmd/kubeadm/app/phases/upgrade/health.go b/cmd/kubeadm/app/phases/upgrade/health.go index 5674b0e5bb9..f48294b4109 100644 --- a/cmd/kubeadm/app/phases/upgrade/health.go +++ b/cmd/kubeadm/app/phases/upgrade/health.go @@ -17,6 +17,7 @@ limitations under the License. 
package upgrade import ( + "context" "fmt" "os" "time" @@ -142,7 +143,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) } // Check if the Job already exists and delete it - if _, err := client.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}); err == nil { + if _, err := client.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}); err == nil { if err = deleteHealthCheckJob(client, ns, jobName); err != nil { return err } @@ -156,7 +157,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) // Create the Job, but retry in case it is being currently deleted klog.V(2).Infof("Creating Job %q in the namespace %q", jobName, ns) err := wait.PollImmediate(time.Second*1, timeout, func() (bool, error) { - if _, err := client.BatchV1().Jobs(ns).Create(job); err != nil { + if _, err := client.BatchV1().Jobs(ns).Create(context.TODO(), job); err != nil { klog.V(2).Infof("Could not create Job %q in the namespace %q, retrying: %v", jobName, ns, err) lastError = err return false, nil @@ -172,7 +173,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) // Wait for the Job to complete err = wait.PollImmediate(time.Second*1, timeout, func() (bool, error) { - job, err := client.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) + job, err := client.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) if err != nil { lastError = err klog.V(2).Infof("could not get Job %q in the namespace %q, retrying: %v", jobName, ns, err) @@ -202,7 +203,7 @@ func deleteHealthCheckJob(client clientset.Interface, ns, jobName string) error deleteOptions := &metav1.DeleteOptions{ PropagationPolicy: &propagation, } - if err := client.BatchV1().Jobs(ns).Delete(jobName, deleteOptions); err != nil { + if err := client.BatchV1().Jobs(ns).Delete(context.TODO(), jobName, deleteOptions); err != nil { return errors.Wrapf(err, "could not delete Job %q in the namespace %q", jobName, ns) } return nil @@ -213,7 +214,7 @@ func controlPlaneNodesReady(client clientset.Interface, _ *kubeadmapi.ClusterCon selector := labels.SelectorFromSet(labels.Set(map[string]string{ constants.LabelNodeRoleMaster: "", })) - controlPlanes, err := client.CoreV1().Nodes().List(metav1.ListOptions{ + controlPlanes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ LabelSelector: selector.String(), }) if err != nil { @@ -262,7 +263,7 @@ func getNotReadyDaemonSets(client clientset.Interface) ([]error, error) { notReadyDaemonSets := []error{} for _, component := range constants.ControlPlaneComponents { dsName := constants.AddSelfHostedPrefix(component) - ds, err := client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{}) + ds, err := client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(context.TODO(), dsName, metav1.GetOptions{}) if err != nil { return nil, errors.Errorf("couldn't get daemonset %q in the %s namespace", dsName, metav1.NamespaceSystem) } diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index 707ae38de57..17f24cef5ff 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -17,6 +17,7 @@ limitations under the License. 
package upgrade import ( + "context" "os" "github.com/pkg/errors" @@ -120,7 +121,7 @@ func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.ClusterConfigurati // If we're dry-running, we don't need to wait for the new DNS addon to become ready if !dryRun { - dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(installedDeploymentName, metav1.GetOptions{}) + dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(context.TODO(), installedDeploymentName, metav1.GetOptions{}) if err != nil { return err } diff --git a/cmd/kubeadm/app/phases/upgrade/versiongetter.go b/cmd/kubeadm/app/phases/upgrade/versiongetter.go index e70adc6b0f4..cd38f05ce88 100644 --- a/cmd/kubeadm/app/phases/upgrade/versiongetter.go +++ b/cmd/kubeadm/app/phases/upgrade/versiongetter.go @@ -17,6 +17,7 @@ limitations under the License. package upgrade import ( + "context" "fmt" "github.com/pkg/errors" @@ -94,7 +95,7 @@ func (g *KubeVersionGetter) VersionFromCILabel(ciVersionLabel, description strin // KubeletVersions gets the versions of the kubelets in the cluster func (g *KubeVersionGetter) KubeletVersions() (map[string]uint16, error) { - nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, errors.New("couldn't list all nodes in cluster") } diff --git a/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go b/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go index 98e0e1c7230..81f11482599 100644 --- a/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go +++ b/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go @@ -17,6 +17,7 @@ limitations under the License. package uploadconfig import ( + "context" "reflect" "testing" @@ -120,7 +121,7 @@ func TestUploadConfiguration(t *testing.T) { } } if tt.verifyResult { - controlPlaneCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeadmConfigConfigMap, metav1.GetOptions{}) + controlPlaneCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeadmConfigConfigMap, metav1.GetOptions{}) if err != nil { t2.Fatalf("Fail to query ConfigMap error = %v", err) } diff --git a/cmd/kubeadm/app/util/apiclient/idempotency.go b/cmd/kubeadm/app/util/apiclient/idempotency.go index 1729611367c..cbf61c8d0e8 100644 --- a/cmd/kubeadm/app/util/apiclient/idempotency.go +++ b/cmd/kubeadm/app/util/apiclient/idempotency.go @@ -17,6 +17,7 @@ limitations under the License. package apiclient import ( + "context" "encoding/json" "fmt" "time" @@ -43,12 +44,12 @@ type ConfigMapMutator func(*v1.ConfigMap) error // CreateOrUpdateConfigMap creates a ConfigMap if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
func CreateOrUpdateConfigMap(client clientset.Interface, cm *v1.ConfigMap) error { - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(cm); err != nil { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create ConfigMap") } - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(cm); err != nil { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), cm); err != nil { return errors.Wrap(err, "unable to update ConfigMap") } } @@ -67,7 +68,7 @@ func CreateOrMutateConfigMap(client clientset.Interface, cm *v1.ConfigMap, mutat Factor: 1.0, Jitter: 0.1, }, func() (bool, error) { - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(cm); err != nil { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil { lastError = err if apierrors.IsAlreadyExists(err) { lastError = MutateConfigMap(client, metav1.ObjectMeta{Namespace: cm.ObjectMeta.Namespace, Name: cm.ObjectMeta.Name}, mutator) @@ -94,25 +95,25 @@ func MutateConfigMap(client clientset.Interface, meta metav1.ObjectMeta, mutator Factor: 1.0, Jitter: 0.1, }, func() error { - configMap, err := client.CoreV1().ConfigMaps(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + configMap, err := client.CoreV1().ConfigMaps(meta.Namespace).Get(context.TODO(), meta.Name, metav1.GetOptions{}) if err != nil { return err } if err = mutator(configMap); err != nil { return errors.Wrap(err, "unable to mutate ConfigMap") } - _, err = client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Update(configMap) + _, err = client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Update(context.TODO(), configMap) return err }) } // CreateOrRetainConfigMap creates a ConfigMap if the target resource doesn't exist. If the resource exists already, this function will retain the resource instead. func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, configMapName string) error { - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Get(configMapName, metav1.GetOptions{}); err != nil { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}); err != nil { if !apierrors.IsNotFound(err) { return nil } - if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(cm); err != nil { + if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create ConfigMap") } @@ -123,12 +124,12 @@ func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, confi // CreateOrUpdateSecret creates a Secret if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error { - if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(secret); err != nil { + if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(context.TODO(), secret); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create secret") } - if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(secret); err != nil { + if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(context.TODO(), secret); err != nil { return errors.Wrap(err, "unable to update secret") } } @@ -137,7 +138,7 @@ func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error { // CreateOrUpdateServiceAccount creates a ServiceAccount if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAccount) error { - if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(sa); err != nil { + if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(context.TODO(), sa); err != nil { // Note: We don't run .Update here afterwards as that's probably not required // Only thing that could be updated is annotations/labels in .metadata, but we don't use that currently if !apierrors.IsAlreadyExists(err) { @@ -149,12 +150,12 @@ func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAcco // CreateOrUpdateDeployment creates a Deployment if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deployment) error { - if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(deploy); err != nil { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create deployment") } - if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(deploy); err != nil { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(context.TODO(), deploy); err != nil { return errors.Wrap(err, "unable to update deployment") } } @@ -163,11 +164,11 @@ func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deploymen // CreateOrRetainDeployment creates a Deployment if the target resource doesn't exist. If the resource exists already, this function will retain the resource instead. 
func CreateOrRetainDeployment(client clientset.Interface, deploy *apps.Deployment, deployName string) error { - if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Get(deployName, metav1.GetOptions{}); err != nil { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Get(context.TODO(), deployName, metav1.GetOptions{}); err != nil { if !apierrors.IsNotFound(err) { return nil } - if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(deploy); err != nil { + if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create deployment") } @@ -178,12 +179,12 @@ func CreateOrRetainDeployment(client clientset.Interface, deploy *apps.Deploymen // CreateOrUpdateDaemonSet creates a DaemonSet if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) error { - if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(ds); err != nil { + if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(context.TODO(), ds); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create daemonset") } - if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(ds); err != nil { + if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(context.TODO(), ds); err != nil { return errors.Wrap(err, "unable to update daemonset") } } @@ -196,7 +197,7 @@ func DeleteDaemonSetForeground(client clientset.Interface, namespace, name strin deleteOptions := &metav1.DeleteOptions{ PropagationPolicy: &foregroundDelete, } - return client.AppsV1().DaemonSets(namespace).Delete(name, deleteOptions) + return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, deleteOptions) } // DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted @@ -205,17 +206,17 @@ func DeleteDeploymentForeground(client clientset.Interface, namespace, name stri deleteOptions := &metav1.DeleteOptions{ PropagationPolicy: &foregroundDelete, } - return client.AppsV1().Deployments(namespace).Delete(name, deleteOptions) + return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, deleteOptions) } // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error { - if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(role); err != nil { + if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(context.TODO(), role); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create RBAC role") } - if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(role); err != nil { + if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(context.TODO(), role); err != nil { return errors.Wrap(err, "unable to update RBAC role") } } @@ -224,12 +225,12 @@ func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error { // CreateOrUpdateRoleBinding creates a RoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.RoleBinding) error { - if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(roleBinding); err != nil { + if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(context.TODO(), roleBinding); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create RBAC rolebinding") } - if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(roleBinding); err != nil { + if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(context.TODO(), roleBinding); err != nil { return errors.Wrap(err, "unable to update RBAC rolebinding") } } @@ -238,12 +239,12 @@ func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.Rol // CreateOrUpdateClusterRole creates a ClusterRole if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.ClusterRole) error { - if _, err := client.RbacV1().ClusterRoles().Create(clusterRole); err != nil { + if _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create RBAC clusterrole") } - if _, err := client.RbacV1().ClusterRoles().Update(clusterRole); err != nil { + if _, err := client.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole); err != nil { return errors.Wrap(err, "unable to update RBAC clusterrole") } } @@ -252,12 +253,12 @@ func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.Clu // CreateOrUpdateClusterRoleBinding creates a ClusterRoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBinding *rbac.ClusterRoleBinding) error { - if _, err := client.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding); err != nil { + if _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding); err != nil { if !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "unable to create RBAC clusterrolebinding") } - if _, err := client.RbacV1().ClusterRoleBindings().Update(clusterRoleBinding); err != nil { + if _, err := client.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterRoleBinding); err != nil { return errors.Wrap(err, "unable to update RBAC clusterrolebinding") } } @@ -271,7 +272,7 @@ func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBin func PatchNodeOnce(client clientset.Interface, nodeName string, patchFn func(*v1.Node)) func() (bool, error) { return func() (bool, error) { // First get the node object - n, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + n, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { // TODO this should only be for timeouts return false, nil @@ -301,7 +302,7 @@ func PatchNodeOnce(client clientset.Interface, nodeName string, patchFn func(*v1 return false, errors.Wrap(err, "failed to create two way merge patch") } - if _, err := client.CoreV1().Nodes().Patch(n.Name, types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := client.CoreV1().Nodes().Patch(context.TODO(), n.Name, types.StrategicMergePatchType, patchBytes); err != nil { // TODO also check for timeouts if apierrors.IsConflict(err) { fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)") @@ -332,7 +333,7 @@ func GetConfigMapWithRetry(client clientset.Interface, namespace, name string) ( var lastError error err := wait.ExponentialBackoff(clientsetretry.DefaultBackoff, func() (bool, error) { var err error - cm, err = client.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) + cm, err = client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err == nil { return true, nil } diff --git a/cmd/kubeadm/app/util/apiclient/idempotency_test.go b/cmd/kubeadm/app/util/apiclient/idempotency_test.go index 8b2d421a7b1..d91555bba28 100644 --- a/cmd/kubeadm/app/util/apiclient/idempotency_test.go +++ b/cmd/kubeadm/app/util/apiclient/idempotency_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package apiclient import ( + "context" "testing" "github.com/pkg/errors" @@ -68,7 +69,7 @@ func TestPatchNodeNonErrorCases(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { client := fake.NewSimpleClientset() - _, err := client.CoreV1().Nodes().Create(&tc.node) + _, err := client.CoreV1().Nodes().Create(context.TODO(), &tc.node) if err != nil { t.Fatalf("failed to create node to fake client: %v", err) } @@ -105,7 +106,7 @@ func TestCreateOrMutateConfigMap(t *testing.T) { if err != nil { t.Fatalf("error creating ConfigMap: %v", err) } - _, err = client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{}) + _, err = client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), configMapName, metav1.GetOptions{}) if err != nil { t.Fatalf("error retrieving ConfigMap: %v", err) } @@ -113,7 +114,7 @@ func TestCreateOrMutateConfigMap(t *testing.T) { func createClientAndConfigMap(t *testing.T) *fake.Clientset { client := fake.NewSimpleClientset() - _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&v1.ConfigMap{ + _, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: configMapName, Namespace: metav1.NamespaceSystem, @@ -142,7 +143,7 @@ func TestMutateConfigMap(t *testing.T) { t.Fatalf("error mutating regular ConfigMap: %v", err) } - cm, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{}) + cm, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), configMapName, metav1.GetOptions{}) if cm.Data["key"] != "some-other-value" { t.Fatalf("ConfigMap mutation was invalid, has: %q", cm.Data["key"]) } @@ -174,7 +175,7 @@ func TestMutateConfigMapWithConflict(t *testing.T) { t.Fatalf("error mutating conflicting ConfigMap: %v", err) } - cm, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{}) + cm, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), configMapName, metav1.GetOptions{}) if cm.Data["key"] != "some-other-value" { t.Fatalf("ConfigMap mutation with conflict was invalid, has: %q", cm.Data["key"]) } diff --git a/cmd/kubeadm/app/util/apiclient/wait.go b/cmd/kubeadm/app/util/apiclient/wait.go index 4d0ca735c05..295b47e6a94 100644 --- a/cmd/kubeadm/app/util/apiclient/wait.go +++ b/cmd/kubeadm/app/util/apiclient/wait.go @@ -95,7 +95,7 @@ func (w *KubeWaiter) WaitForPodsWithLabel(kvLabel string) error { lastKnownPodNumber := -1 return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) { listOpts := metav1.ListOptions{LabelSelector: kvLabel} - pods, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts) + pods, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts) if err != nil { fmt.Fprintf(w.writer, "[apiclient] Error getting Pods with label selector %q [%v]\n", kvLabel, err) return false, nil @@ -123,7 +123,7 @@ func (w *KubeWaiter) WaitForPodsWithLabel(kvLabel string) error { // WaitForPodToDisappear blocks until it timeouts or gets a "NotFound" response from the API Server when getting the Static Pod in question func (w *KubeWaiter) WaitForPodToDisappear(podName string) error { return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) { - _, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).Get(podName, metav1.GetOptions{}) + _, err := 
w.client.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), podName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { fmt.Printf("[apiclient] The old Pod %q is now removed (which is desired)\n", podName) return true, nil @@ -241,7 +241,7 @@ func (w *KubeWaiter) WaitForStaticPodHashChange(nodeName, component, previousHas func getStaticPodSingleHash(client clientset.Interface, nodeName string, component string) (string, error) { staticPodName := fmt.Sprintf("%s-%s", component, nodeName) - staticPod, err := client.CoreV1().Pods(metav1.NamespaceSystem).Get(staticPodName, metav1.GetOptions{}) + staticPod, err := client.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), staticPodName, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/cmd/kubeadm/app/util/config/cluster.go b/cmd/kubeadm/app/util/config/cluster.go index 3d6533c6712..48f8a221f8c 100644 --- a/cmd/kubeadm/app/util/config/cluster.go +++ b/cmd/kubeadm/app/util/config/cluster.go @@ -17,6 +17,7 @@ limitations under the License. package config import ( + "context" "crypto/x509" "fmt" "io" @@ -115,7 +116,7 @@ func getNodeRegistration(kubeconfigDir string, client clientset.Interface, nodeR } // gets the corresponding node and retrieves attributes stored there. - node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return errors.Wrap(err, "failed to get corresponding node") } diff --git a/cmd/kubeadm/app/util/config/cluster_test.go b/cmd/kubeadm/app/util/config/cluster_test.go index 6ac57b6b371..0c4b6aa1556 100644 --- a/cmd/kubeadm/app/util/config/cluster_test.go +++ b/cmd/kubeadm/app/util/config/cluster_test.go @@ -17,6 +17,7 @@ limitations under the License. package config import ( + "context" "io/ioutil" "os" "path/filepath" @@ -342,7 +343,7 @@ func TestGetNodeRegistration(t *testing.T) { client := clientsetfake.NewSimpleClientset() if rt.node != nil { - _, err := client.CoreV1().Nodes().Create(rt.node) + _, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node) if err != nil { t.Errorf("couldn't create Node") return @@ -618,7 +619,7 @@ func TestGetInitConfigurationFromCluster(t *testing.T) { client := clientsetfake.NewSimpleClientset() if rt.node != nil { - _, err := client.CoreV1().Nodes().Create(rt.node) + _, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node) if err != nil { t.Errorf("couldn't create Node") return diff --git a/pkg/client/tests/fake_client_test.go b/pkg/client/tests/fake_client_test.go index 66dea296051..0ae5d87bc14 100644 --- a/pkg/client/tests/fake_client_test.go +++ b/pkg/client/tests/fake_client_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package tests import ( + "context" "testing" corev1 "k8s.io/api/core/v1" @@ -37,7 +38,7 @@ func TestFakeClientSetFiltering(t *testing.T) { testSA("nsB", "sa-3"), ) - saList1, err := tc.CoreV1().ServiceAccounts("nsA").List(metav1.ListOptions{}) + saList1, err := tc.CoreV1().ServiceAccounts("nsA").List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("ServiceAccounts.List: %s", err) } @@ -50,7 +51,7 @@ func TestFakeClientSetFiltering(t *testing.T) { } } - saList2, err := tc.CoreV1().ServiceAccounts("nsB").List(metav1.ListOptions{}) + saList2, err := tc.CoreV1().ServiceAccounts("nsB").List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("ServiceAccounts.List: %s", err) } @@ -63,7 +64,7 @@ func TestFakeClientSetFiltering(t *testing.T) { } } - pod1, err := tc.CoreV1().Pods("nsA").Get("pod-1", metav1.GetOptions{}) + pod1, err := tc.CoreV1().Pods("nsA").Get(context.TODO(), "pod-1", metav1.GetOptions{}) if err != nil { t.Fatalf("Pods.Get: %s", err) } @@ -74,12 +75,12 @@ func TestFakeClientSetFiltering(t *testing.T) { t.Fatalf("Expected to find pod nsA/pod-1t, got %s/%s", pod1.Namespace, pod1.Name) } - wrongPod, err := tc.CoreV1().Pods("nsB").Get("pod-1", metav1.GetOptions{}) + wrongPod, err := tc.CoreV1().Pods("nsB").Get(context.TODO(), "pod-1", metav1.GetOptions{}) if err == nil { t.Fatalf("Pods.Get: expected nsB/pod-1 not to match, but it matched %s/%s", wrongPod.Namespace, wrongPod.Name) } - allPods, err := tc.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) + allPods, err := tc.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Pods.List: %s", err) } @@ -87,7 +88,7 @@ func TestFakeClientSetFiltering(t *testing.T) { t.Fatalf("Expected %d pods to match, got %d", expected, actual) } - allSAs, err := tc.CoreV1().ServiceAccounts(metav1.NamespaceAll).List(metav1.ListOptions{}) + allSAs, err := tc.CoreV1().ServiceAccounts(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("ServiceAccounts.List: %s", err) } @@ -102,12 +103,12 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) { testPod("nsA", "pod-1"), ) - _, err := tc.CoreV1().Namespaces().Create(testNamespace("nsB")) + _, err := tc.CoreV1().Namespaces().Create(context.TODO(), testNamespace("nsB")) if err != nil { t.Fatalf("Namespaces.Create: %s", err) } - allNS, err := tc.CoreV1().Namespaces().List(metav1.ListOptions{}) + allNS, err := tc.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Namespaces.List: %s", err) } @@ -115,12 +116,12 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) { t.Fatalf("Expected %d namespaces to match, got %d", expected, actual) } - _, err = tc.CoreV1().Pods("nsB").Create(testPod("", "pod-1")) + _, err = tc.CoreV1().Pods("nsB").Create(context.TODO(), testPod("", "pod-1")) if err != nil { t.Fatalf("Pods.Create nsB/pod-1: %s", err) } - podB1, err := tc.CoreV1().Pods("nsB").Get("pod-1", metav1.GetOptions{}) + podB1, err := tc.CoreV1().Pods("nsB").Get(context.TODO(), "pod-1", metav1.GetOptions{}) if err != nil { t.Fatalf("Pods.Get nsB/pod-1: %s", err) } @@ -131,17 +132,17 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) { t.Fatalf("Expected to find pod nsB/pod-1t, got %s/%s", podB1.Namespace, podB1.Name) } - _, err = tc.CoreV1().Pods("nsA").Create(testPod("", "pod-1")) + _, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("", "pod-1")) if err == nil { t.Fatalf("Expected Pods.Create to fail with already exists error") } - _, 
err = tc.CoreV1().Pods("nsA").Update(testPod("", "pod-1")) + _, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-1")) if err != nil { t.Fatalf("Pods.Update nsA/pod-1: %s", err) } - _, err = tc.CoreV1().Pods("nsA").Create(testPod("nsB", "pod-2")) + _, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("nsB", "pod-2")) if err == nil { t.Fatalf("Expected Pods.Create to fail with bad request from namespace mismtach") } @@ -149,7 +150,7 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) { t.Fatalf("Expected Pods.Create error to provide object and request namespaces, got %q", err) } - _, err = tc.CoreV1().Pods("nsA").Update(testPod("", "pod-3")) + _, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-3")) if err == nil { t.Fatalf("Expected Pods.Update nsA/pod-3 to fail with not found error") } diff --git a/pkg/controller/bootstrap/bootstrapsigner.go b/pkg/controller/bootstrap/bootstrapsigner.go index 38e1313faf7..27c0d4692d2 100644 --- a/pkg/controller/bootstrap/bootstrapsigner.go +++ b/pkg/controller/bootstrap/bootstrapsigner.go @@ -17,6 +17,7 @@ limitations under the License. package bootstrap import ( + "context" "strings" "time" @@ -242,7 +243,7 @@ func (e *Signer) signConfigMap() { } func (e *Signer) updateConfigMap(cm *v1.ConfigMap) { - _, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(cm) + _, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm) if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) { klog.V(3).Infof("Error updating ConfigMap: %v", err) } diff --git a/pkg/controller/bootstrap/tokencleaner.go b/pkg/controller/bootstrap/tokencleaner.go index b89f48348f7..659a4e243ee 100644 --- a/pkg/controller/bootstrap/tokencleaner.go +++ b/pkg/controller/bootstrap/tokencleaner.go @@ -17,6 +17,7 @@ limitations under the License. package bootstrap import ( + "context" "fmt" "time" @@ -195,7 +196,7 @@ func (tc *TokenCleaner) evalSecret(o interface{}) { if len(secret.UID) > 0 { options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}} } - err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(secret.Name, options) + err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, options) // NotFound isn't a real error (it's already been deleted) // Conflict isn't a real error (the UID precondition failed) if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) { diff --git a/pkg/controller/certificates/approver/sarapprove.go b/pkg/controller/certificates/approver/sarapprove.go index 4c1d1dc2448..e159920ab38 100644 --- a/pkg/controller/certificates/approver/sarapprove.go +++ b/pkg/controller/certificates/approver/sarapprove.go @@ -18,6 +18,7 @@ limitations under the License. package approver import ( + "context" "crypto/x509" "fmt" "reflect" @@ -129,7 +130,7 @@ func (a *sarApprover) authorize(csr *capi.CertificateSigningRequest, rattrs auth ResourceAttributes: &rattrs, }, } - sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(sar) + sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar) if err != nil { return false, err } diff --git a/pkg/controller/certificates/cleaner/cleaner.go b/pkg/controller/certificates/cleaner/cleaner.go index ebdac2956d5..fd4d19e6a55 100644 --- a/pkg/controller/certificates/cleaner/cleaner.go +++ b/pkg/controller/certificates/cleaner/cleaner.go @@ -21,6 +21,7 @@ limitations under the License. 
package cleaner import ( + "context" "crypto/x509" "encoding/pem" "fmt" @@ -108,7 +109,7 @@ func (ccc *CSRCleanerController) handle(csr *capi.CertificateSigningRequest) err return err } if isIssuedPastDeadline(csr) || isDeniedPastDeadline(csr) || isPendingPastDeadline(csr) || isIssuedExpired { - if err := ccc.csrClient.Delete(csr.Name, nil); err != nil { + if err := ccc.csrClient.Delete(context.TODO(), csr.Name, nil); err != nil { return fmt.Errorf("unable to delete CSR %q: %v", csr.Name, err) } } diff --git a/pkg/controller/certificates/rootcacertpublisher/publisher.go b/pkg/controller/certificates/rootcacertpublisher/publisher.go index 9f9e1aa685c..24c6205fa3c 100644 --- a/pkg/controller/certificates/rootcacertpublisher/publisher.go +++ b/pkg/controller/certificates/rootcacertpublisher/publisher.go @@ -17,6 +17,7 @@ limitations under the License. package rootcacertpublisher import ( + "context" "fmt" "reflect" "time" @@ -178,7 +179,7 @@ func (c *Publisher) syncNamespace(ns string) error { cm, err := c.cmLister.ConfigMaps(ns).Get(RootCACertConfigMapName) switch { case apierrors.IsNotFound(err): - _, err := c.client.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{ + _, err := c.client.CoreV1().ConfigMaps(ns).Create(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: RootCACertConfigMapName, }, @@ -201,7 +202,7 @@ func (c *Publisher) syncNamespace(ns string) error { cm.Data = data - _, err = c.client.CoreV1().ConfigMaps(ns).Update(cm) + _, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm) return err } diff --git a/pkg/controller/certificates/signer/signer.go b/pkg/controller/certificates/signer/signer.go index 94ba8272be8..9b7861eccac 100644 --- a/pkg/controller/certificates/signer/signer.go +++ b/pkg/controller/certificates/signer/signer.go @@ -18,6 +18,7 @@ limitations under the License. 
package signer import ( + "context" "encoding/pem" "fmt" "time" @@ -94,7 +95,7 @@ func (s *signer) handle(csr *capi.CertificateSigningRequest) error { if err != nil { return fmt.Errorf("error auto signing csr: %v", err) } - _, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(csr) + _, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr) if err != nil { return fmt.Errorf("error updating signature for csr: %v", err) } diff --git a/pkg/controller/client_builder.go b/pkg/controller/client_builder.go index 716c5c4cbc5..7e5bc78b5b9 100644 --- a/pkg/controller/client_builder.go +++ b/pkg/controller/client_builder.go @@ -120,11 +120,11 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector - return b.CoreClient.Secrets(b.Namespace).List(options) + return b.CoreClient.Secrets(b.Namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector - return b.CoreClient.Secrets(b.Namespace).Watch(options) + return b.CoreClient.Secrets(b.Namespace).Watch(context.TODO(), options) }, } ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) @@ -157,7 +157,7 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro if !valid { klog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Namespace, sa.Name) // try to delete the secret containing the invalid token - if err := b.CoreClient.Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + if err := b.CoreClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { klog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Namespace, sa.Name, err) } // continue watching for good tokens @@ -186,7 +186,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, // Try token review first tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}} - if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(tokenReview); err == nil { + if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(context.TODO(), tokenReview); err == nil { if !tokenResult.Status.Authenticated { klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Namespace, sa.Name) return nil, false, nil diff --git a/pkg/controller/client_builder_dynamic.go b/pkg/controller/client_builder_dynamic.go index 19aaded8bf5..b89bfde9b6f 100644 --- a/pkg/controller/client_builder_dynamic.go +++ b/pkg/controller/client_builder_dynamic.go @@ -17,6 +17,7 @@ limitations under the License. 
package controller import ( + "context" "fmt" "net/http" "sync" @@ -174,7 +175,7 @@ func (ts *tokenSourceImpl) Token() (*oauth2.Token, error) { return false, nil } - tr, inErr := ts.coreClient.ServiceAccounts(ts.namespace).CreateToken(ts.serviceAccountName, &v1authenticationapi.TokenRequest{ + tr, inErr := ts.coreClient.ServiceAccounts(ts.namespace).CreateToken(context.TODO(), ts.serviceAccountName, &v1authenticationapi.TokenRequest{ Spec: v1authenticationapi.TokenRequestSpec{ ExpirationSeconds: utilpointer.Int64Ptr(ts.expirationSeconds), }, diff --git a/pkg/controller/cloud/node_controller.go b/pkg/controller/cloud/node_controller.go index 99950b8e555..085875cbc01 100644 --- a/pkg/controller/cloud/node_controller.go +++ b/pkg/controller/cloud/node_controller.go @@ -154,7 +154,7 @@ func (cnc *CloudNodeController) UpdateNodeStatus(ctx context.Context) { return } - nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) + nodes, err := cnc.kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"}) if err != nil { klog.Errorf("Error monitoring node status: %v", err) return @@ -352,7 +352,7 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod return } - curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to get node %s: %v", node.Name, err)) return @@ -376,7 +376,7 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod }) err = clientretry.RetryOnConflict(UpdateNodeSpecBackoff, func() error { - curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) if err != nil { return err } @@ -385,7 +385,7 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod modify(curNode) } - _, err = cnc.kubeClient.CoreV1().Nodes().Update(curNode) + _, err = cnc.kubeClient.CoreV1().Nodes().Update(context.TODO(), curNode) if err != nil { return err } diff --git a/pkg/controller/cloud/node_controller_test.go b/pkg/controller/cloud/node_controller_test.go index f762affe8a3..9711a665338 100644 --- a/pkg/controller/cloud/node_controller_test.go +++ b/pkg/controller/cloud/node_controller_test.go @@ -773,7 +773,7 @@ func Test_reconcileNodeLabels(t *testing.T) { t.Errorf("unexpected error") } - actualNode, err := clientset.CoreV1().Nodes().Get("node01", metav1.GetOptions{}) + actualNode, err := clientset.CoreV1().Nodes().Get(context.TODO(), "node01", metav1.GetOptions{}) if err != nil { t.Fatalf("error getting updated node: %v", err) } diff --git a/pkg/controller/cloud/node_lifecycle_controller.go b/pkg/controller/cloud/node_lifecycle_controller.go index 83d21eeab98..ab0f07bad4a 100644 --- a/pkg/controller/cloud/node_lifecycle_controller.go +++ b/pkg/controller/cloud/node_lifecycle_controller.go @@ -190,7 +190,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes() { fmt.Sprintf("Deleting node %v because it does not exist in the cloud provider", node.Name), "Node %s event: %s", node.Name, deleteNodeEvent) - if err := c.kubeClient.CoreV1().Nodes().Delete(node.Name, nil); err != nil { + if err := c.kubeClient.CoreV1().Nodes().Delete(context.TODO(), node.Name, nil); err != nil { klog.Errorf("unable to delete node %q: %v", node.Name, err) } } diff --git 
a/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go b/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go index 4548e4c76d1..8765755e1bc 100644 --- a/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go +++ b/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go @@ -17,6 +17,7 @@ limitations under the License. package clusterroleaggregation import ( + "context" "fmt" "sort" "time" @@ -126,7 +127,7 @@ func (c *ClusterRoleAggregationController) syncClusterRole(key string) error { for _, rule := range newPolicyRules { clusterRole.Rules = append(clusterRole.Rules, *rule.DeepCopy()) } - _, err = c.clusterRoleClient.ClusterRoles().Update(clusterRole) + _, err = c.clusterRoleClient.ClusterRoles().Update(context.TODO(), clusterRole) return err } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index ec876e1eb2b..e05e5b68428 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -17,6 +17,7 @@ limitations under the License. package controller import ( + "context" "encoding/binary" "encoding/json" "fmt" @@ -419,7 +420,7 @@ type RealRSControl struct { var _ RSControlInterface = &RealRSControl{} func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error { - _, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data) return err } @@ -439,7 +440,7 @@ type RealControllerRevisionControl struct { var _ ControllerRevisionControlInterface = &RealControllerRevisionControl{} func (r RealControllerRevisionControl) PatchControllerRevision(namespace, name string, data []byte) error { - _, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data) return err } @@ -536,7 +537,7 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v } func (r RealPodControl) PatchPod(namespace, name string, data []byte) error { - _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data) return err } @@ -576,7 +577,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT if len(labels.Set(pod.Labels)) == 0 { return fmt.Errorf("unable to create pods, no labels") } - newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(pod) + newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod) if err != nil { // only send an event if the namespace isn't terminating if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { @@ -601,7 +602,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime return fmt.Errorf("object does not have ObjectMeta, %v", err) } klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) - if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil && !apierrors.IsNotFound(err) { + if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, nil); err != nil && !apierrors.IsNotFound(err) { r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, 
"Error deleting: %v", err) return fmt.Errorf("unable to delete pods: %v", err) } @@ -1013,10 +1014,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. if firstTry { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -1070,10 +1071,10 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. if firstTry { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -1118,7 +1119,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) } - _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes) return err } @@ -1147,10 +1148,10 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. 
if firstTry { - node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -1177,7 +1178,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la if err != nil { return fmt.Errorf("failed to create a two-way merge patch: %v", err) } - if _, err := kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes); err != nil { return fmt.Errorf("failed to patch the node: %v", err) } return nil @@ -1185,7 +1186,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la } func getOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, name string) (*v1.ServiceAccount, error) { - sa, err := coreClient.ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + sa, err := coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err == nil { return sa, nil } @@ -1195,17 +1196,17 @@ func getOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, nam // Create the namespace if we can't verify it exists. // Tolerate errors, since we don't know whether this component has namespace creation permissions. - if _, err := coreClient.Namespaces().Get(namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) { - if _, err = coreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) { + if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}); err != nil && !apierrors.IsAlreadyExists(err) { klog.Warningf("create non-exist namespace %s failed:%v", namespace, err) } } // Create the service account - sa, err = coreClient.ServiceAccounts(namespace).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}) + sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}) if apierrors.IsAlreadyExists(err) { // If we're racing to init and someone else already created it, re-fetch - return coreClient.ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } return sa, err } diff --git a/pkg/controller/cronjob/cronjob_controller.go b/pkg/controller/cronjob/cronjob_controller.go index f778079571f..58a90224dc5 100644 --- a/pkg/controller/cronjob/cronjob_controller.go +++ b/pkg/controller/cronjob/cronjob_controller.go @@ -108,7 +108,7 @@ func (jm *Controller) syncAll() { // we must also see that the parent CronJob has non-nil DeletionTimestamp (see #42639). // Note that this only works because we are NOT using any caches here. 
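// --- reviewer example (not part of this patch) ---
// A sketch of the get-or-create idiom used by getOrCreateServiceAccount above, updated
// for the context-aware Get/Create calls in this patch. ensureConfigMap is a
// hypothetical helper shown only to illustrate the pattern.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func ensureConfigMap(ctx context.Context, c clientset.Interface, ns, name string) (*v1.ConfigMap, error) {
	if cm, err := c.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{}); err == nil {
		return cm, nil
	} else if !apierrors.IsNotFound(err) {
		return nil, err
	}
	cm, err := c.CoreV1().ConfigMaps(ns).Create(ctx, &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: name},
	})
	if apierrors.IsAlreadyExists(err) {
		// Someone else won the creation race; re-fetch the existing object.
		return c.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
	}
	return cm, err
}
// --- end reviewer example ---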
jobListFunc := func(opts metav1.ListOptions) (runtime.Object, error) { - return jm.kubeClient.BatchV1().Jobs(metav1.NamespaceAll).List(opts) + return jm.kubeClient.BatchV1().Jobs(metav1.NamespaceAll).List(context.TODO(), opts) } js := make([]batchv1.Job, 0) @@ -128,7 +128,7 @@ func (jm *Controller) syncAll() { klog.V(4).Infof("Found %d jobs", len(js)) cronJobListFunc := func(opts metav1.ListOptions) (runtime.Object, error) { - return jm.kubeClient.BatchV1beta1().CronJobs(metav1.NamespaceAll).List(opts) + return jm.kubeClient.BatchV1beta1().CronJobs(metav1.NamespaceAll).List(context.TODO(), opts) } jobsBySj := groupJobsByParent(js) diff --git a/pkg/controller/cronjob/injection.go b/pkg/controller/cronjob/injection.go index b4b1bb8042f..27e22b49daa 100644 --- a/pkg/controller/cronjob/injection.go +++ b/pkg/controller/cronjob/injection.go @@ -17,6 +17,7 @@ limitations under the License. package cronjob import ( + "context" "fmt" "sync" @@ -44,7 +45,7 @@ type realSJControl struct { var _ sjControlInterface = &realSJControl{} func (c *realSJControl) UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) { - return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(sj) + return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(context.TODO(), sj) } // fakeSJControl is the default implementation of sjControlInterface. @@ -102,24 +103,24 @@ func copyAnnotations(template *batchv1beta1.JobTemplateSpec) labels.Set { } func (r realJobControl) GetJob(namespace, name string) (*batchv1.Job, error) { - return r.KubeClient.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{}) + return r.KubeClient.BatchV1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } func (r realJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) { - return r.KubeClient.BatchV1().Jobs(namespace).Update(job) + return r.KubeClient.BatchV1().Jobs(namespace).Update(context.TODO(), job) } func (r realJobControl) PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error) { - return r.KubeClient.BatchV1().Jobs(namespace).Patch(name, pt, data, subresources...) + return r.KubeClient.BatchV1().Jobs(namespace).Patch(context.TODO(), name, pt, data, subresources...) 
} func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) { - return r.KubeClient.BatchV1().Jobs(namespace).Create(job) + return r.KubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job) } func (r realJobControl) DeleteJob(namespace string, name string) error { background := metav1.DeletePropagationBackground - return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &background}) + return r.KubeClient.BatchV1().Jobs(namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{PropagationPolicy: &background}) } type fakeJobControl struct { @@ -217,11 +218,11 @@ type realPodControl struct { var _ podControlInterface = &realPodControl{} func (r realPodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error) { - return r.KubeClient.CoreV1().Pods(namespace).List(opts) + return r.KubeClient.CoreV1().Pods(namespace).List(context.TODO(), opts) } func (r realPodControl) DeletePod(namespace string, name string) error { - return r.KubeClient.CoreV1().Pods(namespace).Delete(name, nil) + return r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, nil) } type fakePodControl struct { diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index 3ef22ba55b9..320a30fc7b1 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -17,6 +17,7 @@ limitations under the License. package daemon import ( + "context" "fmt" "reflect" "sort" @@ -718,7 +719,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, e // If any adoptions are attempted, we should first recheck for deletion with // an uncached quorum read sometime after listing Pods (see #42639). dsNotDeleted := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{}) + fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -1031,12 +1032,12 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps. toUpdate.Status.NumberAvailable = int32(numberAvailable) toUpdate.Status.NumberUnavailable = int32(numberUnavailable) - if _, updateErr = dsClient.UpdateStatus(toUpdate); updateErr == nil { + if _, updateErr = dsClient.UpdateStatus(context.TODO(), toUpdate); updateErr == nil { return nil } // Update the set with the latest resource version for the next poll - if toUpdate, getErr = dsClient.Get(ds.Name, metav1.GetOptions{}); getErr != nil { + if toUpdate, getErr = dsClient.Get(context.TODO(), ds.Name, metav1.GetOptions{}); getErr != nil { // If the GET fails we can't trust status.Replicas anymore. This error // is bound to be more interesting than the update failure. 
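// --- reviewer example (not part of this patch) ---
// A sketch of the UpdateStatus retry loop used by storeDaemonSetStatus above: on a
// conflict the object is re-read with the context-aware Get and the update is retried.
// updateDaemonSetStatus is a hypothetical wrapper for illustration only.
package example

import (
	"context"

	apps "k8s.io/api/apps/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
)

func updateDaemonSetStatus(ctx context.Context, dsClient appsclient.DaemonSetInterface, ds *apps.DaemonSet, mutate func(*apps.DaemonSet)) error {
	toUpdate := ds.DeepCopy()
	var err error
	for i := 0; i < 5; i++ {
		mutate(toUpdate)
		if _, err = dsClient.UpdateStatus(ctx, toUpdate); err == nil {
			return nil
		}
		if !apierrors.IsConflict(err) {
			return err
		}
		// Conflict: fetch the latest resource version and try again.
		if toUpdate, err = dsClient.Get(ctx, ds.Name, metav1.GetOptions{}); err != nil {
			return err
		}
	}
	return err
}
// --- end reviewer example ---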
return getErr diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go index 1c09a2e4a79..2a7694771c9 100644 --- a/pkg/controller/daemon/update.go +++ b/pkg/controller/daemon/update.go @@ -18,6 +18,7 @@ package daemon import ( "bytes" + "context" "fmt" "reflect" "sort" @@ -94,7 +95,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok { toUpdate := history.DeepCopy() toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name - history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate) + history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate) if err != nil { return nil, nil, err } @@ -129,7 +130,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps if cur.Revision < currRevision { toUpdate := cur.DeepCopy() toUpdate.Revision = currRevision - _, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate) + _, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate) if err != nil { return nil, nil, err } @@ -170,7 +171,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps. continue } // Clean up - err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(history.Name, nil) + err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), history.Name, nil) if err != nil { return err } @@ -219,14 +220,14 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor toUpdate.Labels = make(map[string]string) } toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey] - _, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate) + _, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(context.TODO(), toUpdate) if err != nil { return nil, err } } } // Remove duplicates - err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(cur.Name, nil) + err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), cur.Name, nil) if err != nil { return nil, err } @@ -253,7 +254,7 @@ func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*app // If any adoptions are attempted, we should first recheck for deletion with // an uncached quorum read sometime after listing Pods (see #42639). canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{}) + fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -322,10 +323,10 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (* Revision: revision, } - history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(history) + history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), history) if outerErr := err; errors.IsAlreadyExists(outerErr) { // TODO: Is it okay to get from historyLister? 
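// --- reviewer example (not part of this patch) ---
// context.TODO() throughout this diff is a placeholder until callers plumb a real
// context.Context through. Where a caller already has one, it can bound the request
// directly; a minimal sketch (getDaemonSetWithTimeout is a hypothetical helper):
package example

import (
	"context"
	"time"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func getDaemonSetWithTimeout(parent context.Context, c clientset.Interface, ns, name string) (*apps.DaemonSet, error) {
	// Derive a context so the call is cancelled after 10s or when the parent is cancelled.
	ctx, cancel := context.WithTimeout(parent, 10*time.Second)
	defer cancel()
	return c.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{})
}
// --- end reviewer example ---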
- existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{}) + existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) if getErr != nil { return nil, getErr } @@ -340,7 +341,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (* // Handle name collisions between different history // Get the latest DaemonSet from the API server to make sure collision count is only increased when necessary - currDS, getErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{}) + currDS, getErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{}) if getErr != nil { return nil, getErr } @@ -352,7 +353,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (* currDS.Status.CollisionCount = new(int32) } *currDS.Status.CollisionCount++ - _, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(currDS) + _, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(context.TODO(), currDS) if updateErr != nil { return nil, updateErr } diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index c2495717192..5cc946f0441 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -21,6 +21,7 @@ limitations under the License. package deployment import ( + "context" "fmt" "reflect" "time" @@ -508,7 +509,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment) // If any adoptions are attempted, we should first recheck for deletion with // an uncached quorum read sometime after listing ReplicaSets (see #42639). canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) + fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(context.TODO(), d.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -588,7 +589,7 @@ func (dc *DeploymentController) syncDeployment(key string) error { dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.") if d.Status.ObservedGeneration < d.Generation { d.Status.ObservedGeneration = d.Generation - dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) + dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d) } return nil } diff --git a/pkg/controller/deployment/progress.go b/pkg/controller/deployment/progress.go index e340a5be89d..85d3628e3ee 100644 --- a/pkg/controller/deployment/progress.go +++ b/pkg/controller/deployment/progress.go @@ -17,6 +17,7 @@ limitations under the License. 
package deployment import ( + "context" "fmt" "reflect" "time" @@ -112,7 +113,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new newDeployment := d newDeployment.Status = newStatus - _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) + _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment) return err } diff --git a/pkg/controller/deployment/rollback.go b/pkg/controller/deployment/rollback.go index 6593630af41..bc8ac36ad6e 100644 --- a/pkg/controller/deployment/rollback.go +++ b/pkg/controller/deployment/rollback.go @@ -17,6 +17,7 @@ limitations under the License. package deployment import ( + "context" "fmt" "strconv" @@ -113,7 +114,7 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error { klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name) setRollbackTo(d, nil) - _, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d) + _, err := dc.client.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d) return err } diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index a208fd0cfb5..1e191528820 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -17,6 +17,7 @@ limitations under the License. package deployment import ( + "context" "fmt" "reflect" "sort" @@ -97,7 +98,7 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error } var err error - d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) + d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d) return err } @@ -154,7 +155,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds if annotationsUpdated || minReadySecondsNeedsUpdate { rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds - return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy) + return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy) } // Should use the revision in existingNewRS's annotation, since it set by before @@ -172,7 +173,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old if needsUpdate { var err error - if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d); err != nil { + if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d); err != nil { return nil, err } } @@ -219,7 +220,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old // hash collisions. If there is any other error, we need to report it in the status of // the Deployment. alreadyExists := false - createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(&newRS) + createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS) switch { // We may end up hitting this due to a slow cache or a fast resync of the Deployment. case errors.IsAlreadyExists(err): @@ -251,7 +252,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old *d.Status.CollisionCount++ // Update the collisionCount for the Deployment and let it requeue by returning the original // error. 
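// --- reviewer example (not part of this patch) ---
// RsListFromClient below adapts the now context-aware List call to a ListFunc that
// only takes ListOptions, by fixing the context inside the closure. A generic sketch
// of that adaptation (podListFunc is a hypothetical name):
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func podListFunc(c clientset.Interface, namespace string) func(metav1.ListOptions) (*v1.PodList, error) {
	return func(opts metav1.ListOptions) (*v1.PodList, error) {
		// The context is supplied here rather than by the caller of the returned func.
		return c.CoreV1().Pods(namespace).List(context.TODO(), opts)
	}
}
// --- end reviewer example ---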
- _, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) + _, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d) if dErr == nil { klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount) } @@ -267,7 +268,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old // We don't really care about this error at this point, since we have a bigger issue to report. // TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account // these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568 - _, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) + _, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d) } dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg) return nil, err @@ -284,7 +285,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old needsUpdate = true } if needsUpdate { - _, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) + _, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d) } return createdRS, err } @@ -419,7 +420,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in rsCopy := rs.DeepCopy() *(rsCopy.Spec.Replicas) = newScale deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment)) - rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(rsCopy) + rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy) if err == nil && sizeNeedsUpdate { scaled = true dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale) @@ -457,7 +458,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep continue } klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name) - if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { + if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(context.TODO(), rs.Name, nil); err != nil && !errors.IsNotFound(err) { // Return error instead of aggregating and continuing DELETEs on the theory // that we may be overloading the api server. return err @@ -477,7 +478,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newDeployment := d newDeployment.Status = newStatus - _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) + _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment) return err } diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index f951ea1581a..26305c776a1 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -17,6 +17,7 @@ limitations under the License. package util import ( + "context" "fmt" "math" "sort" @@ -545,7 +546,7 @@ func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) // RsListFromClient returns an rsListFunc that wraps the given client. 
func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc { return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) { - rsList, err := c.ReplicaSets(namespace).List(options) + rsList, err := c.ReplicaSets(namespace).List(context.TODO(), options) if err != nil { return nil, err } diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go index 4286de7638b..f1526b38c47 100644 --- a/pkg/controller/disruption/disruption.go +++ b/pkg/controller/disruption/disruption.go @@ -17,6 +17,7 @@ limitations under the License. package disruption import ( + "context" "fmt" "time" @@ -791,6 +792,6 @@ func (dc *DisruptionController) updatePdbStatus(pdb *policy.PodDisruptionBudget, func (dc *DisruptionController) writePdbStatus(pdb *policy.PodDisruptionBudget) error { // If this update fails, don't retry it. Allow the failure to get handled & // retried in `processNextWorkItem()`. - _, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(pdb) + _, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(context.TODO(), pdb) return err } diff --git a/pkg/controller/disruption/disruption_test.go b/pkg/controller/disruption/disruption_test.go index a98787af204..6c0a9d65e56 100644 --- a/pkg/controller/disruption/disruption_test.go +++ b/pkg/controller/disruption/disruption_test.go @@ -1054,14 +1054,14 @@ func TestUpdatePDBStatusRetries(t *testing.T) { // Create a PDB and 3 pods that match it. pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1)) - pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(pdb) + pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb) if err != nil { t.Fatalf("Failed to create PDB: %v", err) } podNames := []string{"moe", "larry", "curly"} for _, name := range podNames { pod, _ := newPod(t, name) - _, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(pod) + _, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) if err != nil { t.Fatalf("Failed to create pod: %v", err) } @@ -1133,7 +1133,7 @@ func TestUpdatePDBStatusRetries(t *testing.T) { }) // (A) Delete one pod - if err := dc.coreClient.CoreV1().Pods("default").Delete(podNames[0], &metav1.DeleteOptions{}); err != nil { + if err := dc.coreClient.CoreV1().Pods("default").Delete(context.TODO(), podNames[0], &metav1.DeleteOptions{}); err != nil { t.Fatal(err) } if err := waitForCacheCount(dc.podStore, len(podNames)-1); err != nil { @@ -1151,7 +1151,7 @@ func TestUpdatePDBStatusRetries(t *testing.T) { // (C) Whether or not sync() returned an error, the PDB status should reflect // the evictions that took place. - finalPDB, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets("default").Get(pdb.Name, metav1.GetOptions{}) + finalPDB, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets("default").Get(context.TODO(), pdb.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get PDB: %v", err) } diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index 4c3186d2c5c..e307f722255 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -17,6 +17,7 @@ limitations under the License. package endpoint import ( + "context" "fmt" "reflect" "strconv" @@ -368,7 +369,7 @@ func (e *EndpointController) syncService(key string) error { // service is deleted. 
However, if we're down at the time when // the service is deleted, we will miss that deletion, so this // doesn't completely solve the problem. See #6877. - err = e.client.CoreV1().Endpoints(namespace).Delete(name, nil) + err = e.client.CoreV1().Endpoints(namespace).Delete(context.TODO(), name, nil) if err != nil && !errors.IsNotFound(err) { return err } @@ -512,10 +513,10 @@ func (e *EndpointController) syncService(key string) error { klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps) if createEndpoints { // No previous endpoints, create them - _, err = e.client.CoreV1().Endpoints(service.Namespace).Create(newEndpoints) + _, err = e.client.CoreV1().Endpoints(service.Namespace).Create(context.TODO(), newEndpoints) } else { // Pre-existing - _, err = e.client.CoreV1().Endpoints(service.Namespace).Update(newEndpoints) + _, err = e.client.CoreV1().Endpoints(service.Namespace).Update(context.TODO(), newEndpoints) } if err != nil { if createEndpoints && errors.IsForbidden(err) { diff --git a/pkg/controller/endpointslice/endpointslice_controller_test.go b/pkg/controller/endpointslice/endpointslice_controller_test.go index 737f5b23915..da48dad40c2 100644 --- a/pkg/controller/endpointslice/endpointslice_controller_test.go +++ b/pkg/controller/endpointslice/endpointslice_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. package endpointslice import ( + "context" "fmt" "reflect" "testing" @@ -106,7 +107,7 @@ func TestSyncServiceWithSelector(t *testing.T) { standardSyncService(t, esController, ns, serviceName, "true") expectActions(t, client.Actions(), 1, "create", "endpointslices") - sliceList, err := client.DiscoveryV1beta1().EndpointSlices(ns).List(metav1.ListOptions{}) + sliceList, err := client.DiscoveryV1beta1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{}) assert.Nil(t, err, "Expected no error fetching endpoint slices") assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slices") slice := sliceList.Items[0] @@ -173,7 +174,7 @@ func TestSyncServicePodSelection(t *testing.T) { expectActions(t, client.Actions(), 1, "create", "endpointslices") // an endpoint slice should be created, it should only reference pod1 (not pod2) - slices, err := client.DiscoveryV1beta1().EndpointSlices(ns).List(metav1.ListOptions{}) + slices, err := client.DiscoveryV1beta1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{}) assert.Nil(t, err, "Expected no error fetching endpoint slices") assert.Len(t, slices.Items, 1, "Expected 1 endpoint slices") slice := slices.Items[0] @@ -249,7 +250,7 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) { if err != nil { t.Fatalf("Expected no error adding EndpointSlice: %v", err) } - _, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(endpointSlice) + _, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(context.TODO(), endpointSlice) if err != nil { t.Fatalf("Expected no error creating EndpointSlice: %v", err) } @@ -305,7 +306,7 @@ func TestSyncServiceFull(t *testing.T) { }, } esController.serviceStore.Add(service) - _, err := esController.client.CoreV1().Services(namespace).Create(service) + _, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service) assert.Nil(t, err, "Expected no error creating service") // run through full sync service loop @@ -314,7 +315,7 @@ func TestSyncServiceFull(t *testing.T) { // last action should be to create endpoint slice expectActions(t, client.Actions(), 1, 
"create", "endpointslices") - sliceList, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(metav1.ListOptions{}) + sliceList, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{}) assert.Nil(t, err, "Expected no error fetching endpoint slices") assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slices") @@ -368,7 +369,7 @@ func createService(t *testing.T, esController *endpointSliceController, namespac }, } esController.serviceStore.Add(service) - _, err := esController.client.CoreV1().Services(namespace).Create(service) + _, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service) assert.Nil(t, err, "Expected no error creating service") return service } diff --git a/pkg/controller/endpointslice/reconciler.go b/pkg/controller/endpointslice/reconciler.go index 24496e41c80..f474908a251 100644 --- a/pkg/controller/endpointslice/reconciler.go +++ b/pkg/controller/endpointslice/reconciler.go @@ -17,6 +17,7 @@ limitations under the License. package endpointslice import ( + "context" "fmt" "sort" "time" @@ -205,7 +206,7 @@ func (r *reconciler) finalize( for _, endpointSlice := range slicesToCreate { addTriggerTimeAnnotation(endpointSlice, triggerTime) - _, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(endpointSlice) + _, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice) if err != nil { // If the namespace is terminating, creates will continue to fail. Simply drop the item. if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) { @@ -220,7 +221,7 @@ func (r *reconciler) finalize( for _, endpointSlice := range slicesToUpdate { addTriggerTimeAnnotation(endpointSlice, triggerTime) - _, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(endpointSlice) + _, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice) if err != nil { errs = append(errs, fmt.Errorf("Error updating %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)) } else { @@ -230,7 +231,7 @@ func (r *reconciler) finalize( } for _, endpointSlice := range slicesToDelete { - err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Delete(endpointSlice.Name, &metav1.DeleteOptions{}) + err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Delete(context.TODO(), endpointSlice.Name, &metav1.DeleteOptions{}) if err != nil { errs = append(errs, fmt.Errorf("Error deleting %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err)) } else { diff --git a/pkg/controller/endpointslice/reconciler_test.go b/pkg/controller/endpointslice/reconciler_test.go index 18331b4accc..a5a74bdf1b9 100644 --- a/pkg/controller/endpointslice/reconciler_test.go +++ b/pkg/controller/endpointslice/reconciler_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package endpointslice import ( + "context" "fmt" "reflect" "strings" @@ -203,7 +204,7 @@ func TestReconcile1EndpointSlice(t *testing.T) { svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace) endpointSlice1 := newEmptyEndpointSlice(1, namespace, endpointMeta, svc) - _, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(endpointSlice1) + _, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice1) assert.Nil(t, createErr, "Expected no error creating endpoint slice") numActionsBefore := len(client.Actions()) @@ -827,7 +828,7 @@ func portsAndAddressTypeEqual(slice1, slice2 discovery.EndpointSlice) bool { func createEndpointSlices(t *testing.T, client *fake.Clientset, namespace string, endpointSlices []*discovery.EndpointSlice) { t.Helper() for _, endpointSlice := range endpointSlices { - _, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(endpointSlice) + _, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice) if err != nil { t.Fatalf("Expected no error creating Endpoint Slice, got: %v", err) } @@ -836,7 +837,7 @@ func createEndpointSlices(t *testing.T, client *fake.Clientset, namespace string func fetchEndpointSlices(t *testing.T, client *fake.Clientset, namespace string) []discovery.EndpointSlice { t.Helper() - fetchedSlices, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(metav1.ListOptions{}) + fetchedSlices, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Expected no error fetching Endpoint Slices, got: %v", err) return []discovery.EndpointSlice{} diff --git a/pkg/controller/history/controller_history.go b/pkg/controller/history/controller_history.go index ef3ba82fb4e..66dfc440e78 100644 --- a/pkg/controller/history/controller_history.go +++ b/pkg/controller/history/controller_history.go @@ -18,6 +18,7 @@ package history import ( "bytes" + "context" "encoding/json" "fmt" "hash/fnv" @@ -248,9 +249,9 @@ func (rh *realHistory) CreateControllerRevision(parent metav1.Object, revision * // Update the revisions name clone.Name = ControllerRevisionName(parent.GetName(), hash) ns := parent.GetNamespace() - created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(clone) + created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(context.TODO(), clone) if errors.IsAlreadyExists(err) { - exists, err := rh.client.AppsV1().ControllerRevisions(ns).Get(clone.Name, metav1.GetOptions{}) + exists, err := rh.client.AppsV1().ControllerRevisions(ns).Get(context.TODO(), clone.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -271,7 +272,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio return nil } clone.Revision = newRevision - updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(clone) + updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(context.TODO(), clone) if updateErr == nil { return nil } @@ -288,7 +289,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio } func (rh *realHistory) DeleteControllerRevision(revision *apps.ControllerRevision) error { - return rh.client.AppsV1().ControllerRevisions(revision.Namespace).Delete(revision.Name, nil) + return rh.client.AppsV1().ControllerRevisions(revision.Namespace).Delete(context.TODO(), revision.Name, nil) } type objectForPatch struct { @@ -326,13 +327,13 @@ func (rh 
*realHistory) AdoptControllerRevision(parent metav1.Object, parentKind return nil, err } // Use strategic merge patch to add an owner reference indicating a controller ref - return rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Patch(revision.GetName(), + return rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Patch(context.TODO(), revision.GetName(), types.StrategicMergePatchType, patchBytes) } func (rh *realHistory) ReleaseControllerRevision(parent metav1.Object, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) { // Use strategic merge patch to add an owner reference indicating a controller ref - released, err := rh.client.AppsV1().ControllerRevisions(revision.GetNamespace()).Patch(revision.GetName(), + released, err := rh.client.AppsV1().ControllerRevisions(revision.GetNamespace()).Patch(context.TODO(), revision.GetName(), types.StrategicMergePatchType, []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, parent.GetUID(), revision.UID))) diff --git a/pkg/controller/history/controller_history_test.go b/pkg/controller/history/controller_history_test.go index c513d616055..8e5d5e4291b 100644 --- a/pkg/controller/history/controller_history_test.go +++ b/pkg/controller/history/controller_history_test.go @@ -18,6 +18,7 @@ package history import ( "bytes" + "context" "encoding/json" "fmt" "reflect" @@ -260,7 +261,7 @@ func TestRealHistory_CreateControllerRevision(t *testing.T) { var collisionCount int32 for _, item := range test.existing { - _, err := client.AppsV1().ControllerRevisions(item.parent.GetNamespace()).Create(item.revision) + _, err := client.AppsV1().ControllerRevisions(item.parent.GetNamespace()).Create(context.TODO(), item.revision) if err != nil { t.Fatal(err) } diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go index 4e3a156250f..1c5c445aeae 100644 --- a/pkg/controller/job/job_controller.go +++ b/pkg/controller/job/job_controller.go @@ -17,6 +17,7 @@ limitations under the License. package job import ( + "context" "fmt" "math" "reflect" @@ -417,7 +418,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) { // If any adoptions are attempted, we should first recheck for deletion // with an uncached quorum read sometime after listing Pods (see #42639). canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := jm.kubeClient.BatchV1().Jobs(j.Namespace).Get(j.Name, metav1.GetOptions{}) + fresh, err := jm.kubeClient.BatchV1().Jobs(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -826,12 +827,12 @@ func (jm *JobController) updateJobStatus(job *batch.Job) error { var err error for i := 0; i <= statusUpdateRetries; i = i + 1 { var newJob *batch.Job - newJob, err = jobClient.Get(job.Name, metav1.GetOptions{}) + newJob, err = jobClient.Get(context.TODO(), job.Name, metav1.GetOptions{}) if err != nil { break } newJob.Status = job.Status - if _, err = jobClient.UpdateStatus(newJob); err == nil { + if _, err = jobClient.UpdateStatus(context.TODO(), newJob); err == nil { break } } diff --git a/pkg/controller/namespace/deletion/namespaced_resources_deleter.go b/pkg/controller/namespace/deletion/namespaced_resources_deleter.go index 1bbceb5d329..7035a3f9b6f 100644 --- a/pkg/controller/namespace/deletion/namespaced_resources_deleter.go +++ b/pkg/controller/namespace/deletion/namespaced_resources_deleter.go @@ -17,6 +17,7 @@ limitations under the License. 
package deletion import ( + "context" "fmt" "reflect" "sync" @@ -95,7 +96,7 @@ func (d *namespacedResourcesDeleter) Delete(nsName string) error { // Multiple controllers may edit a namespace during termination // first get the latest state of the namespace before proceeding // if the namespace was deleted already, don't do anything - namespace, err := d.nsClient.Get(nsName, metav1.GetOptions{}) + namespace, err := d.nsClient.Get(context.TODO(), nsName, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return nil @@ -249,7 +250,7 @@ func (d *namespacedResourcesDeleter) retryOnConflictError(namespace *v1.Namespac return nil, err } prevNamespace := latestNamespace - latestNamespace, err = d.nsClient.Get(latestNamespace.Name, metav1.GetOptions{}) + latestNamespace, err = d.nsClient.Get(context.TODO(), latestNamespace.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -268,7 +269,7 @@ func (d *namespacedResourcesDeleter) updateNamespaceStatusFunc(namespace *v1.Nam newNamespace.ObjectMeta = namespace.ObjectMeta newNamespace.Status = *namespace.Status.DeepCopy() newNamespace.Status.Phase = v1.NamespaceTerminating - return d.nsClient.UpdateStatus(&newNamespace) + return d.nsClient.UpdateStatus(context.TODO(), &newNamespace) } // finalized returns true if the namespace.Spec.Finalizers is an empty list @@ -550,7 +551,7 @@ func (d *namespacedResourcesDeleter) deleteAllContent(ns *v1.Namespace) (int64, // we need to reflect that information. Recall that additional finalizers can be set on namespaces, so this finalizer may clear itself and // NOT remove the resource instance. if hasChanged := conditionUpdater.Update(ns); hasChanged { - if _, err = d.nsClient.UpdateStatus(ns); err != nil { + if _, err = d.nsClient.UpdateStatus(context.TODO(), ns); err != nil { utilruntime.HandleError(fmt.Errorf("couldn't update status condition for namespace %q: %v", namespace, err)) } } @@ -590,7 +591,7 @@ func (d *namespacedResourcesDeleter) estimateGracefulTerminationForPods(ns strin if podsGetter == nil || reflect.ValueOf(podsGetter).IsNil() { return 0, fmt.Errorf("unexpected: podsGetter is nil. Cannot estimate grace period seconds for pods") } - items, err := podsGetter.Pods(ns).List(metav1.ListOptions{}) + items, err := podsGetter.Pods(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return 0, err } diff --git a/pkg/controller/nodeipam/ipam/adapter.go b/pkg/controller/nodeipam/ipam/adapter.go index bfbfb9e2416..18603dc2fe0 100644 --- a/pkg/controller/nodeipam/ipam/adapter.go +++ b/pkg/controller/nodeipam/ipam/adapter.go @@ -88,7 +88,7 @@ func (a *adapter) AddAlias(ctx context.Context, nodeName string, cidrRange *net. 
} func (a *adapter) Node(ctx context.Context, name string) (*v1.Node, error) { - return a.k8s.CoreV1().Nodes().Get(name, metav1.GetOptions{}) + return a.k8s.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) } func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error { @@ -103,7 +103,7 @@ func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRang return err } - _, err = a.k8s.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, bytes) + _, err = a.k8s.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, bytes) return err } diff --git a/pkg/controller/nodeipam/ipam/cidr_allocator.go b/pkg/controller/nodeipam/ipam/cidr_allocator.go index 997e3545d59..765353c7105 100644 --- a/pkg/controller/nodeipam/ipam/cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cidr_allocator.go @@ -17,6 +17,7 @@ limitations under the License. package ipam import ( + "context" "fmt" "net" "time" @@ -124,7 +125,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) { // controller manager to restart. if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) { var err error - nodeList, err = kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ + nodeList, err = kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ FieldSelector: fields.Everything().String(), LabelSelector: labels.Everything().String(), }) diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go index 1c0da7c3a0e..18d916aed1b 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -22,6 +22,7 @@ limitations under the License. package nodelifecycle import ( + "context" "fmt" "strings" "sync" @@ -813,7 +814,7 @@ func (nc *Controller) monitorNodeHealth() error { return true, nil } name := node.Name - node, err = nc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{}) + node, err = nc.kubeClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { klog.Errorf("Failed while getting a Node to retry updating node health. Probably Node %s was deleted.", name) return false, err @@ -1148,7 +1149,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node _, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if !apiequality.Semantic.DeepEqual(currentReadyCondition, &observedReadyCondition) { - if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(node); err != nil { + if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), node); err != nil { klog.Errorf("Error updating node %s: %v", node.Name, err) return gracePeriod, observedReadyCondition, currentReadyCondition, err } diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go index a194b6cb6d7..13e733a70f3 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. 
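// --- reviewer example (not part of this patch) ---
// A sketch of the poll-until-listable pattern used by listNodes above, written with
// the context-aware List signature. waitForNodeList is a hypothetical helper.
package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

func waitForNodeList(c clientset.Interface, interval, timeout time.Duration) (*v1.NodeList, error) {
	var nodes *v1.NodeList
	err := wait.Poll(interval, timeout, func() (bool, error) {
		var listErr error
		nodes, listErr = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
		if listErr != nil {
			// Keep polling; the apiserver may not be reachable yet.
			return false, nil
		}
		return true, nil
	})
	return nodes, err
}
// --- end reviewer example ---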
package nodelifecycle import ( + "context" "fmt" "strings" "testing" @@ -66,7 +67,7 @@ func alwaysReady() bool { return true } func fakeGetPodsAssignedToNode(c *fake.Clientset) func(string) ([]*v1.Pod, error) { return func(nodeName string) ([]*v1.Pod, error) { selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}) - pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{ + pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ FieldSelector: selector.String(), LabelSelector: labels.Everything().String(), }) diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager.go b/pkg/controller/nodelifecycle/scheduler/taint_manager.go index d3eaab63049..108d48931b0 100644 --- a/pkg/controller/nodelifecycle/scheduler/taint_manager.go +++ b/pkg/controller/nodelifecycle/scheduler/taint_manager.go @@ -17,6 +17,7 @@ limitations under the License. package scheduler import ( + "context" "fmt" "hash/fnv" "io" @@ -108,7 +109,7 @@ func deletePodHandler(c clientset.Interface, emitEventFunc func(types.Namespaced } var err error for i := 0; i < retries; i++ { - err = c.CoreV1().Pods(ns).Delete(name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(ns).Delete(context.TODO(), name, &metav1.DeleteOptions{}) if err == nil { break } diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go b/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go index 6bff92e803a..30f0ba31793 100644 --- a/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go +++ b/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. package scheduler import ( + "context" "fmt" "sort" "sync" @@ -37,14 +38,14 @@ var timeForControllerToProgress = 500 * time.Millisecond func getPodFromClientset(clientset *fake.Clientset) GetPodFunc { return func(name, namespace string) (*v1.Pod, error) { - return clientset.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + return clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } } func getPodsAssignedToNode(c *fake.Clientset) GetPodsByNodeNameFunc { return func(nodeName string) ([]*v1.Pod, error) { selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}) - pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{ + pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ FieldSelector: selector.String(), LabelSelector: labels.Everything().String(), }) @@ -61,7 +62,7 @@ func getPodsAssignedToNode(c *fake.Clientset) GetPodsByNodeNameFunc { func getNodeFromClientset(clientset *fake.Clientset) GetNodeFunc { return func(name string) (*v1.Node, error) { - return clientset.CoreV1().Nodes().Get(name, metav1.GetOptions{}) + return clientset.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) } } diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 85019a4dea7..465d5521f6e 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -17,6 +17,7 @@ limitations under the License. 
package podautoscaler import ( + "context" "fmt" "math" "time" @@ -1112,7 +1113,7 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto } hpav1 := hpaRaw.(*autoscalingv1.HorizontalPodAutoscaler) - _, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(hpav1) + _, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(context.TODO(), hpav1) if err != nil { a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error()) return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err) diff --git a/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go b/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go index 4c901b8664d..2df1d1bc003 100644 --- a/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go +++ b/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go @@ -116,7 +116,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name } func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (PodMetricsInfo, time.Time, error) { - podList, err := h.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) + podList, err := h.podsGetter.Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err) } diff --git a/pkg/controller/podautoscaler/metrics/rest_metrics_client.go b/pkg/controller/podautoscaler/metrics/rest_metrics_client.go index 1f84866fc68..8d17ac3c989 100644 --- a/pkg/controller/podautoscaler/metrics/rest_metrics_client.go +++ b/pkg/controller/podautoscaler/metrics/rest_metrics_client.go @@ -17,6 +17,7 @@ limitations under the License. package metrics import ( + "context" "fmt" "time" @@ -63,7 +64,7 @@ type resourceMetricsClient struct { // GetResourceMetric gets the given resource metric (and an associated oldest timestamp) // for all pods matching the specified selector in the given namespace func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) { - metrics, err := c.client.PodMetricses(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) + metrics, err := c.client.PodMetricses(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from resource metrics API: %v", err) } diff --git a/pkg/controller/podgc/gc_controller.go b/pkg/controller/podgc/gc_controller.go index ab1a060f242..996ea88fab2 100644 --- a/pkg/controller/podgc/gc_controller.go +++ b/pkg/controller/podgc/gc_controller.go @@ -17,6 +17,7 @@ limitations under the License. 
package podgc import ( + "context" "sort" "sync" "time" @@ -75,7 +76,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor nodeQueue: workqueue.NewNamedDelayingQueue("orphaned_pods_nodes"), deletePod: func(namespace, name string) error { klog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name) - return kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)) + return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.NewDeleteOptions(0)) }, } @@ -214,7 +215,7 @@ func (gcc *PodGCController) discoverDeletedNodes(existingNodeNames sets.String) } func (gcc *PodGCController) checkIfNodeExists(name string) (bool, error) { - _, fetchErr := gcc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{}) + _, fetchErr := gcc.kubeClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) if errors.IsNotFound(fetchErr) { return false, nil } diff --git a/pkg/controller/podgc/gc_controller_test.go b/pkg/controller/podgc/gc_controller_test.go index dcc65acfefb..d7fca40dbe0 100644 --- a/pkg/controller/podgc/gc_controller_test.go +++ b/pkg/controller/podgc/gc_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. package podgc import ( + "context" "sync" "testing" "time" @@ -345,10 +346,10 @@ func TestGCOrphaned(t *testing.T) { // Execute planned nodes changes for _, node := range test.addedClientNodes { - client.CoreV1().Nodes().Create(node) + client.CoreV1().Nodes().Create(context.TODO(), node) } for _, node := range test.deletedClientNodes { - client.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{}) + client.CoreV1().Nodes().Delete(context.TODO(), node.Name, &metav1.DeleteOptions{}) } for _, node := range test.addedInformerNodes { nodeInformer.Informer().GetStore().Add(node) diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go index e42efc78df3..134cfacbb24 100644 --- a/pkg/controller/replicaset/replica_set.go +++ b/pkg/controller/replicaset/replica_set.go @@ -28,6 +28,7 @@ limitations under the License. package replicaset import ( + "context" "fmt" "reflect" "sort" @@ -714,7 +715,7 @@ func (rsc *ReplicaSetController) claimPods(rs *apps.ReplicaSet, selector labels. // If any adoptions are attempted, we should first recheck for deletion with // an uncached quorum read sometime after listing Pods (see #42639). canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{}) + fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { return nil, err } diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go index bbbdb514f56..b35600c3c81 100644 --- a/pkg/controller/replicaset/replica_set_test.go +++ b/pkg/controller/replicaset/replica_set_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package replicaset import ( + "context" "errors" "fmt" "math/rand" @@ -1158,7 +1159,7 @@ func TestExpectationsOnRecreate(t *testing.T) { } oldRS := newReplicaSet(1, map[string]string{"foo": "bar"}) - oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(oldRS) + oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(context.TODO(), oldRS) if err != nil { t.Fatal(err) } @@ -1202,7 +1203,7 @@ func TestExpectationsOnRecreate(t *testing.T) { t.Fatal("Unexpected item in the queue") } - err = client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(oldRS.Name, &metav1.DeleteOptions{}) + err = client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(context.TODO(), oldRS.Name, &metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } @@ -1239,7 +1240,7 @@ func TestExpectationsOnRecreate(t *testing.T) { newRS := oldRS.DeepCopy() newRS.UID = uuid.NewUUID() - newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(newRS) + newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(context.TODO(), newRS) if err != nil { t.Fatal(err) } diff --git a/pkg/controller/replicaset/replica_set_utils.go b/pkg/controller/replicaset/replica_set_utils.go index a2f7795726a..f3d0ac11dec 100644 --- a/pkg/controller/replicaset/replica_set_utils.go +++ b/pkg/controller/replicaset/replica_set_utils.go @@ -19,6 +19,7 @@ limitations under the License. package replicaset import ( + "context" "fmt" "reflect" @@ -63,7 +64,7 @@ func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSe fmt.Sprintf("sequence No: %v->%v", rs.Status.ObservedGeneration, newStatus.ObservedGeneration)) rs.Status = newStatus - updatedRS, updateErr = c.UpdateStatus(rs) + updatedRS, updateErr = c.UpdateStatus(context.TODO(), rs) if updateErr == nil { return updatedRS, nil } @@ -72,7 +73,7 @@ func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSe break } // Update the ReplicaSet with the latest resource version for the next poll - if rs, getErr = c.Get(rs.Name, metav1.GetOptions{}); getErr != nil { + if rs, getErr = c.Get(context.TODO(), rs.Name, metav1.GetOptions{}); getErr != nil { // If the GET fails we can't trust status.Replicas anymore. This error // is bound to be more interesting than the update failure. return nil, getErr diff --git a/pkg/controller/replication/conversion.go b/pkg/controller/replication/conversion.go index 3bb1df30a96..38d9972eaef 100644 --- a/pkg/controller/replication/conversion.go +++ b/pkg/controller/replication/conversion.go @@ -22,6 +22,7 @@ limitations under the License. 
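[editor's note] The replica_set_utils.go hunks above keep the existing poll-on-conflict flow and only thread a context through it. A condensed sketch of that flow with a caller-supplied ctx; the function name and the fixed retry bound are illustrative, not code from this patch.

import (
	"context"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// updateStatusSketch retries a status update a few times, refreshing the
// object between attempts so the next try uses the latest resourceVersion.
func updateStatusSketch(ctx context.Context, c appsclient.ReplicaSetInterface, rs *apps.ReplicaSet, newStatus apps.ReplicaSetStatus) (*apps.ReplicaSet, error) {
	var updateErr error
	for i := 0; i < 3; i++ {
		rs.Status = newStatus
		updated, err := c.UpdateStatus(ctx, rs)
		if err == nil {
			return updated, nil
		}
		updateErr = err
		// Refresh before retrying; if the Get fails, report that error instead.
		if rs, err = c.Get(ctx, rs.Name, metav1.GetOptions{}); err != nil {
			return nil, err
		}
	}
	return nil, updateErr
}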
package replication import ( + "context" "errors" "fmt" "time" @@ -215,7 +216,7 @@ func (c conversionClient) UpdateStatus(rs *apps.ReplicaSet) (*apps.ReplicaSet, e } func (c conversionClient) Get(name string, options metav1.GetOptions) (*apps.ReplicaSet, error) { - rc, err := c.ReplicationControllerInterface.Get(name, options) + rc, err := c.ReplicationControllerInterface.Get(context.TODO(), name, options) if err != nil { return nil, err } @@ -223,7 +224,7 @@ func (c conversionClient) Get(name string, options metav1.GetOptions) (*apps.Rep } func (c conversionClient) List(opts metav1.ListOptions) (*apps.ReplicaSetList, error) { - rcList, err := c.ReplicationControllerInterface.List(opts) + rcList, err := c.ReplicationControllerInterface.List(context.TODO(), opts) if err != nil { return nil, err } diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index 363e077850d..875ff8cba8e 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -17,6 +17,7 @@ limitations under the License. package resourcequota import ( + "context" "fmt" "reflect" "sync" @@ -355,7 +356,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ // there was a change observed by this controller that requires we update quota if dirty { - _, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(usage) + _, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage) if err != nil { errors = append(errors, err) } diff --git a/pkg/controller/service/controller_test.go b/pkg/controller/service/controller_test.go index c061063f9a2..2c2b5023adb 100644 --- a/pkg/controller/service/controller_test.go +++ b/pkg/controller/service/controller_test.go @@ -310,7 +310,7 @@ func TestSyncLoadBalancerIfNeeded(t *testing.T) { controller, cloud, client := newController() cloud.Exists = tc.lbExists key := fmt.Sprintf("%s/%s", tc.service.Namespace, tc.service.Name) - if _, err := client.CoreV1().Services(tc.service.Namespace).Create(tc.service); err != nil { + if _, err := client.CoreV1().Services(tc.service.Namespace).Create(context.TODO(), tc.service); err != nil { t.Fatalf("Failed to prepare service %s for testing: %v", key, err) } client.ClearActions() @@ -603,7 +603,7 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) { for _, tc := range testCases { newSvc := tc.updateFn(tc.svc) - if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil { + if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil { t.Fatalf("Failed to prepare service %s for testing: %v", tc.key, err) } obtErr := controller.processServiceCreateOrUpdate(newSvc, tc.key) @@ -1222,7 +1222,7 @@ func TestAddFinalizer(t *testing.T) { s := &Controller{ kubeClient: c, } - if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil { + if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil { t.Fatalf("Failed to prepare service for testing: %v", err) } if err := s.addFinalizer(tc.svc); err != nil { @@ -1276,7 +1276,7 @@ func TestRemoveFinalizer(t *testing.T) { s := &Controller{ kubeClient: c, } - if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil { + if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil { t.Fatalf("Failed to 
prepare service for testing: %v", err) } if err := s.removeFinalizer(tc.svc); err != nil { @@ -1376,7 +1376,7 @@ func TestPatchStatus(t *testing.T) { s := &Controller{ kubeClient: c, } - if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil { + if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil { t.Fatalf("Failed to prepare service for testing: %v", err) } if err := s.patchStatus(tc.svc, &tc.svc.Status.LoadBalancer, tc.newStatus); err != nil { diff --git a/pkg/controller/service/patch.go b/pkg/controller/service/patch.go index da714e133ca..a0ee158aa5a 100644 --- a/pkg/controller/service/patch.go +++ b/pkg/controller/service/patch.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "encoding/json" "fmt" @@ -37,7 +38,7 @@ func patch(c v1core.CoreV1Interface, oldSvc *v1.Service, newSvc *v1.Service) (*v return nil, err } - return c.Services(oldSvc.Namespace).Patch(oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status") + return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status") } func getPatchBytes(oldSvc *v1.Service, newSvc *v1.Service) ([]byte, error) { diff --git a/pkg/controller/service/patch_test.go b/pkg/controller/service/patch_test.go index df3e3ae175f..95b60941529 100644 --- a/pkg/controller/service/patch_test.go +++ b/pkg/controller/service/patch_test.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "reflect" "testing" @@ -44,7 +45,7 @@ func TestPatch(t *testing.T) { // Issue a separate update and verify patch doesn't fail after this. svcToUpdate := svcOrigin.DeepCopy() addAnnotations(svcToUpdate) - if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(svcToUpdate); err != nil { + if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate); err != nil { t.Fatalf("Failed to update service: %v", err) } diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller.go b/pkg/controller/serviceaccount/serviceaccounts_controller.go index 1967f44749b..f746a309682 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller.go @@ -17,6 +17,7 @@ limitations under the License. package serviceaccount import ( + "context" "fmt" "time" @@ -212,7 +213,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error { // TODO eliminate this once the fake client can handle creation without NS sa.Namespace = ns.Name - if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &sa); err != nil && !apierrors.IsAlreadyExists(err) { // we can safely ignore terminating namespace errors if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { createFailures = append(createFailures, err) diff --git a/pkg/controller/serviceaccount/tokengetter.go b/pkg/controller/serviceaccount/tokengetter.go index 25baf01d8d9..f2c3c307a89 100644 --- a/pkg/controller/serviceaccount/tokengetter.go +++ b/pkg/controller/serviceaccount/tokengetter.go @@ -17,6 +17,7 @@ limitations under the License. 
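[editor's note] As in the service controller's patch.go hunk above, subresource patches keep their existing arguments and simply gain the leading context. A hedged sketch with a caller-supplied ctx; patchServiceStatus is an illustrative name, not part of the patch.

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
)

// patchServiceStatus applies a strategic-merge patch to the "status"
// subresource, passing the caller's ctx straight through to client-go.
func patchServiceStatus(ctx context.Context, c v1core.CoreV1Interface, svc *v1.Service, patchBytes []byte) (*v1.Service, error) {
	return c.Services(svc.Namespace).Patch(ctx, svc.Name, types.StrategicMergePatchType, patchBytes, "status")
}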
package serviceaccount import ( + "context" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -44,19 +45,19 @@ func (c clientGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAcco if serviceAccount, err := c.serviceAccountLister.ServiceAccounts(namespace).Get(name); err == nil { return serviceAccount, nil } - return c.client.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } func (c clientGetter) GetPod(namespace, name string) (*v1.Pod, error) { if pod, err := c.podLister.Pods(namespace).Get(name); err == nil { return pod, nil } - return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } func (c clientGetter) GetSecret(namespace, name string) (*v1.Secret, error) { if secret, err := c.secretLister.Secrets(namespace).Get(name); err == nil { return secret, nil } - return c.client.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go index 62dab048695..1cf9621472e 100644 --- a/pkg/controller/serviceaccount/tokens_controller.go +++ b/pkg/controller/serviceaccount/tokens_controller.go @@ -18,6 +18,7 @@ package serviceaccount import ( "bytes" + "context" "fmt" "time" @@ -345,7 +346,7 @@ func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry if len(uid) > 0 { opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}} } - err := e.client.CoreV1().Secrets(ns).Delete(name, opts) + err := e.client.CoreV1().Secrets(ns).Delete(context.TODO(), name, opts) // NotFound doesn't need a retry (it's already been deleted) // Conflict doesn't need a retry (the UID precondition failed) if err == nil || apierrors.IsNotFound(err) || apierrors.IsConflict(err) { @@ -368,7 +369,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou // We don't want to update the cache's copy of the service account // so add the secret to a freshly retrieved copy of the service account serviceAccounts := e.client.CoreV1().ServiceAccounts(serviceAccount.Namespace) - liveServiceAccount, err := serviceAccounts.Get(serviceAccount.Name, metav1.GetOptions{}) + liveServiceAccount, err := serviceAccounts.Get(context.TODO(), serviceAccount.Name, metav1.GetOptions{}) if err != nil { // Retry if we cannot fetch the live service account (for a NotFound error, either the live lookup or our cache are stale) return true, err @@ -406,7 +407,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou } // Save the secret - createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(secret) + createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(context.TODO(), secret) if err != nil { // if the namespace is being terminated, create will fail no matter what if apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) { @@ -427,7 +428,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou // fetch the live service account if needed, and verify the UID matches and that we still need a token if liveServiceAccount == nil { - liveServiceAccount, err = 
serviceAccounts.Get(serviceAccount.Name, metav1.GetOptions{}) + liveServiceAccount, err = serviceAccounts.Get(context.TODO(), serviceAccount.Name, metav1.GetOptions{}) if err != nil { return err } @@ -448,7 +449,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou // Try to add a reference to the token liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, v1.ObjectReference{Name: secret.Name}) - if _, err := serviceAccounts.Update(liveServiceAccount); err != nil { + if _, err := serviceAccounts.Update(context.TODO(), liveServiceAccount); err != nil { return err } @@ -460,7 +461,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou // we weren't able to use the token, try to clean it up. klog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err) deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}} - if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil { + if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(context.TODO(), createdToken.Name, deleteOpts); deleteErr != nil { klog.Error(deleteErr) // if we fail, just log it } } @@ -519,7 +520,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou // We don't want to update the cache's copy of the secret // so add the token to a freshly retrieved copy of the secret secrets := e.client.CoreV1().Secrets(cachedSecret.Namespace) - liveSecret, err := secrets.Get(cachedSecret.Name, metav1.GetOptions{}) + liveSecret, err := secrets.Get(context.TODO(), cachedSecret.Name, metav1.GetOptions{}) if err != nil { // Retry for any error other than a NotFound return !apierrors.IsNotFound(err), err @@ -566,7 +567,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou liveSecret.Annotations[v1.ServiceAccountUIDKey] = string(serviceAccount.UID) // Save the secret - _, err = secrets.Update(liveSecret) + _, err = secrets.Update(context.TODO(), liveSecret) if apierrors.IsConflict(err) || apierrors.IsNotFound(err) { // if we got a Conflict error, the secret was updated by someone else, and we'll get an update notification later // if we got a NotFound error, the secret no longer exists, and we don't need to populate a token @@ -583,7 +584,7 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri // We don't want to update the cache's copy of the service account // so remove the secret from a freshly retrieved copy of the service account serviceAccounts := e.client.CoreV1().ServiceAccounts(saNamespace) - serviceAccount, err := serviceAccounts.Get(saName, metav1.GetOptions{}) + serviceAccount, err := serviceAccounts.Get(context.TODO(), saName, metav1.GetOptions{}) // Ignore NotFound errors when attempting to remove a reference if apierrors.IsNotFound(err) { return nil @@ -610,7 +611,7 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri } } serviceAccount.Secrets = secrets - _, err = serviceAccounts.Update(serviceAccount) + _, err = serviceAccounts.Update(context.TODO(), serviceAccount) // Ignore NotFound errors when attempting to remove a reference if apierrors.IsNotFound(err) { return nil @@ -636,7 +637,7 @@ func (e *TokensController) getServiceAccount(ns string, name string, uid types.U } // Live lookup - sa, err = e.client.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + 
sa, err = e.client.CoreV1().ServiceAccounts(ns).Get(context.TODO(), name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return nil, nil } @@ -672,7 +673,7 @@ func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetc } // Live lookup - secret, err := e.client.CoreV1().Secrets(ns).Get(name, metav1.GetOptions{}) + secret, err := e.client.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return nil, nil } diff --git a/pkg/controller/statefulset/stateful_pod_control.go b/pkg/controller/statefulset/stateful_pod_control.go index 403007ed36f..5ec41d4b9b3 100644 --- a/pkg/controller/statefulset/stateful_pod_control.go +++ b/pkg/controller/statefulset/stateful_pod_control.go @@ -17,6 +17,7 @@ limitations under the License. package statefulset import ( + "context" "fmt" "strings" @@ -77,7 +78,7 @@ func (spc *realStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod return err } // If we created the PVCs attempt to create the Pod - _, err := spc.client.CoreV1().Pods(set.Namespace).Create(pod) + _, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod) // sink already exists errors if apierrors.IsAlreadyExists(err) { return err @@ -113,7 +114,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod attemptedUpdate = true // commit the update, retrying on conflicts - _, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(pod) + _, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(context.TODO(), pod) if updateErr == nil { return nil } @@ -134,7 +135,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod } func (spc *realStatefulPodControl) DeleteStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error { - err := spc.client.CoreV1().Pods(set.Namespace).Delete(pod.Name, nil) + err := spc.client.CoreV1().Pods(set.Namespace).Delete(context.TODO(), pod.Name, nil) spc.recordPodEvent("delete", set, pod, err) return err } @@ -182,7 +183,7 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef _, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name) switch { case apierrors.IsNotFound(err): - _, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(&claim) + _, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), &claim) if err != nil { errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err)) } diff --git a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go index ac01f76791d..37724e855ef 100644 --- a/pkg/controller/statefulset/stateful_set.go +++ b/pkg/controller/statefulset/stateful_set.go @@ -17,6 +17,7 @@ limitations under the License. package statefulset import ( + "context" "fmt" "reflect" "time" @@ -291,7 +292,7 @@ func (ssc *StatefulSetController) getPodsForStatefulSet(set *apps.StatefulSet, s // If any adoptions are attempted, we should first recheck for deletion with // an uncached quorum read sometime after listing Pods (see #42639). 
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := ssc.kubeClient.AppsV1().StatefulSets(set.Namespace).Get(set.Name, metav1.GetOptions{}) + fresh, err := ssc.kubeClient.AppsV1().StatefulSets(set.Namespace).Get(context.TODO(), set.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -318,7 +319,7 @@ func (ssc *StatefulSetController) adoptOrphanRevisions(set *apps.StatefulSet) er } } if len(orphanRevisions) > 0 { - fresh, err := ssc.kubeClient.AppsV1().StatefulSets(set.Namespace).Get(set.Name, metav1.GetOptions{}) + fresh, err := ssc.kubeClient.AppsV1().StatefulSets(set.Namespace).Get(context.TODO(), set.Name, metav1.GetOptions{}) if err != nil { return err } diff --git a/pkg/controller/statefulset/stateful_set_status_updater.go b/pkg/controller/statefulset/stateful_set_status_updater.go index 78b22dbbb53..f7f99e77a53 100644 --- a/pkg/controller/statefulset/stateful_set_status_updater.go +++ b/pkg/controller/statefulset/stateful_set_status_updater.go @@ -17,6 +17,7 @@ limitations under the License. package statefulset import ( + "context" "fmt" apps "k8s.io/api/apps/v1" @@ -53,7 +54,7 @@ func (ssu *realStatefulSetStatusUpdater) UpdateStatefulSetStatus( // don't wait due to limited number of clients, but backoff after the default number of steps return retry.RetryOnConflict(retry.DefaultRetry, func() error { set.Status = *status - _, updateErr := ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(set) + _, updateErr := ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(context.TODO(), set) if updateErr == nil { return nil } diff --git a/pkg/controller/ttl/ttl_controller.go b/pkg/controller/ttl/ttl_controller.go index dbd4c651b32..58bc3e1c194 100644 --- a/pkg/controller/ttl/ttl_controller.go +++ b/pkg/controller/ttl/ttl_controller.go @@ -27,6 +27,7 @@ limitations under the License. package ttl import ( + "context" "fmt" "math" "strconv" @@ -263,7 +264,7 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey if err != nil { return err } - _, err = ttlc.kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes) + _, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes) if err != nil { klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err) return err diff --git a/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go b/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go index c4e38c2c28a..c9370a43cdb 100644 --- a/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go +++ b/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go @@ -17,6 +17,7 @@ limitations under the License. package ttlafterfinished import ( + "context" "fmt" "time" @@ -214,7 +215,7 @@ func (tc *Controller) processJob(key string) error { // Before deleting the Job, do a final sanity check. // If TTL is modified before we do this check, we cannot be sure if the TTL truly expires. // The latest Job may have a different UID, but it's fine because the checks will be run again. 
- fresh, err := tc.client.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{}) + fresh, err := tc.client.BatchV1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if errors.IsNotFound(err) { return nil } @@ -234,7 +235,7 @@ func (tc *Controller) processJob(key string) error { Preconditions: &metav1.Preconditions{UID: &fresh.UID}, } klog.V(4).Infof("Cleaning up Job %s/%s", namespace, name) - return tc.client.BatchV1().Jobs(fresh.Namespace).Delete(fresh.Name, options) + return tc.client.BatchV1().Jobs(fresh.Namespace).Delete(context.TODO(), fresh.Name, options) } // processTTL checks whether a given Job's TTL has expired, and add it to the queue after the TTL is expected to expire diff --git a/pkg/controller/util/node/controller_utils.go b/pkg/controller/util/node/controller_utils.go index f77838a48eb..97282b79db2 100644 --- a/pkg/controller/util/node/controller_utils.go +++ b/pkg/controller/util/node/controller_utils.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "fmt" "strings" @@ -79,7 +80,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record. klog.V(2).Infof("Starting deletion of pod %v/%v", pod.Namespace, pod.Name) recorder.Eventf(pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName) - if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { + if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, nil); err != nil { if apierrors.IsNotFound(err) { // NotFound error means that pod was already deleted. // There is nothing left to do with this pod. @@ -109,7 +110,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa var updatedPod *v1.Pod var err error - if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod); err != nil { + if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod); err != nil { return nil, err } return updatedPod, nil @@ -136,7 +137,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s break } klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name) - _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) + _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod) if err != nil { if apierrors.IsNotFound(err) { // NotFound error means that pod was already deleted. diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go index d7bfe34d848..22236354f35 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. package attachdetach import ( + "context" "fmt" "testing" "time" @@ -160,7 +161,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 stopCh := make(chan struct{}) - pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{}) + pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Run failed with error. 
Expected: Actual: %v", err) } @@ -170,7 +171,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 podInformer.GetIndexer().Add(&podToAdd) podsNum++ } - nodes, err := fakeKubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := fakeKubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Run failed with error. Expected: Actual: %v", err) } @@ -180,7 +181,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 nodesNum++ } - csiNodes, err := fakeKubeClient.StorageV1().CSINodes().List(metav1.ListOptions{}) + csiNodes, err := fakeKubeClient.StorageV1().CSINodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Run failed with error. Expected: Actual: %v", err) } @@ -269,7 +270,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 for _, newPod := range extraPods1 { // Add a new pod between ASW and DSW ppoulators - _, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(newPod) + _, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod) if err != nil { t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err) } @@ -286,7 +287,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 for _, newPod := range extraPods2 { // Add a new pod between DSW ppoulator and reconciler run - _, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(newPod) + _, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod) if err != nil { t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err) } diff --git a/pkg/controller/volume/expand/cache/volume_resize_map.go b/pkg/controller/volume/expand/cache/volume_resize_map.go index 871dec58a46..7e2a2342884 100644 --- a/pkg/controller/volume/expand/cache/volume_resize_map.go +++ b/pkg/controller/volume/expand/cache/volume_resize_map.go @@ -17,6 +17,7 @@ limitations under the License. package cache import ( + "context" "encoding/json" "fmt" "sync" @@ -202,7 +203,7 @@ func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSi return fmt.Errorf("Error Creating two way merge patch for PV %q with error : %v", pvClone.Name, err) } - _, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(pvClone.Name, commontypes.StrategicMergePatchType, patchBytes) + _, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, commontypes.StrategicMergePatchType, patchBytes) if updateErr != nil { klog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr) diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index d96bc99ed4a..d8ab9396e0d 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -17,6 +17,7 @@ limitations under the License. 
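[editor's note] context.TODO() keeps call sites like the List calls in the attach/detach test above compiling without choosing a deadline; once a real context is available, the same requests can be bounded. A sketch under that assumption; the 30-second timeout is an arbitrary illustrative value.

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// listPodsWithTimeout bounds the List request: if the API server does not
// answer within the deadline, the client call returns a context error.
func listPodsWithTimeout(client clientset.Interface) (*v1.PodList, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return client.CoreV1().Pods(v1.NamespaceAll).List(ctx, metav1.ListOptions{})
}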
package persistentvolume import ( + "context" "fmt" "reflect" "strings" @@ -559,7 +560,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) } found = !apierrors.IsNotFound(err) if !found { - obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name, metav1.GetOptions{}) + obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(context.TODO(), volume.Spec.ClaimRef.Name, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return err } @@ -753,7 +754,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo return claim, nil } - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(context.TODO(), claimClone) if err != nil { klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err) return newClaim, err @@ -809,7 +810,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV volumeClone.Status.Phase = phase volumeClone.Status.Message = message - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), volumeClone) if err != nil { klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) return newVol, err @@ -871,7 +872,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, updateCache bool) (*v1.PersistentVolume, error) { claimKey := claimrefToClaimKey(volumeClone.Spec.ClaimRef) klog.V(2).Infof("claim %q bound to volume %q", claimKey, volumeClone.Name) - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone) if err != nil { klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimKey, err) return newVol, err @@ -923,7 +924,7 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo if dirty { klog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone) if err != nil { klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) return newClaim, err @@ -1010,7 +1011,7 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume volumeClone.Spec.ClaimRef.UID = "" } - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone) if err != nil { klog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err) return err @@ -1081,7 +1082,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis // This method may have been waiting for a volume lock for some time. 
// Previous recycleVolumeOperation might just have saved an updated version, // so read current volume state now. - newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) + newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), volume.Name, metav1.GetOptions{}) if err != nil { klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) return @@ -1177,7 +1178,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist // This method may have been waiting for a volume lock for some time. // Previous deleteVolumeOperation might just have saved an updated version, so // read current volume state now. - newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) + newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), volume.Name, metav1.GetOptions{}) if err != nil { klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) return "", nil @@ -1221,7 +1222,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist klog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name) // Delete the volume - if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil { + if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(context.TODO(), volume.Name, nil); err != nil { // Oops, could not delete the volume and therefore the controller will // try to delete the volume again on next update. We _could_ maintain a // cache of "recently deleted volumes" and avoid unnecessary deletion, @@ -1415,7 +1416,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( // yet. pvName := ctrl.getProvisionedVolumeNameForClaim(claim) - volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { klog.V(3).Infof("error reading persistent volume %q: %v", pvName, err) return pluginName, err @@ -1514,7 +1515,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) var newVol *v1.PersistentVolume - if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrors.IsAlreadyExists(err) { + if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume); err == nil || apierrors.IsAlreadyExists(err) { // Save succeeded. 
if err != nil { klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim)) @@ -1630,7 +1631,7 @@ func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.Persist newClaim := claim.DeepCopy() delete(newClaim.Annotations, pvutil.AnnSelectedNode) // Try to update the PVC object - if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(newClaim); err != nil { + if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(context.TODO(), newClaim); err != nil { klog.V(4).Infof("Failed to delete annotation 'pvutil.AnnSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err) return } diff --git a/pkg/controller/volume/persistentvolume/pv_controller_base.go b/pkg/controller/volume/persistentvolume/pv_controller_base.go index 41250638e79..44bd8aca2af 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_base.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_base.go @@ -17,6 +17,7 @@ limitations under the License. package persistentvolume import ( + "context" "fmt" "strconv" "time" @@ -321,7 +322,7 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(claim *v if !modified { return claimClone, nil } - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone) if err != nil { return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err) } @@ -338,7 +339,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotations(volume if !modified { return volumeClone, nil } - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone) if err != nil { return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err) } @@ -545,7 +546,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent claimClone := claim.DeepCopy() metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, pvutil.AnnStorageProvisioner, provisionerName) updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, pvutil.AnnStorageProvisioner) - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone) if err != nil { return newClaim, err } diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go index 94d4ef6b64f..42e33544ddb 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go @@ -17,6 +17,7 @@ limitations under the License. 
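[editor's note] The PV-controller hunks above all follow the same shape: deep-copy the cached object, mutate the copy, then Update with the context as the new first argument. A minimal sketch of that shape; setAnnotationAndUpdate and its parameters are illustrative assumptions.

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// setAnnotationAndUpdate never mutates the informer's cached copy; it edits a
// DeepCopy and sends that to the API server.
func setAnnotationAndUpdate(ctx context.Context, c clientset.Interface, claim *v1.PersistentVolumeClaim, key, value string) (*v1.PersistentVolumeClaim, error) {
	clone := claim.DeepCopy()
	metav1.SetMetaDataAnnotation(&clone.ObjectMeta, key, value)
	return c.CoreV1().PersistentVolumeClaims(clone.Namespace).Update(ctx, clone)
}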
package pvcprotection import ( + "context" "fmt" "time" @@ -188,7 +189,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error { } claimClone := pvc.DeepCopy() claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer) - _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone) + _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone) if err != nil { klog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name, err) return err @@ -200,7 +201,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error { func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error { claimClone := pvc.DeepCopy() claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil) - _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone) + _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone) if err != nil { klog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err) return err @@ -248,7 +249,7 @@ func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) { func (c *Controller) askAPIServer(pvc *v1.PersistentVolumeClaim) (bool, error) { klog.V(4).Infof("Looking for Pods using PVC %s/%s with a live list", pvc.Namespace, pvc.Name) - podsList, err := c.client.CoreV1().Pods(pvc.Namespace).List(metav1.ListOptions{}) + podsList, err := c.client.CoreV1().Pods(pvc.Namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("live list of pods failed: %s", err.Error()) } diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller.go b/pkg/controller/volume/pvprotection/pv_protection_controller.go index b2da80c0a73..6b1844f502c 100644 --- a/pkg/controller/volume/pvprotection/pv_protection_controller.go +++ b/pkg/controller/volume/pvprotection/pv_protection_controller.go @@ -17,6 +17,7 @@ limitations under the License. 
package pvprotection import ( + "context" "fmt" "time" @@ -161,7 +162,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error { } pvClone := pv.DeepCopy() pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer) - _, err := c.client.CoreV1().PersistentVolumes().Update(pvClone) + _, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone) if err != nil { klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err) return err @@ -173,7 +174,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error { func (c *Controller) removeFinalizer(pv *v1.PersistentVolume) error { pvClone := pv.DeepCopy() pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil) - _, err := c.client.CoreV1().PersistentVolumes().Update(pvClone) + _, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone) if err != nil { klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err) return err diff --git a/pkg/controller/volume/scheduling/scheduler_binder.go b/pkg/controller/volume/scheduling/scheduler_binder.go index 21bd947e134..6a57f272254 100644 --- a/pkg/controller/volume/scheduling/scheduler_binder.go +++ b/pkg/controller/volume/scheduling/scheduler_binder.go @@ -17,6 +17,7 @@ limitations under the License. package scheduling import ( + "context" "fmt" "sort" "strings" @@ -423,7 +424,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl // TODO: does it hurt if we make an api call and nothing needs to be updated? claimKey := claimToClaimKey(binding.pvc) klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name) - newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(binding.pv) + newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv) if err != nil { klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err) return err @@ -438,7 +439,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl // PV controller is expect to signal back by removing related annotations if actual provisioning fails for i, claim = range claimsToProvision { klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim)) - newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim) + newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim) if err != nil { return err } diff --git a/pkg/controller/volume/scheduling/scheduler_binder_test.go b/pkg/controller/volume/scheduling/scheduler_binder_test.go index 7ca5082e92c..2b672416981 100644 --- a/pkg/controller/volume/scheduling/scheduler_binder_test.go +++ b/pkg/controller/volume/scheduling/scheduler_binder_test.go @@ -295,7 +295,7 @@ func (env *testEnv) initVolumes(cachedPVs []*v1.PersistentVolume, apiPVs []*v1.P func (env *testEnv) updateVolumes(t *testing.T, pvs []*v1.PersistentVolume, waitCache bool) { for _, pv := range pvs { - if _, err := env.client.CoreV1().PersistentVolumes().Update(pv); err != nil { + if _, err := env.client.CoreV1().PersistentVolumes().Update(context.TODO(), pv); err != nil { t.Fatalf("failed to update PV %q", pv.Name) } } @@ -321,7 +321,7 @@ func (env *testEnv) updateVolumes(t *testing.T, pvs []*v1.PersistentVolume, wait func (env *testEnv) updateClaims(t *testing.T, pvcs 
[]*v1.PersistentVolumeClaim, waitCache bool) { for _, pvc := range pvcs { - if _, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(pvc); err != nil { + if _, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc); err != nil { t.Fatalf("failed to update PVC %q", getPVCName(pvc)) } } @@ -1769,7 +1769,7 @@ func TestBindPodVolumes(t *testing.T) { newPVC := pvc.DeepCopy() newPVC.Spec.VolumeName = pv.Name metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes") - if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(newPVC); err != nil { + if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil { t.Errorf("failed to update PVC %q: %v", newPVC.Name, err) } }, @@ -1780,20 +1780,20 @@ func TestBindPodVolumes(t *testing.T) { delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) { pvc := pvcs[0] // Update PVC to be fully bound to PV - newPVC, err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + newPVC, err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("failed to get PVC %q: %v", pvc.Name, err) return } dynamicPV := makeTestPV("dynamic-pv", "node1", "1G", "1", newPVC, waitClass) - dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(dynamicPV) + dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(context.TODO(), dynamicPV) if err != nil { t.Errorf("failed to create PV %q: %v", dynamicPV.Name, err) return } newPVC.Spec.VolumeName = dynamicPV.Name metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes") - if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(newPVC); err != nil { + if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil { t.Errorf("failed to update PVC %q: %v", newPVC.Name, err) } }, @@ -1846,7 +1846,7 @@ func TestBindPodVolumes(t *testing.T) { delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) { pvc := pvcs[0] // Delete PVC will fail check - if err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, &metav1.DeleteOptions{}); err != nil { + if err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, &metav1.DeleteOptions{}); err != nil { t.Errorf("failed to delete PVC %q: %v", pvc.Name, err) } }, @@ -1869,7 +1869,7 @@ func TestBindPodVolumes(t *testing.T) { newPVC := pvcs[0].DeepCopy() newPVC.Spec.VolumeName = pvNode2.Name metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes") - if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(newPVC); err != nil { + if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil { t.Errorf("failed to update PVC %q: %v", newPVC.Name, err) } }, @@ -1904,13 +1904,13 @@ func TestBindPodVolumes(t *testing.T) { // Before Execute if scenario.apiPV != nil { - _, err := testEnv.client.CoreV1().PersistentVolumes().Update(scenario.apiPV) + _, err := testEnv.client.CoreV1().PersistentVolumes().Update(context.TODO(), scenario.apiPV) if err != nil { t.Fatalf("failed 
to update PV %q", scenario.apiPV.Name) } } if scenario.apiPVC != nil { - _, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(scenario.apiPVC) + _, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(context.TODO(), scenario.apiPVC) if err != nil { t.Fatalf("failed to update PVC %q", getPVCName(scenario.apiPVC)) } diff --git a/pkg/kubectl/cmd/auth/cani.go b/pkg/kubectl/cmd/auth/cani.go index c08d989331a..06f48e7c7cd 100644 --- a/pkg/kubectl/cmd/auth/cani.go +++ b/pkg/kubectl/cmd/auth/cani.go @@ -17,6 +17,7 @@ limitations under the License. package auth import ( + "context" "errors" "fmt" "io" @@ -222,7 +223,7 @@ func (o *CanIOptions) RunAccessList() error { Namespace: o.Namespace, }, } - response, err := o.AuthClient.SelfSubjectRulesReviews().Create(sar) + response, err := o.AuthClient.SelfSubjectRulesReviews().Create(context.TODO(), sar) if err != nil { return err } @@ -257,7 +258,7 @@ func (o *CanIOptions) RunAccessCheck() (bool, error) { } } - response, err := o.AuthClient.SelfSubjectAccessReviews().Create(sar) + response, err := o.AuthClient.SelfSubjectAccessReviews().Create(context.TODO(), sar) if err != nil { return false, err } diff --git a/pkg/kubelet/configmap/configmap_manager.go b/pkg/kubelet/configmap/configmap_manager.go index 132f60c18f2..76b204f6fb6 100644 --- a/pkg/kubelet/configmap/configmap_manager.go +++ b/pkg/kubelet/configmap/configmap_manager.go @@ -17,6 +17,7 @@ limitations under the License. package configmap import ( + "context" "fmt" "time" @@ -61,7 +62,7 @@ func NewSimpleConfigMapManager(kubeClient clientset.Interface) Manager { } func (s *simpleConfigMapManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) { - return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) + return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } func (s *simpleConfigMapManager) RegisterPod(pod *v1.Pod) { @@ -120,7 +121,7 @@ const ( // value in cache; otherwise it is just fetched from cache func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager { getConfigMap := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) { - return kubeClient.CoreV1().ConfigMaps(namespace).Get(name, opts) + return kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, opts) } configMapStore := manager.NewObjectStore(getConfigMap, clock.RealClock{}, getTTL, defaultTTL) return &configMapManager{ @@ -136,10 +137,10 @@ func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.G // - every GetObject() returns a value from local cache propagated via watches func NewWatchingConfigMapManager(kubeClient clientset.Interface) Manager { listConfigMap := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) { - return kubeClient.CoreV1().ConfigMaps(namespace).List(opts) + return kubeClient.CoreV1().ConfigMaps(namespace).List(context.TODO(), opts) } watchConfigMap := func(namespace string, opts metav1.ListOptions) (watch.Interface, error) { - return kubeClient.CoreV1().ConfigMaps(namespace).Watch(opts) + return kubeClient.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), opts) } newConfigMap := func() runtime.Object { return &v1.ConfigMap{} diff --git a/pkg/kubelet/configmap/configmap_manager_test.go b/pkg/kubelet/configmap/configmap_manager_test.go index 2b8433c0e43..eb14bf60a97 100644 --- a/pkg/kubelet/configmap/configmap_manager_test.go 
+++ b/pkg/kubelet/configmap/configmap_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. package configmap import ( + "context" "fmt" "strings" "testing" @@ -49,7 +50,7 @@ func noObjectTTL() (time.Duration, bool) { func getConfigMap(fakeClient clientset.Interface) manager.GetObjectFunc { return func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) { - return fakeClient.CoreV1().ConfigMaps(namespace).Get(name, opts) + return fakeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, opts) } } diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 5ed554c0fe8..b72e3a55e65 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -83,7 +83,7 @@ func (kl *Kubelet) registerWithAPIServer() { // value of the annotation for controller-managed attach-detach of attachable // persistent volumes for the node. func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { - _, err := kl.kubeClient.CoreV1().Nodes().Create(node) + _, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node) if err == nil { return true } @@ -93,7 +93,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { return false } - existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(string(kl.nodeName), metav1.GetOptions{}) + existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), metav1.GetOptions{}) if err != nil { klog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err) return false @@ -420,7 +420,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { if tryNumber == 0 { util.FromApiserverCache(&opts) } - node, err := kl.heartbeatClient.CoreV1().Nodes().Get(string(kl.nodeName), opts) + node, err := kl.heartbeatClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), opts) if err != nil { return fmt.Errorf("error getting node %q: %v", kl.nodeName, err) } diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index 8bb0bab367b..dc0b1c2bf6c 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package kubelet import ( + "context" "encoding/json" "fmt" "net" @@ -678,7 +679,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { require.True(t, actions[1].Matches("patch", "nodes")) require.Equal(t, actions[1].GetSubresource(), "status") - updatedNode, err := kubeClient.CoreV1().Nodes().Get(testKubeletHostname, metav1.GetOptions{}) + updatedNode, err := kubeClient.CoreV1().Nodes().Get(context.TODO(), testKubeletHostname, metav1.GetOptions{}) require.NoError(t, err, "can't apply node status patch") for i, cond := range updatedNode.Status.Conditions { @@ -2263,7 +2264,7 @@ func TestUpdateNodeAddresses(t *testing.T) { }, } - _, err := kubeClient.CoreV1().Nodes().Update(oldNode) + _, err := kubeClient.CoreV1().Nodes().Update(context.TODO(), oldNode) assert.NoError(t, err) kubelet.setNodeStatusFuncs = []func(*v1.Node) error{ func(node *v1.Node) error { diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index bb109753e16..609303ac9d2 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -1779,13 +1779,13 @@ func hasHostNamespace(pod *v1.Pod) bool { func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool { for _, volume := range pod.Spec.Volumes { if volume.PersistentVolumeClaim != nil { - pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) + pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) if err != nil { klog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err) continue } if pvc != nil { - referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) + referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { klog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err) continue diff --git a/pkg/kubelet/kubeletconfig/checkpoint/download.go b/pkg/kubelet/kubeletconfig/checkpoint/download.go index 8f023051b17..aa9c6746f9d 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/download.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/download.go @@ -17,6 +17,7 @@ limitations under the License. package checkpoint import ( + "context" "fmt" "math/rand" "time" @@ -191,7 +192,7 @@ func (r *remoteConfigMap) Download(client clientset.Interface, store cache.Store // if we didn't find the ConfigMap in the in-memory store, download it from the API server if cm == nil { utillog.Infof("attempting to download %s", r.APIPath()) - cm, err = client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(r.source.ConfigMap.Name, metav1.GetOptions{}) + cm, err = client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(context.TODO(), r.source.ConfigMap.Name, metav1.GetOptions{}) if err != nil { return nil, status.DownloadError, fmt.Errorf("%s, error: %v", status.DownloadError, err) } diff --git a/pkg/kubelet/kubeletconfig/configsync.go b/pkg/kubelet/kubeletconfig/configsync.go index f6663a58748..7e28c7e0273 100644 --- a/pkg/kubelet/kubeletconfig/configsync.go +++ b/pkg/kubelet/kubeletconfig/configsync.go @@ -17,6 +17,7 @@ limitations under the License. 
package kubeletconfig import ( + "context" "fmt" "os" "time" @@ -198,7 +199,7 @@ func restartForNewConfig(eventClient v1core.EventsGetter, nodeName string, sourc // because the event recorder won't flush its queue before we exit (we'd lose the event) event := makeEvent(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, message) klog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) - if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(event); err != nil { + if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(context.TODO(), event); err != nil { utillog.Errorf("failed to send event, error: %v", err) } utillog.Infof(message) diff --git a/pkg/kubelet/kubeletconfig/status/status.go b/pkg/kubelet/kubeletconfig/status/status.go index d1fdfeb8c9b..ad2cf898c86 100644 --- a/pkg/kubelet/kubeletconfig/status/status.go +++ b/pkg/kubelet/kubeletconfig/status/status.go @@ -17,6 +17,7 @@ limitations under the License. package status import ( + "context" "fmt" "sync" @@ -158,7 +159,7 @@ func (s *nodeConfigStatus) Sync(client clientset.Interface, nodeName string) { }() // get the Node so we can check the current status - oldNode, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("could not get Node %q, will not sync status, error: %v", nodeName, err) return diff --git a/pkg/kubelet/nodelease/controller.go b/pkg/kubelet/nodelease/controller.go index 073b7d2837c..ce1594c74d4 100644 --- a/pkg/kubelet/nodelease/controller.go +++ b/pkg/kubelet/nodelease/controller.go @@ -17,6 +17,7 @@ limitations under the License. package nodelease import ( + "context" "fmt" "time" @@ -142,7 +143,7 @@ func (c *controller) backoffEnsureLease() (*coordinationv1.Lease, bool) { // ensureLease creates the lease if it does not exist. Returns the lease and // a bool (true if this call created the lease), or any error that occurs. func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) { - lease, err := c.leaseClient.Get(c.holderIdentity, metav1.GetOptions{}) + lease, err := c.leaseClient.Get(context.TODO(), c.holderIdentity, metav1.GetOptions{}) if apierrors.IsNotFound(err) { // lease does not exist, create it. leaseToCreate := c.newLease(nil) @@ -152,7 +153,7 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) { // not create it this time - we will retry in the next iteration. return nil, false, nil } - lease, err := c.leaseClient.Create(leaseToCreate) + lease, err := c.leaseClient.Create(context.TODO(), leaseToCreate) if err != nil { return nil, false, err } @@ -169,7 +170,7 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) { // call this once you're sure the lease has been created func (c *controller) retryUpdateLease(base *coordinationv1.Lease) error { for i := 0; i < maxUpdateRetries; i++ { - lease, err := c.leaseClient.Update(c.newLease(base)) + lease, err := c.leaseClient.Update(context.TODO(), c.newLease(base)) if err == nil { c.latestLease = lease return nil @@ -214,7 +215,7 @@ func (c *controller) newLease(base *coordinationv1.Lease) *coordinationv1.Lease // the connection between master and node is not ready yet. So try to set // owner reference every time when renewing the lease, until successful. 
if len(lease.OwnerReferences) == 0 { - if node, err := c.client.CoreV1().Nodes().Get(c.holderIdentity, metav1.GetOptions{}); err == nil { + if node, err := c.client.CoreV1().Nodes().Get(context.TODO(), c.holderIdentity, metav1.GetOptions{}); err == nil { lease.OwnerReferences = []metav1.OwnerReference{ { APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version, diff --git a/pkg/kubelet/pod/mirror_client.go b/pkg/kubelet/pod/mirror_client.go index fe7cd28c255..68487cf5800 100644 --- a/pkg/kubelet/pod/mirror_client.go +++ b/pkg/kubelet/pod/mirror_client.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -95,7 +96,7 @@ func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error { Controller: &controller, }} - apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(&copyPod) + apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(context.TODO(), &copyPod) if err != nil && apierrors.IsAlreadyExists(err) { // Check if the existing pod is the same as the pod we want to create. if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash { @@ -123,7 +124,7 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string, uid *types.UID) } klog.V(2).Infof("Deleting a mirror pod %q (uid %#v)", podFullName, uid) var GracePeriodSeconds int64 - if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &GracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil { + if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{GracePeriodSeconds: &GracePeriodSeconds, Preconditions: &metav1.Preconditions{UID: uid}}); err != nil { // Unfortunately, there's no generic error for failing a precondition if !(apierrors.IsNotFound(err) || apierrors.IsConflict(err)) { // We should return the error here, but historically this routine does diff --git a/pkg/kubelet/pod/mirror_client_test.go b/pkg/kubelet/pod/mirror_client_test.go index 00afe12bbff..f3d74cbbf7e 100644 --- a/pkg/kubelet/pod/mirror_client_test.go +++ b/pkg/kubelet/pod/mirror_client_test.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "context" "errors" "testing" @@ -125,7 +126,7 @@ func TestCreateMirrorPod(t *testing.T) { return } - createdPod, err := clientset.CoreV1().Pods(testPodNS).Get(testPodName, metav1.GetOptions{}) + createdPod, err := clientset.CoreV1().Pods(testPodNS).Get(context.TODO(), testPodName, metav1.GetOptions{}) require.NoError(t, err) // Validate created pod diff --git a/pkg/kubelet/secret/secret_manager.go b/pkg/kubelet/secret/secret_manager.go index 56843ed9107..193afa51c88 100644 --- a/pkg/kubelet/secret/secret_manager.go +++ b/pkg/kubelet/secret/secret_manager.go @@ -17,6 +17,7 @@ limitations under the License.
package secret import ( + "context" "fmt" "time" @@ -62,7 +63,7 @@ func NewSimpleSecretManager(kubeClient clientset.Interface) Manager { } func (s *simpleSecretManager) GetSecret(namespace, name string) (*v1.Secret, error) { - return s.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) + return s.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } func (s *simpleSecretManager) RegisterPod(pod *v1.Pod) { @@ -121,7 +122,7 @@ const ( // value in cache; otherwise it is just fetched from cache func NewCachingSecretManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager { getSecret := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) { - return kubeClient.CoreV1().Secrets(namespace).Get(name, opts) + return kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, opts) } secretStore := manager.NewObjectStore(getSecret, clock.RealClock{}, getTTL, defaultTTL) return &secretManager{ @@ -137,10 +138,10 @@ func NewCachingSecretManager(kubeClient clientset.Interface, getTTL manager.GetO // - every GetObject() returns a value from local cache propagated via watches func NewWatchingSecretManager(kubeClient clientset.Interface) Manager { listSecret := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) { - return kubeClient.CoreV1().Secrets(namespace).List(opts) + return kubeClient.CoreV1().Secrets(namespace).List(context.TODO(), opts) } watchSecret := func(namespace string, opts metav1.ListOptions) (watch.Interface, error) { - return kubeClient.CoreV1().Secrets(namespace).Watch(opts) + return kubeClient.CoreV1().Secrets(namespace).Watch(context.TODO(), opts) } newSecret := func() runtime.Object { return &v1.Secret{} diff --git a/pkg/kubelet/secret/secret_manager_test.go b/pkg/kubelet/secret/secret_manager_test.go index 0dba94a0262..82072e4595a 100644 --- a/pkg/kubelet/secret/secret_manager_test.go +++ b/pkg/kubelet/secret/secret_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. package secret import ( + "context" "fmt" "strings" "testing" @@ -49,7 +50,7 @@ func noObjectTTL() (time.Duration, bool) { func getSecret(fakeClient clientset.Interface) manager.GetObjectFunc { return func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) { - return fakeClient.CoreV1().Secrets(namespace).Get(name, opts) + return fakeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, opts) } } diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go index 1cb34e06a41..d1b4f951033 100644 --- a/pkg/kubelet/status/status_manager.go +++ b/pkg/kubelet/status/status_manager.go @@ -17,6 +17,7 @@ limitations under the License. 
package status import ( + "context" "fmt" "sort" "sync" @@ -519,7 +520,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { } // TODO: make me easier to express from client code - pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{}) + pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(context.TODO(), status.podName, metav1.GetOptions{}) if errors.IsNotFound(err) { klog.V(3).Infof("Pod %q does not exist on the server", format.PodDesc(status.podName, status.podNamespace, uid)) // If the Pod is deleted the status will be cleared in @@ -556,7 +557,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { deleteOptions := metav1.NewDeleteOptions(0) // Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace. deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID)) - err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions) + err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, deleteOptions) if err != nil { klog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err) return diff --git a/pkg/kubelet/token/token_manager.go b/pkg/kubelet/token/token_manager.go index 6c2a98713c2..f14d5fe43cc 100644 --- a/pkg/kubelet/token/token_manager.go +++ b/pkg/kubelet/token/token_manager.go @@ -19,6 +19,7 @@ limitations under the License. package token import ( + "context" "errors" "fmt" "sync" @@ -64,7 +65,7 @@ func NewManager(c clientset.Interface) *Manager { if c == nil { return nil, errors.New("cannot use TokenManager when kubelet is in standalone mode") } - tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(name, tr) + tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr) if apierrors.IsNotFound(err) && !tokenRequestsSupported() { return nil, fmt.Errorf("the API server does not have TokenRequest endpoints enabled") } diff --git a/pkg/kubelet/util/manager/cache_based_manager_test.go b/pkg/kubelet/util/manager/cache_based_manager_test.go index fb88c003074..20c292661d8 100644 --- a/pkg/kubelet/util/manager/cache_based_manager_test.go +++ b/pkg/kubelet/util/manager/cache_based_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. package manager import ( + "context" "fmt" "reflect" "strings" @@ -56,7 +57,7 @@ func noObjectTTL() (time.Duration, bool) { func getSecret(fakeClient clientset.Interface) GetObjectFunc { return func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) { - return fakeClient.CoreV1().Secrets(namespace).Get(name, opts) + return fakeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, opts) } } diff --git a/pkg/kubelet/util/manager/watch_based_manager_test.go b/pkg/kubelet/util/manager/watch_based_manager_test.go index fc27a7374d6..b06c7a39605 100644 --- a/pkg/kubelet/util/manager/watch_based_manager_test.go +++ b/pkg/kubelet/util/manager/watch_based_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package manager import ( + "context" "fmt" "strings" "testing" @@ -40,13 +41,13 @@ import ( func listSecret(fakeClient clientset.Interface) listObjectFunc { return func(namespace string, opts metav1.ListOptions) (runtime.Object, error) { - return fakeClient.CoreV1().Secrets(namespace).List(opts) + return fakeClient.CoreV1().Secrets(namespace).List(context.TODO(), opts) } } func watchSecret(fakeClient clientset.Interface) watchObjectFunc { return func(namespace string, opts metav1.ListOptions) (watch.Interface, error) { - return fakeClient.CoreV1().Secrets(namespace).Watch(opts) + return fakeClient.CoreV1().Secrets(namespace).Watch(context.TODO(), opts) } } diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index fce0d839911..677ba61eee3 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -21,6 +21,7 @@ caches in sync with the "ground truth". package populator import ( + "context" "errors" "fmt" "sync" @@ -576,7 +577,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( namespace string, claimName string) (*v1.PersistentVolumeClaim, error) { pvc, err := - dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), claimName, metav1.GetOptions{}) if err != nil || pvc == nil { return nil, fmt.Errorf("failed to fetch PVC from API server: %v", err) } @@ -612,7 +613,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVSpec( name string, pvcReadOnly bool, expectedClaimUID types.UID) (*volume.Spec, string, error) { - pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{}) + pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil || pv == nil { return nil, "", fmt.Errorf( "failed to fetch PV %s from API server: %v", name, err) diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index adb2e2038cd..5ac1fa37cf9 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -20,6 +20,7 @@ limitations under the License. package reconciler import ( + "context" "fmt" "io/ioutil" "os" @@ -599,7 +600,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, // updateDevicePath gets the node status to retrieve volume device path information. func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) { - node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{}) + node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(rc.nodeName), metav1.GetOptions{}) if fetchErr != nil { klog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr) } else { diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go index bd61425348d..1388fb9d77c 100644 --- a/pkg/kubelet/volumemanager/volume_manager_test.go +++ b/pkg/kubelet/volumemanager/volume_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package volumemanager import ( + "context" "os" "reflect" "strconv" @@ -425,11 +426,11 @@ func delayClaimBecomesBound( ) { time.Sleep(500 * time.Millisecond) volumeClaim, _ := - kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), claimName, metav1.GetOptions{}) volumeClaim.Status = v1.PersistentVolumeClaimStatus{ Phase: v1.ClaimBound, } - kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(volumeClaim) + kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(context.TODO(), volumeClaim) } func runVolumeManager(manager VolumeManager) chan struct{} { diff --git a/pkg/kubemark/controller.go b/pkg/kubemark/controller.go index d5ce070c7ab..38f16e23d63 100644 --- a/pkg/kubemark/controller.go +++ b/pkg/kubemark/controller.go @@ -17,6 +17,7 @@ limitations under the License. package kubemark import ( + "context" "fmt" "math/rand" "sync" @@ -226,7 +227,7 @@ func (kubemarkController *KubemarkController) addNodeToNodeGroup(nodeGroup strin var err error for i := 0; i < numRetries; i++ { - _, err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(node.Namespace).Create(node) + _, err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(node.Namespace).Create(context.TODO(), node) if err == nil { return nil } @@ -247,8 +248,7 @@ func (kubemarkController *KubemarkController) RemoveNodeFromNodeGroup(nodeGroup policy := metav1.DeletePropagationForeground var err error for i := 0; i < numRetries; i++ { - err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(namespaceKubemark).Delete( - pod.ObjectMeta.Labels["name"], + err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(namespaceKubemark).Delete(context.TODO(), pod.ObjectMeta.Labels["name"], &metav1.DeleteOptions{PropagationPolicy: &policy}) if err == nil { klog.Infof("marking node %s for deletion", node) @@ -374,7 +374,7 @@ func (kubemarkCluster *kubemarkCluster) removeUnneededNodes(oldObj interface{}, defer kubemarkCluster.nodesToDeleteLock.Unlock() if kubemarkCluster.nodesToDelete[node.Name] { kubemarkCluster.nodesToDelete[node.Name] = false - if err := kubemarkCluster.client.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{}); err != nil { + if err := kubemarkCluster.client.CoreV1().Nodes().Delete(context.TODO(), node.Name, &metav1.DeleteOptions{}); err != nil { klog.Errorf("failed to delete node %s from kubemark cluster, err: %v", node.Name, err) } } diff --git a/pkg/master/client_util.go b/pkg/master/client_util.go index acfcb0e8994..afefa6a9ec5 100644 --- a/pkg/master/client_util.go +++ b/pkg/master/client_util.go @@ -17,6 +17,7 @@ limitations under the License. 
package master import ( + "context" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,7 +25,7 @@ import ( ) func createNamespaceIfNeeded(c corev1client.NamespacesGetter, ns string) error { - if _, err := c.Namespaces().Get(ns, metav1.GetOptions{}); err == nil { + if _, err := c.Namespaces().Get(context.TODO(), ns, metav1.GetOptions{}); err == nil { // the namespace already exists return nil } @@ -34,7 +35,7 @@ func createNamespaceIfNeeded(c corev1client.NamespacesGetter, ns string) error { Namespace: "", }, } - _, err := c.Namespaces().Create(newNs) + _, err := c.Namespaces().Create(context.TODO(), newNs) if err != nil && errors.IsAlreadyExists(err) { err = nil } diff --git a/pkg/master/controller.go b/pkg/master/controller.go index fa024976484..76ef9fabb0a 100644 --- a/pkg/master/controller.go +++ b/pkg/master/controller.go @@ -281,12 +281,12 @@ func createEndpointPortSpec(endpointPort int, endpointPortName string, extraEndp // CreateOrUpdateMasterServiceIfNeeded will create the specified service if it // doesn't already exist. func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePorts []corev1.ServicePort, serviceType corev1.ServiceType, reconcile bool) error { - if s, err := c.ServiceClient.Services(metav1.NamespaceDefault).Get(serviceName, metav1.GetOptions{}); err == nil { + if s, err := c.ServiceClient.Services(metav1.NamespaceDefault).Get(context.TODO(), serviceName, metav1.GetOptions{}); err == nil { // The service already exists. if reconcile { if svc, updated := reconcilers.GetMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated { klog.Warningf("Resetting master service %q to %#v", serviceName, svc) - _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Update(svc) + _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Update(context.TODO(), svc) return err } } @@ -308,7 +308,7 @@ func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, ser }, } - _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Create(svc) + _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Create(context.TODO(), svc) if errors.IsAlreadyExists(err) { return c.CreateOrUpdateMasterServiceIfNeeded(serviceName, serviceIP, servicePorts, serviceType, reconcile) } diff --git a/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go b/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go index 03904f67c1c..abeb02bb50c 100644 --- a/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go +++ b/pkg/master/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go @@ -18,6 +18,7 @@ package clusterauthenticationtrust import ( "bytes" + "context" "crypto/x509" "encoding/json" "encoding/pem" @@ -174,7 +175,7 @@ func (c *Controller) syncConfigMap() error { } func createNamespaceIfNeeded(nsClient corev1client.NamespacesGetter, ns string) error { - if _, err := nsClient.Namespaces().Get(ns, metav1.GetOptions{}); err == nil { + if _, err := nsClient.Namespaces().Get(context.TODO(), ns, metav1.GetOptions{}); err == nil { // the namespace already exists return nil } @@ -184,7 +185,7 @@ func createNamespaceIfNeeded(nsClient corev1client.NamespacesGetter, ns string) Namespace: "", }, } - _, err := nsClient.Namespaces().Create(newNs) + _, err := nsClient.Namespaces().Create(context.TODO(), newNs) if err != nil && 
apierrors.IsAlreadyExists(err) { err = nil } @@ -192,9 +193,9 @@ func createNamespaceIfNeeded(nsClient corev1client.NamespacesGetter, ns string) } func writeConfigMap(configMapClient corev1client.ConfigMapsGetter, required *corev1.ConfigMap) error { - _, err := configMapClient.ConfigMaps(required.Namespace).Update(required) + _, err := configMapClient.ConfigMaps(required.Namespace).Update(context.TODO(), required) if apierrors.IsNotFound(err) { - _, err := configMapClient.ConfigMaps(required.Namespace).Create(required) + _, err := configMapClient.ConfigMaps(required.Namespace).Create(context.TODO(), required) return err } @@ -204,7 +205,7 @@ func writeConfigMap(configMapClient corev1client.ConfigMapsGetter, required *cor // 1. request is so big the generic request catcher finds it // 2. the content is so large that that the server sends a validation error "Too long: must have at most 1048576 characters" if apierrors.IsRequestEntityTooLargeError(err) || (apierrors.IsInvalid(err) && strings.Contains(err.Error(), "Too long")) { - if deleteErr := configMapClient.ConfigMaps(required.Namespace).Delete(required.Name, nil); deleteErr != nil { + if deleteErr := configMapClient.ConfigMaps(required.Namespace).Delete(context.TODO(), required.Name, nil); deleteErr != nil { return deleteErr } return err diff --git a/pkg/master/master.go b/pkg/master/master.go index b03b0c9ea03..a5849d0b8c9 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -17,6 +17,7 @@ limitations under the License. package master import ( + "context" "fmt" "net" "net/http" @@ -523,7 +524,7 @@ func (n nodeAddressProvider) externalAddresses() ([]string, error) { preferredAddressTypes := []apiv1.NodeAddressType{ apiv1.NodeExternalIP, } - nodes, err := n.nodeClient.List(metav1.ListOptions{}) + nodes, err := n.nodeClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index 48d9189cd33..e26f2376bc3 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -259,10 +259,10 @@ func TestGetNodeAddresses(t *testing.T) { assert.Equal([]string(nil), addrs) // Pass case with External type IP - nodes, _ := fakeNodeClient.List(metav1.ListOptions{}) + nodes, _ := fakeNodeClient.List(context.TODO(), metav1.ListOptions{}) for index := range nodes.Items { nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}} - fakeNodeClient.Update(&nodes.Items[index]) + fakeNodeClient.Update(context.TODO(), &nodes.Items[index]) } addrs, err = addressProvider.externalAddresses() assert.NoError(err, "addresses should not have returned an error.") @@ -276,9 +276,9 @@ func TestGetNodeAddressesWithOnlySomeExternalIP(t *testing.T) { addressProvider := nodeAddressProvider{fakeNodeClient} // Pass case with 1 External type IP (index == 1) and nodes (indexes 0 & 2) have no External IP. 
- nodes, _ := fakeNodeClient.List(metav1.ListOptions{}) + nodes, _ := fakeNodeClient.List(context.TODO(), metav1.ListOptions{}) nodes.Items[1].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}} - fakeNodeClient.Update(&nodes.Items[1]) + fakeNodeClient.Update(context.TODO(), &nodes.Items[1]) addrs, err := addressProvider.externalAddresses() assert.NoError(err, "addresses should not have returned an error.") diff --git a/pkg/master/reconcilers/endpointsadapter.go b/pkg/master/reconcilers/endpointsadapter.go index 5ad8cb687d9..4dd38bdf678 100644 --- a/pkg/master/reconcilers/endpointsadapter.go +++ b/pkg/master/reconcilers/endpointsadapter.go @@ -17,6 +17,7 @@ limitations under the License. package reconcilers import ( + "context" corev1 "k8s.io/api/core/v1" discovery "k8s.io/api/discovery/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -51,7 +52,7 @@ func NewEndpointsAdapter(endpointClient corev1client.EndpointsGetter, endpointSl // Get takes the name and namespace of the Endpoints resource, and returns a // corresponding Endpoints object if it exists, and an error if there is any. func (adapter *EndpointsAdapter) Get(namespace, name string, getOpts metav1.GetOptions) (*corev1.Endpoints, error) { - return adapter.endpointClient.Endpoints(namespace).Get(name, getOpts) + return adapter.endpointClient.Endpoints(namespace).Get(context.TODO(), name, getOpts) } // Create accepts a namespace and Endpoints object and creates the Endpoints @@ -59,7 +60,7 @@ func (adapter *EndpointsAdapter) Get(namespace, name string, getOpts metav1.GetO // be created or updated. The created Endpoints object or an error will be // returned. func (adapter *EndpointsAdapter) Create(namespace string, endpoints *corev1.Endpoints) (*corev1.Endpoints, error) { - endpoints, err := adapter.endpointClient.Endpoints(namespace).Create(endpoints) + endpoints, err := adapter.endpointClient.Endpoints(namespace).Create(context.TODO(), endpoints) if err == nil { err = adapter.EnsureEndpointSliceFromEndpoints(namespace, endpoints) } @@ -70,7 +71,7 @@ func (adapter *EndpointsAdapter) Create(namespace string, endpoints *corev1.Endp // endpointSliceClient exists, a matching EndpointSlice will also be created or // updated. The updated Endpoints object or an error will be returned. 
func (adapter *EndpointsAdapter) Update(namespace string, endpoints *corev1.Endpoints) (*corev1.Endpoints, error) { - endpoints, err := adapter.endpointClient.Endpoints(namespace).Update(endpoints) + endpoints, err := adapter.endpointClient.Endpoints(namespace).Update(context.TODO(), endpoints) if err == nil { err = adapter.EnsureEndpointSliceFromEndpoints(namespace, endpoints) } @@ -85,11 +86,11 @@ func (adapter *EndpointsAdapter) EnsureEndpointSliceFromEndpoints(namespace stri return nil } endpointSlice := endpointSliceFromEndpoints(endpoints) - currentEndpointSlice, err := adapter.endpointSliceClient.EndpointSlices(namespace).Get(endpointSlice.Name, metav1.GetOptions{}) + currentEndpointSlice, err := adapter.endpointSliceClient.EndpointSlices(namespace).Get(context.TODO(), endpointSlice.Name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { - if _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(endpointSlice); errors.IsAlreadyExists(err) { + if _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(context.TODO(), endpointSlice); errors.IsAlreadyExists(err) { err = nil } } @@ -98,11 +99,11 @@ func (adapter *EndpointsAdapter) EnsureEndpointSliceFromEndpoints(namespace stri // required for transition from IP to IPv4 address type. if currentEndpointSlice.AddressType != endpointSlice.AddressType { - err = adapter.endpointSliceClient.EndpointSlices(namespace).Delete(endpointSlice.Name, &metav1.DeleteOptions{}) + err = adapter.endpointSliceClient.EndpointSlices(namespace).Delete(context.TODO(), endpointSlice.Name, &metav1.DeleteOptions{}) if err != nil { return err } - _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(endpointSlice) + _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(context.TODO(), endpointSlice) return err } @@ -112,7 +113,7 @@ func (adapter *EndpointsAdapter) EnsureEndpointSliceFromEndpoints(namespace stri return nil } - _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Update(endpointSlice) + _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Update(context.TODO(), endpointSlice) return err } diff --git a/pkg/master/reconcilers/endpointsadapter_test.go b/pkg/master/reconcilers/endpointsadapter_test.go index 16edff0863d..c888da1e7db 100644 --- a/pkg/master/reconcilers/endpointsadapter_test.go +++ b/pkg/master/reconcilers/endpointsadapter_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package reconcilers import ( + "context" "fmt" "testing" @@ -83,7 +84,7 @@ func TestEndpointsAdapterGet(t *testing.T) { } for _, endpoints := range testCase.endpoints { - _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(endpoints) + _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints) if err != nil { t.Fatalf("Error creating Endpoints: %v", err) } @@ -181,7 +182,7 @@ func TestEndpointsAdapterCreate(t *testing.T) { } for _, endpoints := range testCase.endpoints { - _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(endpoints) + _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints) if err != nil { t.Fatalf("Error creating Endpoints: %v", err) } @@ -197,7 +198,7 @@ func TestEndpointsAdapterCreate(t *testing.T) { t.Errorf("Expected endpoints: %v, got: %v", testCase.expectedEndpoints, endpoints) } - epSliceList, err := client.DiscoveryV1beta1().EndpointSlices(testCase.namespaceParam).List(metav1.ListOptions{}) + epSliceList, err := client.DiscoveryV1beta1().EndpointSlices(testCase.namespaceParam).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Error listing Endpoint Slices: %v", err) } @@ -292,7 +293,7 @@ func TestEndpointsAdapterUpdate(t *testing.T) { } for _, endpoints := range testCase.endpoints { - _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(endpoints) + _, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints) if err != nil { t.Fatalf("Error creating Endpoints: %v", err) } @@ -308,7 +309,7 @@ func TestEndpointsAdapterUpdate(t *testing.T) { t.Errorf("Expected endpoints: %v, got: %v", testCase.expectedEndpoints, endpoints) } - epSliceList, err := client.DiscoveryV1beta1().EndpointSlices(testCase.namespaceParam).List(metav1.ListOptions{}) + epSliceList, err := client.DiscoveryV1beta1().EndpointSlices(testCase.namespaceParam).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Error listing Endpoint Slices: %v", err) } @@ -434,7 +435,7 @@ func TestEndpointsAdapterEnsureEndpointSliceFromEndpoints(t *testing.T) { } for _, endpointSlice := range testCase.endpointSlices { - _, err := client.DiscoveryV1beta1().EndpointSlices(endpointSlice.Namespace).Create(endpointSlice) + _, err := client.DiscoveryV1beta1().EndpointSlices(endpointSlice.Namespace).Create(context.TODO(), endpointSlice) if err != nil { t.Fatalf("Error creating EndpointSlice: %v", err) } @@ -445,7 +446,7 @@ func TestEndpointsAdapterEnsureEndpointSliceFromEndpoints(t *testing.T) { t.Errorf("Expected error: %v, got: %v", testCase.expectedError, err) } - endpointSlice, err := client.DiscoveryV1beta1().EndpointSlices(testCase.namespaceParam).Get(testCase.endpointsParam.Name, metav1.GetOptions{}) + endpointSlice, err := client.DiscoveryV1beta1().EndpointSlices(testCase.namespaceParam).Get(context.TODO(), testCase.endpointsParam.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { t.Fatalf("Error getting Endpoint Slice: %v", err) } diff --git a/pkg/master/reconcilers/lease_test.go b/pkg/master/reconcilers/lease_test.go index 4bc6c49db9a..2160816c56f 100644 --- a/pkg/master/reconcilers/lease_test.go +++ b/pkg/master/reconcilers/lease_test.go @@ -22,6 +22,7 @@ https://github.com/openshift/origin/blob/bb340c5dd5ff72718be86fb194dedc0faed7f4c */ import ( + "context" "net" "reflect" "testing" @@ -420,7 +421,7 @@ func TestLeaseEndpointReconciler(t *testing.T) { clientset := fake.NewSimpleClientset() if test.endpoints != nil { for _, ep := 
range test.endpoints.Items { - if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(&ep); err != nil { + if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep); err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) continue } @@ -433,7 +434,7 @@ func TestLeaseEndpointReconciler(t *testing.T) { if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) } - actualEndpoints, err := clientset.CoreV1().Endpoints(corev1.NamespaceDefault).Get(test.serviceName, metav1.GetOptions{}) + actualEndpoints, err := clientset.CoreV1().Endpoints(corev1.NamespaceDefault).Get(context.TODO(), test.serviceName, metav1.GetOptions{}) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) } @@ -522,7 +523,7 @@ func TestLeaseEndpointReconciler(t *testing.T) { clientset := fake.NewSimpleClientset() if test.endpoints != nil { for _, ep := range test.endpoints.Items { - if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(&ep); err != nil { + if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep); err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) continue } @@ -534,7 +535,7 @@ func TestLeaseEndpointReconciler(t *testing.T) { if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) } - actualEndpoints, err := clientset.CoreV1().Endpoints(corev1.NamespaceDefault).Get(test.serviceName, metav1.GetOptions{}) + actualEndpoints, err := clientset.CoreV1().Endpoints(corev1.NamespaceDefault).Get(context.TODO(), test.serviceName, metav1.GetOptions{}) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) } @@ -637,7 +638,7 @@ func TestLeaseRemoveEndpoints(t *testing.T) { fakeLeases.SetKeys(test.endpointKeys) clientset := fake.NewSimpleClientset() for _, ep := range test.endpoints.Items { - if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(&ep); err != nil { + if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep); err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) continue } @@ -648,7 +649,7 @@ func TestLeaseRemoveEndpoints(t *testing.T) { if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) } - actualEndpoints, err := clientset.CoreV1().Endpoints(corev1.NamespaceDefault).Get(test.serviceName, metav1.GetOptions{}) + actualEndpoints, err := clientset.CoreV1().Endpoints(corev1.NamespaceDefault).Get(context.TODO(), test.serviceName, metav1.GetOptions{}) if err != nil { t.Errorf("case %q: unexpected error: %v", test.testName, err) } diff --git a/pkg/registry/core/pod/storage/eviction.go b/pkg/registry/core/pod/storage/eviction.go index 5cdf3b44d1e..6f69390f6eb 100644 --- a/pkg/registry/core/pod/storage/eviction.go +++ b/pkg/registry/core/pod/storage/eviction.go @@ -163,7 +163,7 @@ func (r *EvictionREST) Create(ctx context.Context, name string, obj runtime.Obje refresh := false err = retry.RetryOnConflict(EvictionsRetry, func() error { if refresh { - pdb, err = r.podDisruptionBudgetClient.PodDisruptionBudgets(pod.Namespace).Get(pdbName, metav1.GetOptions{}) + pdb, err = r.podDisruptionBudgetClient.PodDisruptionBudgets(pod.Namespace).Get(context.TODO(), pdbName, metav1.GetOptions{}) if err != nil { return err } @@ -241,7 +241,7 @@ func (r *EvictionREST) checkAndDecrement(namespace string, podName string, pdb p // If the pod is not deleted within a reasonable time limit PDB controller will assume that it won't // be deleted at all and 
remove it from DisruptedPod map. pdb.Status.DisruptedPods[podName] = metav1.Time{Time: time.Now()} - if _, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(namespace).UpdateStatus(&pdb); err != nil { + if _, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(namespace).UpdateStatus(context.TODO(), &pdb); err != nil { return err } @@ -254,7 +254,7 @@ func (r *EvictionREST) getPodDisruptionBudgets(ctx context.Context, pod *api.Pod return nil, nil } - pdbList, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(pod.Namespace).List(metav1.ListOptions{}) + pdbList, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(pod.Namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/pkg/registry/core/service/ipallocator/controller/repair.go b/pkg/registry/core/service/ipallocator/controller/repair.go index e89af254121..ce067f7e2c0 100644 --- a/pkg/registry/core/service/ipallocator/controller/repair.go +++ b/pkg/registry/core/service/ipallocator/controller/repair.go @@ -17,6 +17,7 @@ limitations under the License. package controller import ( + "context" "fmt" "net" "time" @@ -190,7 +191,7 @@ func (c *Repair) runOnce() error { // the service collection. The caching layer keeps per-collection RVs, // and this is proper, since in theory the collections could be hosted // in separate etcd (or even non-etcd) instances. - list, err := c.serviceClient.Services(metav1.NamespaceAll).List(metav1.ListOptions{}) + list, err := c.serviceClient.Services(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { return fmt.Errorf("unable to refresh the service IP block: %v", err) } diff --git a/pkg/registry/core/service/portallocator/controller/repair.go b/pkg/registry/core/service/portallocator/controller/repair.go index b240e4a5673..32ad5b802b0 100644 --- a/pkg/registry/core/service/portallocator/controller/repair.go +++ b/pkg/registry/core/service/portallocator/controller/repair.go @@ -17,6 +17,7 @@ limitations under the License. package controller import ( + "context" "fmt" "time" @@ -116,7 +117,7 @@ func (c *Repair) runOnce() error { // the service collection. The caching layer keeps per-collection RVs, // and this is proper, since in theory the collections could be hosted // in separate etcd (or even non-etcd) instances. - list, err := c.serviceClient.Services(metav1.NamespaceAll).List(metav1.ListOptions{}) + list, err := c.serviceClient.Services(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { return fmt.Errorf("unable to refresh the port block: %v", err) } diff --git a/pkg/registry/flowcontrol/rest/storage_flowcontrol.go b/pkg/registry/flowcontrol/rest/storage_flowcontrol.go index 84bcc7e29fb..965327a32b8 100644 --- a/pkg/registry/flowcontrol/rest/storage_flowcontrol.go +++ b/pkg/registry/flowcontrol/rest/storage_flowcontrol.go @@ -17,6 +17,7 @@ limitations under the License. package rest import ( + "context" "fmt" "time" @@ -142,7 +143,7 @@ func (p RESTStorageProvider) PostStartHook() (string, genericapiserver.PostStart // Returns false if there's a "exempt" priority-level existing in the cluster, otherwise returns a true // if the "exempt" priority-level is not found. 
func lastMandatoryExists(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface) (bool, error) { - if _, err := flowcontrolClientSet.PriorityLevelConfigurations().Get(flowcontrol.PriorityLevelConfigurationNameExempt, metav1.GetOptions{}); err != nil { + if _, err := flowcontrolClientSet.PriorityLevelConfigurations().Get(context.TODO(), flowcontrol.PriorityLevelConfigurationNameExempt, metav1.GetOptions{}); err != nil { if apierrors.IsNotFound(err) { return true, nil } @@ -153,7 +154,7 @@ func lastMandatoryExists(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alp func ensure(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface, flowSchemas []*flowcontrolv1alpha1.FlowSchema, priorityLevels []*flowcontrolv1alpha1.PriorityLevelConfiguration) error { for _, flowSchema := range flowSchemas { - _, err := flowcontrolClientSet.FlowSchemas().Create(flowSchema) + _, err := flowcontrolClientSet.FlowSchemas().Create(context.TODO(), flowSchema) if apierrors.IsAlreadyExists(err) { klog.V(3).Infof("system preset FlowSchema %s already exists, skipping creating", flowSchema.Name) continue @@ -164,7 +165,7 @@ func ensure(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface, klog.V(3).Infof("created system preset FlowSchema %s", flowSchema.Name) } for _, priorityLevelConfiguration := range priorityLevels { - _, err := flowcontrolClientSet.PriorityLevelConfigurations().Create(priorityLevelConfiguration) + _, err := flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), priorityLevelConfiguration) if apierrors.IsAlreadyExists(err) { klog.V(3).Infof("system preset PriorityLevelConfiguration %s already exists, skipping creating", priorityLevelConfiguration.Name) continue @@ -179,7 +180,7 @@ func ensure(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface, func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface, flowSchemas []*flowcontrolv1alpha1.FlowSchema, priorityLevels []*flowcontrolv1alpha1.PriorityLevelConfiguration) error { for _, expectedFlowSchema := range flowSchemas { - actualFlowSchema, err := flowcontrolClientSet.FlowSchemas().Get(expectedFlowSchema.Name, metav1.GetOptions{}) + actualFlowSchema, err := flowcontrolClientSet.FlowSchemas().Get(context.TODO(), expectedFlowSchema.Name, metav1.GetOptions{}) if err == nil { // TODO(yue9944882): extract existing version from label and compare // TODO(yue9944882): create w/ version string attached @@ -188,7 +189,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface return fmt.Errorf("failed checking if mandatory FlowSchema %s is up-to-date due to %v, will retry later", expectedFlowSchema.Name, err) } if !identical { - if _, err := flowcontrolClientSet.FlowSchemas().Update(expectedFlowSchema); err != nil { + if _, err := flowcontrolClientSet.FlowSchemas().Update(context.TODO(), expectedFlowSchema); err != nil { return fmt.Errorf("failed upgrading mandatory FlowSchema %s due to %v, will retry later", expectedFlowSchema.Name, err) } } @@ -197,7 +198,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface if !apierrors.IsNotFound(err) { return fmt.Errorf("failed getting FlowSchema %s due to %v, will retry later", expectedFlowSchema.Name, err) } - _, err = flowcontrolClientSet.FlowSchemas().Create(expectedFlowSchema) + _, err = flowcontrolClientSet.FlowSchemas().Create(context.TODO(), expectedFlowSchema) if apierrors.IsAlreadyExists(err) { klog.V(3).Infof("system preset FlowSchema %s already exists, 
skipping creating", expectedFlowSchema.Name) continue @@ -208,7 +209,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface klog.V(3).Infof("created system preset FlowSchema %s", expectedFlowSchema.Name) } for _, expectedPriorityLevelConfiguration := range priorityLevels { - actualPriorityLevelConfiguration, err := flowcontrolClientSet.PriorityLevelConfigurations().Get(expectedPriorityLevelConfiguration.Name, metav1.GetOptions{}) + actualPriorityLevelConfiguration, err := flowcontrolClientSet.PriorityLevelConfigurations().Get(context.TODO(), expectedPriorityLevelConfiguration.Name, metav1.GetOptions{}) if err == nil { // TODO(yue9944882): extract existing version from label and compare // TODO(yue9944882): create w/ version string attached @@ -217,7 +218,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface return fmt.Errorf("failed checking if mandatory PriorityLevelConfiguration %s is up-to-date due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err) } if !identical { - if _, err := flowcontrolClientSet.PriorityLevelConfigurations().Update(expectedPriorityLevelConfiguration); err != nil { + if _, err := flowcontrolClientSet.PriorityLevelConfigurations().Update(context.TODO(), expectedPriorityLevelConfiguration); err != nil { return fmt.Errorf("failed upgrading mandatory PriorityLevelConfiguration %s due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err) } } @@ -226,7 +227,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface if !apierrors.IsNotFound(err) { return fmt.Errorf("failed getting PriorityLevelConfiguration %s due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err) } - _, err = flowcontrolClientSet.PriorityLevelConfigurations().Create(expectedPriorityLevelConfiguration) + _, err = flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), expectedPriorityLevelConfiguration) if apierrors.IsAlreadyExists(err) { klog.V(3).Infof("system preset PriorityLevelConfiguration %s already exists, skipping creating", expectedPriorityLevelConfiguration.Name) continue diff --git a/pkg/registry/flowcontrol/rest/storage_flowcontrol_test.go b/pkg/registry/flowcontrol/rest/storage_flowcontrol_test.go index 6c8cba4f21a..1c344bb5b9b 100644 --- a/pkg/registry/flowcontrol/rest/storage_flowcontrol_test.go +++ b/pkg/registry/flowcontrol/rest/storage_flowcontrol_test.go @@ -17,6 +17,7 @@ limitations under the License. package rest import ( + "context" "github.com/stretchr/testify/require" "testing" @@ -49,7 +50,7 @@ func TestShouldEnsurePredefinedSettings(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { c := fake.NewSimpleClientset() if testCase.existingPriorityLevel != nil { - c.FlowcontrolV1alpha1().PriorityLevelConfigurations().Create(testCase.existingPriorityLevel) + c.FlowcontrolV1alpha1().PriorityLevelConfigurations().Create(context.TODO(), testCase.existingPriorityLevel) } should, err := lastMandatoryExists(c.FlowcontrolV1alpha1()) assert.NoError(t, err) diff --git a/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go b/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go index 68dc97fd3f7..cb90ca0d82a 100644 --- a/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go +++ b/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go @@ -17,6 +17,7 @@ limitations under the License. 
package reconciliation import ( + "context" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -79,7 +80,7 @@ type ClusterRoleModifier struct { } func (c ClusterRoleModifier) Get(namespace, name string) (RuleOwner, error) { - ret, err := c.Client.Get(name, metav1.GetOptions{}) + ret, err := c.Client.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -87,7 +88,7 @@ func (c ClusterRoleModifier) Get(namespace, name string) (RuleOwner, error) { } func (c ClusterRoleModifier) Create(in RuleOwner) (RuleOwner, error) { - ret, err := c.Client.Create(in.(ClusterRoleRuleOwner).ClusterRole) + ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole) if err != nil { return nil, err } @@ -95,7 +96,7 @@ func (c ClusterRoleModifier) Create(in RuleOwner) (RuleOwner, error) { } func (c ClusterRoleModifier) Update(in RuleOwner) (RuleOwner, error) { - ret, err := c.Client.Update(in.(ClusterRoleRuleOwner).ClusterRole) + ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole) if err != nil { return nil, err } diff --git a/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go b/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go index b669a595157..0261ec2d92d 100644 --- a/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go +++ b/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go @@ -17,6 +17,7 @@ limitations under the License. package reconciliation import ( + "context" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -80,7 +81,7 @@ type ClusterRoleBindingClientAdapter struct { } func (c ClusterRoleBindingClientAdapter) Get(namespace, name string) (RoleBinding, error) { - ret, err := c.Client.Get(name, metav1.GetOptions{}) + ret, err := c.Client.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -88,7 +89,7 @@ func (c ClusterRoleBindingClientAdapter) Get(namespace, name string) (RoleBindin } func (c ClusterRoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) { - ret, err := c.Client.Create(in.(ClusterRoleBindingAdapter).ClusterRoleBinding) + ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding) if err != nil { return nil, err } @@ -96,7 +97,7 @@ func (c ClusterRoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, er } func (c ClusterRoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, error) { - ret, err := c.Client.Update(in.(ClusterRoleBindingAdapter).ClusterRoleBinding) + ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding) if err != nil { return nil, err } @@ -105,5 +106,5 @@ func (c ClusterRoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, er } func (c ClusterRoleBindingClientAdapter) Delete(namespace, name string, uid types.UID) error { - return c.Client.Delete(name, &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}) + return c.Client.Delete(context.TODO(), name, &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}) } diff --git a/pkg/registry/rbac/reconciliation/namespace.go b/pkg/registry/rbac/reconciliation/namespace.go index 2ee7fe9a2b0..75d161a95d0 100644 --- a/pkg/registry/rbac/reconciliation/namespace.go +++ b/pkg/registry/rbac/reconciliation/namespace.go @@ -17,6 +17,7 @@ limitations under the License. 
package reconciliation import ( + "context" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,7 +29,7 @@ import ( // It is a best effort attempt as the user may not be able to get or create namespaces. // This allows us to handle flows where the user can only mutate roles and role bindings. func tryEnsureNamespace(client corev1client.NamespaceInterface, namespace string) error { - _, getErr := client.Get(namespace, metav1.GetOptions{}) + _, getErr := client.Get(context.TODO(), namespace, metav1.GetOptions{}) if getErr == nil { return nil } @@ -38,7 +39,7 @@ func tryEnsureNamespace(client corev1client.NamespaceInterface, namespace string } ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - _, createErr := client.Create(ns) + _, createErr := client.Create(context.TODO(), ns) return utilerrors.FilterOut(createErr, apierrors.IsAlreadyExists, apierrors.IsForbidden) } diff --git a/pkg/registry/rbac/reconciliation/role_interfaces.go b/pkg/registry/rbac/reconciliation/role_interfaces.go index 1c349d36e7d..30819f3b573 100644 --- a/pkg/registry/rbac/reconciliation/role_interfaces.go +++ b/pkg/registry/rbac/reconciliation/role_interfaces.go @@ -17,6 +17,7 @@ limitations under the License. package reconciliation import ( + "context" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -80,7 +81,7 @@ type RoleModifier struct { } func (c RoleModifier) Get(namespace, name string) (RuleOwner, error) { - ret, err := c.Client.Roles(namespace).Get(name, metav1.GetOptions{}) + ret, err := c.Client.Roles(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -92,7 +93,7 @@ func (c RoleModifier) Create(in RuleOwner) (RuleOwner, error) { return nil, err } - ret, err := c.Client.Roles(in.GetNamespace()).Create(in.(RoleRuleOwner).Role) + ret, err := c.Client.Roles(in.GetNamespace()).Create(context.TODO(), in.(RoleRuleOwner).Role) if err != nil { return nil, err } @@ -100,7 +101,7 @@ func (c RoleModifier) Create(in RuleOwner) (RuleOwner, error) { } func (c RoleModifier) Update(in RuleOwner) (RuleOwner, error) { - ret, err := c.Client.Roles(in.GetNamespace()).Update(in.(RoleRuleOwner).Role) + ret, err := c.Client.Roles(in.GetNamespace()).Update(context.TODO(), in.(RoleRuleOwner).Role) if err != nil { return nil, err } diff --git a/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go b/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go index 3d60537815e..ed55cbe668d 100644 --- a/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go +++ b/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go @@ -17,6 +17,7 @@ limitations under the License. 
package reconciliation import ( + "context" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -82,7 +83,7 @@ type RoleBindingClientAdapter struct { } func (c RoleBindingClientAdapter) Get(namespace, name string) (RoleBinding, error) { - ret, err := c.Client.RoleBindings(namespace).Get(name, metav1.GetOptions{}) + ret, err := c.Client.RoleBindings(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -94,7 +95,7 @@ func (c RoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) { return nil, err } - ret, err := c.Client.RoleBindings(in.GetNamespace()).Create(in.(RoleBindingAdapter).RoleBinding) + ret, err := c.Client.RoleBindings(in.GetNamespace()).Create(context.TODO(), in.(RoleBindingAdapter).RoleBinding) if err != nil { return nil, err } @@ -102,7 +103,7 @@ func (c RoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) { } func (c RoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, error) { - ret, err := c.Client.RoleBindings(in.GetNamespace()).Update(in.(RoleBindingAdapter).RoleBinding) + ret, err := c.Client.RoleBindings(in.GetNamespace()).Update(context.TODO(), in.(RoleBindingAdapter).RoleBinding) if err != nil { return nil, err } @@ -111,5 +112,5 @@ func (c RoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, error) { } func (c RoleBindingClientAdapter) Delete(namespace, name string, uid types.UID) error { - return c.Client.RoleBindings(namespace).Delete(name, &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}) + return c.Client.RoleBindings(namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}) } diff --git a/pkg/registry/rbac/rest/storage_rbac.go b/pkg/registry/rbac/rest/storage_rbac.go index 5454592ab21..3dd1b4794c2 100644 --- a/pkg/registry/rbac/rest/storage_rbac.go +++ b/pkg/registry/rbac/rest/storage_rbac.go @@ -17,6 +17,7 @@ limitations under the License. package rest import ( + "context" "fmt" "time" @@ -177,11 +178,11 @@ func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc { return false, nil } // Make sure etcd is responding before we start reconciling - if _, err := clientset.ClusterRoles().List(metav1.ListOptions{}); err != nil { + if _, err := clientset.ClusterRoles().List(context.TODO(), metav1.ListOptions{}); err != nil { utilruntime.HandleError(fmt.Errorf("unable to initialize clusterroles: %v", err)) return false, nil } - if _, err := clientset.ClusterRoleBindings().List(metav1.ListOptions{}); err != nil { + if _, err := clientset.ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{}); err != nil { utilruntime.HandleError(fmt.Errorf("unable to initialize clusterrolebindings: %v", err)) return false, nil } @@ -337,7 +338,7 @@ func (p RESTStorageProvider) GroupName() string { // that were done to the legacy roles. 
func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clusterRoleClient rbacv1client.ClusterRolesGetter) error { for oldName, newName := range clusterRolesToAggregate { - _, err := clusterRoleClient.ClusterRoles().Get(newName, metav1.GetOptions{}) + _, err := clusterRoleClient.ClusterRoles().Get(context.TODO(), newName, metav1.GetOptions{}) if err == nil { continue } @@ -345,7 +346,7 @@ func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clus return err } - existingRole, err := clusterRoleClient.ClusterRoles().Get(oldName, metav1.GetOptions{}) + existingRole, err := clusterRoleClient.ClusterRoles().Get(context.TODO(), oldName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { continue } @@ -359,7 +360,7 @@ func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clus klog.V(1).Infof("migrating %v to %v", existingRole.Name, newName) existingRole.Name = newName existingRole.ResourceVersion = "" // clear this so the object can be created. - if _, err := clusterRoleClient.ClusterRoles().Create(existingRole); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err := clusterRoleClient.ClusterRoles().Create(context.TODO(), existingRole); err != nil && !apierrors.IsAlreadyExists(err) { return err } } @@ -373,7 +374,7 @@ func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clus func primeSplitClusterRoleBindings(clusterRoleBindingToSplit map[string]rbacapiv1.ClusterRoleBinding, clusterRoleBindingClient rbacv1client.ClusterRoleBindingsGetter) error { for existingBindingName, clusterRoleBindingToCreate := range clusterRoleBindingToSplit { // If source ClusterRoleBinding does not exist, do nothing. - existingRoleBinding, err := clusterRoleBindingClient.ClusterRoleBindings().Get(existingBindingName, metav1.GetOptions{}) + existingRoleBinding, err := clusterRoleBindingClient.ClusterRoleBindings().Get(context.TODO(), existingBindingName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { continue } @@ -382,7 +383,7 @@ func primeSplitClusterRoleBindings(clusterRoleBindingToSplit map[string]rbacapiv } // If the target ClusterRoleBinding already exists, do nothing. - _, err = clusterRoleBindingClient.ClusterRoleBindings().Get(clusterRoleBindingToCreate.Name, metav1.GetOptions{}) + _, err = clusterRoleBindingClient.ClusterRoleBindings().Get(context.TODO(), clusterRoleBindingToCreate.Name, metav1.GetOptions{}) if err == nil { continue } @@ -397,7 +398,7 @@ func primeSplitClusterRoleBindings(clusterRoleBindingToSplit map[string]rbacapiv newCRB.Subjects = existingRoleBinding.Subjects newCRB.Labels = existingRoleBinding.Labels newCRB.Annotations = existingRoleBinding.Annotations - if _, err := clusterRoleBindingClient.ClusterRoleBindings().Create(newCRB); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err := clusterRoleBindingClient.ClusterRoleBindings().Create(context.TODO(), newCRB); err != nil && !apierrors.IsAlreadyExists(err) { return err } } diff --git a/pkg/registry/scheduling/rest/storage_scheduling.go b/pkg/registry/scheduling/rest/storage_scheduling.go index cb32bf83ea9..966644b6957 100644 --- a/pkg/registry/scheduling/rest/storage_scheduling.go +++ b/pkg/registry/scheduling/rest/storage_scheduling.go @@ -17,6 +17,7 @@ limitations under the License. 
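primeAggregatedClusterRoles above migrates a role by copying it under a new name; the key detail is clearing ResourceVersion before the Create. An invented helper that isolates just that technique:

```go
import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
)

// cloneClusterRole copies oldName to newName if the source exists and the
// destination does not already exist. Sketch only; names are invented.
func cloneClusterRole(client rbacv1client.ClusterRoleInterface, oldName, newName string) error {
	existing, err := client.Get(context.TODO(), oldName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return nil // nothing to migrate
	}
	if err != nil {
		return err
	}
	clone := existing.DeepCopy()
	clone.Name = newName
	clone.ResourceVersion = "" // must be empty or the Create is rejected
	if _, err := client.Create(context.TODO(), clone); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	return nil
}
```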
package rest import ( + "context" "fmt" "time" @@ -123,10 +124,10 @@ func AddSystemPriorityClasses() genericapiserver.PostStartHookFunc { } for _, pc := range schedulingapiv1.SystemPriorityClasses() { - _, err := schedClientSet.PriorityClasses().Get(pc.Name, metav1.GetOptions{}) + _, err := schedClientSet.PriorityClasses().Get(context.TODO(), pc.Name, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { - _, err := schedClientSet.PriorityClasses().Create(pc) + _, err := schedClientSet.PriorityClasses().Create(context.TODO(), pc) if err != nil && !apierrors.IsAlreadyExists(err) { return false, err } else { diff --git a/pkg/scheduler/factory.go b/pkg/scheduler/factory.go index 459cd0d1600..8f53e152a59 100644 --- a/pkg/scheduler/factory.go +++ b/pkg/scheduler/factory.go @@ -17,6 +17,7 @@ limitations under the License. package scheduler import ( + "context" "fmt" "sort" "time" @@ -422,7 +423,7 @@ func MakeDefaultErrorFunc(client clientset.Interface, podQueue internalqueue.Sch nodeName := errStatus.Status().Details.Name // when node is not found, We do not remove the node right away. Trying again to get // the node and if the node is still not found, then remove it from the scheduler cache. - _, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + _, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} if err := schedulerCache.RemoveNode(&node); err != nil { @@ -451,7 +452,7 @@ func MakeDefaultErrorFunc(client clientset.Interface, podQueue internalqueue.Sch // Get the pod again; it may have changed/been scheduled already. getBackoff := initialGetBackoff for { - pod, err := client.CoreV1().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{}) + pod, err := client.CoreV1().Pods(podID.Namespace).Get(context.TODO(), podID.Name, metav1.GetOptions{}) if err == nil { if len(pod.Spec.NodeName) == 0 { podInfo.Pod = pod diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index a4f9a057133..a298df07e65 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -357,7 +357,7 @@ func initPolicyFromFile(policyFile string, policy *schedulerapi.Policy) error { // initPolicyFromConfigMap initialize policy from configMap func initPolicyFromConfigMap(client clientset.Interface, policyRef *schedulerapi.SchedulerPolicyConfigMapSource, policy *schedulerapi.Policy) error { // Use a policy serialized in a config map value. 
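The AddSystemPriorityClasses hook above is a get-or-create loop; per class it takes roughly this shape (helper name invented, interim signatures as in the hunks):

```go
import (
	"context"

	schedulingv1 "k8s.io/api/scheduling/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulingv1client "k8s.io/client-go/kubernetes/typed/scheduling/v1"
)

// ensurePriorityClass creates pc only if it is not already present; a racing
// creator is tolerated via IsAlreadyExists. Illustrative sketch only.
func ensurePriorityClass(client schedulingv1client.PriorityClassInterface, pc *schedulingv1.PriorityClass) error {
	if _, err := client.Get(context.TODO(), pc.Name, metav1.GetOptions{}); err == nil {
		return nil
	} else if !apierrors.IsNotFound(err) {
		return err
	}
	if _, err := client.Create(context.TODO(), pc); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	return nil
}
```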
- policyConfigMap, err := client.CoreV1().ConfigMaps(policyRef.Namespace).Get(policyRef.Name, metav1.GetOptions{}) + policyConfigMap, err := client.CoreV1().ConfigMaps(policyRef.Namespace).Get(context.TODO(), policyRef.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("couldn't get policy config map %s/%s: %v", policyRef.Namespace, policyRef.Name, err) } @@ -752,7 +752,7 @@ type podConditionUpdaterImpl struct { func (p *podConditionUpdaterImpl) update(pod *v1.Pod, condition *v1.PodCondition) error { klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s, Reason=%s)", pod.Namespace, pod.Name, condition.Type, condition.Status, condition.Reason) if podutil.UpdatePodCondition(&pod.Status, condition) { - _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) + _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod) return err } return nil @@ -763,17 +763,17 @@ type podPreemptorImpl struct { } func (p *podPreemptorImpl) getUpdatedPod(pod *v1.Pod) (*v1.Pod, error) { - return p.Client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + return p.Client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) } func (p *podPreemptorImpl) deletePod(pod *v1.Pod) error { - return p.Client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}) + return p.Client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, &metav1.DeleteOptions{}) } func (p *podPreemptorImpl) setNominatedNodeName(pod *v1.Pod, nominatedNodeName string) error { podCopy := pod.DeepCopy() podCopy.Status.NominatedNodeName = nominatedNodeName - _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(podCopy) + _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), podCopy) return err } diff --git a/pkg/serviceaccount/jwt_test.go b/pkg/serviceaccount/jwt_test.go index a58d8de8b4b..6852015d0e8 100644 --- a/pkg/serviceaccount/jwt_test.go +++ b/pkg/serviceaccount/jwt_test.go @@ -306,13 +306,13 @@ func TestTokenGenerateAndValidate(t *testing.T) { getter := serviceaccountcontroller.NewGetterFromClient( tc.Client, v1listers.NewSecretLister(newIndexer(func(namespace, name string) (interface{}, error) { - return tc.Client.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) + return tc.Client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) })), v1listers.NewServiceAccountLister(newIndexer(func(namespace, name string) (interface{}, error) { - return tc.Client.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + return tc.Client.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) })), v1listers.NewPodLister(newIndexer(func(namespace, name string) (interface{}, error) { - return tc.Client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + return tc.Client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) })), ) authn := serviceaccount.JWTTokenAuthenticator(serviceaccount.LegacyIssuer, tc.Keys, auds, serviceaccount.NewLegacyValidator(tc.Client != nil, getter)) diff --git a/pkg/util/node/node.go b/pkg/util/node/node.go index 63e0e6fc3cc..ac16760f9b1 100644 --- a/pkg/util/node/node.go +++ b/pkg/util/node/node.go @@ -17,6 +17,7 @@ limitations under the License. 
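The scheduler's pod condition and preemption helpers above always mutate a deep copy and persist it through UpdateStatus. A sketch of that flow, assuming the scheduler's podutil package lives at k8s.io/kubernetes/pkg/api/v1/pod (the helper name here is invented):

```go
import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// markPodUnschedulable records an Unschedulable PodScheduled condition on a
// copy of the pod and writes it back with the context-first UpdateStatus.
func markPodUnschedulable(client kubernetes.Interface, pod *v1.Pod, message string) error {
	podCopy := pod.DeepCopy()
	changed := podutil.UpdatePodCondition(&podCopy.Status, &v1.PodCondition{
		Type:    v1.PodScheduled,
		Status:  v1.ConditionFalse,
		Reason:  v1.PodReasonUnschedulable,
		Message: message,
	})
	if !changed {
		return nil // condition already up to date, skip the API call
	}
	_, err := client.CoreV1().Pods(podCopy.Namespace).UpdateStatus(context.TODO(), podCopy)
	return err
}
```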
package node import ( + "context" "encoding/json" "fmt" "net" @@ -119,7 +120,7 @@ func GetNodeIP(client clientset.Interface, hostname string) net.IP { } err := wait.ExponentialBackoff(backoff, func() (bool, error) { - node, err := client.CoreV1().Nodes().Get(hostname, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(context.TODO(), hostname, metav1.GetOptions{}) if err != nil { klog.Errorf("Failed to retrieve node info: %v", err) return false, nil @@ -226,7 +227,7 @@ func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) erro return fmt.Errorf("failed to json.Marshal CIDR: %v", err) } - if _, err := c.CoreV1().Nodes().Patch(string(node), types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes); err != nil { return fmt.Errorf("failed to patch node CIDR: %v", err) } return nil @@ -247,7 +248,7 @@ func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) return fmt.Errorf("failed to json.Marshal CIDR: %v", err) } klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes)) - if _, err := c.CoreV1().Nodes().Patch(string(node), types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes); err != nil { return fmt.Errorf("failed to patch node CIDR: %v", err) } return nil @@ -260,7 +261,7 @@ func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode return nil, nil, err } - updatedNode, err := c.Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes, "status") + updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, "status") if err != nil { return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err) } diff --git a/pkg/util/pod/pod.go b/pkg/util/pod/pod.go index 984f0ea673d..844eec83908 100644 --- a/pkg/util/pod/pod.go +++ b/pkg/util/pod/pod.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "context" "encoding/json" "fmt" @@ -34,7 +35,7 @@ func PatchPodStatus(c clientset.Interface, namespace, name string, uid types.UID return nil, nil, err } - updatedPod, err := c.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, patchBytes, "status") + updatedPod, err := c.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, "status") if err != nil { return nil, nil, fmt.Errorf("failed to patch status %q for pod %q/%q: %v", patchBytes, namespace, name, err) } diff --git a/pkg/util/pod/pod_test.go b/pkg/util/pod/pod_test.go index 9c2b7e4f11a..e8937cf727c 100644 --- a/pkg/util/pod/pod_test.go +++ b/pkg/util/pod/pod_test.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "context" "fmt" "testing" @@ -33,7 +34,7 @@ func TestPatchPodStatus(t *testing.T) { name := "name" uid := types.UID("myuid") client := &fake.Clientset{} - client.CoreV1().Pods(ns).Create(&v1.Pod{ + client.CoreV1().Pods(ns).Create(context.TODO(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, Name: name, diff --git a/pkg/volume/azure_file/azure_util.go b/pkg/volume/azure_file/azure_util.go index 3407a20a045..8117c8cdc6f 100644 --- a/pkg/volume/azure_file/azure_util.go +++ b/pkg/volume/azure_file/azure_util.go @@ -19,6 +19,7 @@ limitations under the License. 
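PatchNodeCIDR and PatchPodStatus above both hand-build a strategic merge patch and send it with the context-first Patch signature; a self-contained sketch of the node variant (function name invented):

```go
import (
	"context"
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// patchNodePodCIDR writes spec.podCIDR via a strategic merge patch.
func patchNodePodCIDR(client kubernetes.Interface, nodeName, cidr string) error {
	patch := map[string]interface{}{
		"spec": map[string]interface{}{"podCIDR": cidr},
	}
	patchBytes, err := json.Marshal(patch)
	if err != nil {
		return fmt.Errorf("failed to marshal patch: %v", err)
	}
	if _, err := client.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes); err != nil {
		return fmt.Errorf("failed to patch node %q: %v", nodeName, err)
	}
	return nil
}
```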
package azure_file import ( + "context" "fmt" "strings" @@ -53,7 +54,7 @@ func (s *azureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, secret return "", "", fmt.Errorf("Cannot get kube client") } - keys, err := kubeClient.CoreV1().Secrets(nameSpace).Get(secretName, metav1.GetOptions{}) + keys, err := kubeClient.CoreV1().Secrets(nameSpace).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { return "", "", fmt.Errorf("Couldn't get secret %v/%v", nameSpace, secretName) } @@ -89,7 +90,7 @@ func (s *azureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accoun }, Type: "Opaque", } - _, err := kubeClient.CoreV1().Secrets(nameSpace).Create(secret) + _, err := kubeClient.CoreV1().Secrets(nameSpace).Create(context.TODO(), secret) if errors.IsAlreadyExists(err) { err = nil } diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go index 4ba29214fd9..6bb939048f0 100644 --- a/pkg/volume/cephfs/cephfs.go +++ b/pkg/volume/cephfs/cephfs.go @@ -17,6 +17,7 @@ limitations under the License. package cephfs import ( + "context" "fmt" "os" "os/exec" @@ -104,7 +105,7 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume. if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go index 60092dfc52a..a129746a9f7 100644 --- a/pkg/volume/cinder/cinder_util.go +++ b/pkg/volume/cinder/cinder_util.go @@ -19,6 +19,7 @@ limitations under the License. 
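The azure_file and cephfs changes above are instances of the same secret-lookup pattern; a generic sketch that flattens a Secret into string credentials (helper name invented):

```go
import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// secretToMap fetches a Secret with the context-first Get and converts its
// byte values to strings, as the volume plugins do for credentials.
func secretToMap(client kubernetes.Interface, namespace, name string) (map[string]string, error) {
	secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("couldn't get secret %s/%s: %v", namespace, name, err)
	}
	out := make(map[string]string, len(secret.Data))
	for key, value := range secret.Data {
		out[key] = string(value)
	}
	return out, nil
}
```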
package cinder import ( + "context" "errors" "fmt" "io/ioutil" @@ -149,7 +150,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) { // TODO: caching, currently it is overkill because it calls this function // only when it creates dynamic PV zones := make(sets.String) - nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { klog.V(2).Infof("Error listing nodes") return zones, err diff --git a/pkg/volume/csi/csi_attacher.go b/pkg/volume/csi/csi_attacher.go index a89c177e517..fc6f94fafa1 100644 --- a/pkg/volume/csi/csi_attacher.go +++ b/pkg/volume/csi/csi_attacher.go @@ -105,7 +105,7 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string }, } - _, err = c.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = c.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) alreadyExist := false if err != nil { if !apierrors.IsAlreadyExists(err) { @@ -153,7 +153,7 @@ func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, tim func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) { klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) - attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attach, err := c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { klog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) return "", fmt.Errorf("volume %v has GET error for volume attachment %v: %v", volumeHandle, attachID, err) @@ -197,7 +197,7 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No attachID := getAttachmentName(volumeHandle, driverName, string(nodeName)) klog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID)) - attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attach, err := c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { attached[spec] = false klog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err)) @@ -387,7 +387,7 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error { attachID = getAttachmentName(volID, driverName, string(nodeName)) } - if err := c.k8s.StorageV1().VolumeAttachments().Delete(attachID, nil); err != nil { + if err := c.k8s.StorageV1().VolumeAttachments().Delete(context.TODO(), attachID, nil); err != nil { if apierrors.IsNotFound(err) { // object deleted or never existed, done klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID)) @@ -413,7 +413,7 @@ func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string, tim func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) error { klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) - attach, err := c.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attach, err := c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { //object deleted or never existed, done @@ -439,7 +439,7 @@ func (c *csiAttacher) 
waitForVolumeAttachDetachStatus(attach *storage.VolumeAtta return nil } - watcher, err := c.k8s.StorageV1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion})) + watcher, err := c.k8s.StorageV1().VolumeAttachments().Watch(context.TODO(), meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion})) if err != nil { return fmt.Errorf("watch error:%v for volume %v", err, volumeHandle) } @@ -588,7 +588,7 @@ func getDriverAndVolNameFromDeviceMountPath(k8s kubernetes.Interface, deviceMoun pvName := filepath.Base(dir) // Get PV and check for errors - pv, err := k8s.CoreV1().PersistentVolumes().Get(pvName, meta.GetOptions{}) + pv, err := k8s.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, meta.GetOptions{}) if err != nil { return "", "", err } diff --git a/pkg/volume/csi/csi_attacher_test.go b/pkg/volume/csi/csi_attacher_test.go index 4a7df878aa2..c5be40cda13 100644 --- a/pkg/volume/csi/csi_attacher_test.go +++ b/pkg/volume/csi/csi_attacher_test.go @@ -17,6 +17,7 @@ limitations under the License. package csi import ( + "context" "fmt" "io/ioutil" "os" @@ -81,7 +82,7 @@ func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.R defer ticker.Stop() // wait for attachment to be saved for i := 0; i < 100; i++ { - attach, err = client.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attach, err = client.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { <-ticker.C @@ -101,7 +102,7 @@ func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.R } else { attach.Status = status t.Logf("updating attachment %s with attach status %v", attachID, status) - _, err := client.StorageV1().VolumeAttachments().Update(attach) + _, err := client.StorageV1().VolumeAttachments().Update(context.TODO(), attach) if err != nil { t.Error(err) } @@ -529,11 +530,11 @@ func TestAttacherWaitForAttach(t *testing.T) { if test.makeAttachment != nil { attachment := test.makeAttachment() - _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to create VolumeAttachment: %v", err) } - gotAttachment, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(attachment.Name, meta.GetOptions{}) + gotAttachment, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachment.Name, meta.GetOptions{}) if err != nil { t.Fatalf("failed to get created VolumeAttachment: %v", err) } @@ -611,11 +612,11 @@ func TestAttacherWaitForAttachWithInline(t *testing.T) { if test.makeAttachment != nil { attachment := test.makeAttachment() - _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to create VolumeAttachment: %v", err) } - gotAttachment, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(attachment.Name, meta.GetOptions{}) + gotAttachment, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachment.Name, meta.GetOptions{}) if err != nil { t.Fatalf("failed to get created VolumeAttachment: %v", err) } @@ -702,7 +703,7 @@ func TestAttacherWaitForVolumeAttachment(t *testing.T) { attachment := makeTestAttachment(attachID, nodeName, pvName) 
attachment.Status.Attached = tc.initAttached attachment.Status.AttachError = tc.initAttachErr - _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -800,7 +801,7 @@ func TestAttacherVolumesAreAttached(t *testing.T) { attachID := getAttachmentName(attachedSpec.volName, testDriver, nodeName) attachment := makeTestAttachment(attachID, nodeName, attachedSpec.spec.Name()) attachment.Status.Attached = attachedSpec.attached - _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -871,7 +872,7 @@ func TestAttacherVolumesAreAttachedWithInline(t *testing.T) { attachID := getAttachmentName(attachedSpec.volName, testDriver, nodeName) attachment := makeTestAttachment(attachID, nodeName, attachedSpec.spec.Name()) attachment.Status.Attached = attachedSpec.attached - _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -959,7 +960,7 @@ func TestAttacherDetach(t *testing.T) { pv := makeTestPV("test-pv", 10, testDriver, tc.volID) spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly) attachment := makeTestAttachment(tc.attachID, nodeName, "test-pv") - _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -984,7 +985,7 @@ func TestAttacherDetach(t *testing.T) { if !tc.shouldFail && err != nil { t.Fatalf("unexpected err: %v", err) } - attach, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(tc.attachID, meta.GetOptions{}) + attach, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), tc.attachID, meta.GetOptions{}) if err != nil { if !apierrors.IsNotFound(err) { t.Fatalf("unexpected err: %v", err) @@ -1185,7 +1186,7 @@ func TestAttacherMountDevice(t *testing.T) { if tc.createAttachment { // Set up volume attachment attachment := makeTestAttachment(attachID, nodeName, pvName) - _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -1335,7 +1336,7 @@ func TestAttacherMountDeviceWithInline(t *testing.T) { // Set up volume attachment attachment := makeTestAttachment(attachID, nodeName, pvName) - _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err := csiAttacher.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to attach: %v", err) } @@ -1479,7 +1480,7 @@ func TestAttacherUnmountDevice(t *testing.T) { // Make the PV for this object pvName := filepath.Base(dir) pv := makeTestPV(pvName, 5, "csi", tc.volID) - _, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(pv) + _, err := csiAttacher.k8s.CoreV1().PersistentVolumes().Create(context.TODO(), pv) if err != nil && !tc.shouldFail { t.Fatalf("Failed to create PV: %v", err) } diff --git a/pkg/volume/csi/csi_block.go 
b/pkg/volume/csi/csi_block.go index 8b46d3c3e72..43484b3ad2c 100644 --- a/pkg/volume/csi/csi_block.go +++ b/pkg/volume/csi/csi_block.go @@ -276,7 +276,7 @@ func (m *csiBlockMapper) SetUpDevice() error { // Search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName nodeName := string(m.plugin.host.GetNodeName()) attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName) - attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { return errors.New(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err)) } @@ -332,7 +332,7 @@ func (m *csiBlockMapper) MapPodDevice() (string, error) { // Search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName nodeName := string(m.plugin.host.GetNodeName()) attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName) - attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attachment, err = m.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { return "", errors.New(log("blockMapper.MapPodDevice failed to get volume attachment [id=%v]: %v", attachID, err)) } diff --git a/pkg/volume/csi/csi_block_test.go b/pkg/volume/csi/csi_block_test.go index bfe793d6ce4..11fc94e4090 100644 --- a/pkg/volume/csi/csi_block_test.go +++ b/pkg/volume/csi/csi_block_test.go @@ -17,6 +17,7 @@ limitations under the License. package csi import ( + "context" "fmt" "os" "path/filepath" @@ -257,7 +258,7 @@ func TestBlockMapperSetupDevice(t *testing.T) { attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName)) attachment := makeTestAttachment(attachID, nodeName, pvName) attachment.Status.Attached = true - _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -299,7 +300,7 @@ func TestBlockMapperMapPodDevice(t *testing.T) { attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName)) attachment := makeTestAttachment(attachID, nodeName, pvName) attachment.Status.Attached = true - _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -341,7 +342,7 @@ func TestBlockMapperMapPodDeviceNotSupportAttach(t *testing.T) { AttachRequired: &attachRequired, }, } - _, err := fakeClient.StorageV1beta1().CSIDrivers().Create(fakeDriver) + _, err := fakeClient.StorageV1beta1().CSIDrivers().Create(context.TODO(), fakeDriver) if err != nil { t.Fatalf("Failed to create a fakeDriver: %v", err) } diff --git a/pkg/volume/csi/csi_mounter_test.go b/pkg/volume/csi/csi_mounter_test.go index ef73b31ad97..84f8ca65d00 100644 --- a/pkg/volume/csi/csi_mounter_test.go +++ b/pkg/volume/csi/csi_mounter_test.go @@ -17,6 +17,7 @@ limitations under the License. 
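The attacher and block-mapper code above create a VolumeAttachment and then wait on its status, in one place via a SingleObject watch. A standalone sketch of such a wait loop, with invented names and a simplified stop condition:

```go
import (
	"context"
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
)

// watchUntilAttached blocks until the named VolumeAttachment reports
// Attached, is deleted, or the watch channel closes.
func watchUntilAttached(client kubernetes.Interface, attachID, resourceVersion string) error {
	watcher, err := client.StorageV1().VolumeAttachments().Watch(context.TODO(),
		metav1.SingleObject(metav1.ObjectMeta{Name: attachID, ResourceVersion: resourceVersion}))
	if err != nil {
		return fmt.Errorf("watch error: %v", err)
	}
	defer watcher.Stop()
	for event := range watcher.ResultChan() {
		if event.Type == watch.Deleted {
			return fmt.Errorf("volume attachment %s was deleted", attachID)
		}
		if va, ok := event.Object.(*storagev1.VolumeAttachment); ok && va.Status.Attached {
			return nil
		}
	}
	return fmt.Errorf("watch closed before %s was attached", attachID)
}
```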
package csi import ( + "context" "fmt" "math/rand" "os" @@ -212,7 +213,7 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) { DetachError: nil, }, } - _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -355,7 +356,7 @@ func TestMounterSetUpSimple(t *testing.T) { attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName())) attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name()) - _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -483,7 +484,7 @@ func TestMounterSetupWithStatusTracking(t *testing.T) { if tc.createAttachment { attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName())) attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name()) - _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -597,7 +598,7 @@ func TestMounterSetUpWithInline(t *testing.T) { if csiMounter.volumeLifecycleMode == storagev1beta1.VolumeLifecyclePersistent { attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName())) attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name()) - _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Fatalf("failed to setup VolumeAttachment: %v", err) } @@ -743,7 +744,7 @@ func TestMounterSetUpWithFSGroup(t *testing.T) { attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName())) attachment := makeTestAttachment(attachID, "test-node", pvName) - _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(attachment) + _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment) if err != nil { t.Errorf("failed to setup VolumeAttachment: %v", err) continue diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index 28009ab67f4..b112801b852 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -842,7 +842,7 @@ func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver attachID := getAttachmentName(handle, driver, nodeName) // search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName - attachment, err := client.StorageV1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + attachment, err := client.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{}) if err != nil { return nil, err // This err already has enough context ("VolumeAttachment xyz not found") } diff --git a/pkg/volume/csi/csi_util.go b/pkg/volume/csi/csi_util.go index 8201d144100..ca678bfac45 100644 --- a/pkg/volume/csi/csi_util.go +++ b/pkg/volume/csi/csi_util.go @@ -17,6 +17,7 @@ limitations under the License. 
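The attacher, block-mapper and mounter tests above all drive the fake clientset with the same context-first calls; a minimal invented test of that round trip, not one of the tests in this change:

```go
import (
	"context"
	"testing"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestVolumeAttachmentRoundTrip creates and reads back a VolumeAttachment
// against the fake clientset using the context-first signatures.
func TestVolumeAttachmentRoundTrip(t *testing.T) {
	client := fake.NewSimpleClientset()
	va := &storagev1.VolumeAttachment{ObjectMeta: metav1.ObjectMeta{Name: "va-test"}}
	if _, err := client.StorageV1().VolumeAttachments().Create(context.TODO(), va); err != nil {
		t.Fatalf("failed to create VolumeAttachment: %v", err)
	}
	got, err := client.StorageV1().VolumeAttachments().Get(context.TODO(), "va-test", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("failed to get VolumeAttachment: %v", err)
	}
	if got.Name != "va-test" {
		t.Errorf("unexpected name %q", got.Name)
	}
}
```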
package csi import ( + "context" "encoding/json" "errors" "fmt" @@ -43,7 +44,7 @@ const ( func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) { credentials := map[string]string{} - secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{}) + secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(context.TODO(), secretRef.Name, meta.GetOptions{}) if err != nil { return credentials, errors.New(log("failed to find the secret %s in the namespace %s with error: %v", secretRef.Name, secretRef.Namespace, err)) } diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index b6920ba078d..6a243550402 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -19,6 +19,7 @@ limitations under the License. package nodeinfomanager // import "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager" import ( + "context" "encoding/json" goerrors "errors" "fmt" @@ -183,7 +184,7 @@ func (nim *nodeInfoManager) tryUpdateNode(updateFuncs ...nodeUpdateFunc) error { } nodeClient := kubeClient.CoreV1().Nodes() - originalNode, err := nodeClient.Get(string(nim.nodeName), metav1.GetOptions{}) + originalNode, err := nodeClient.Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if err != nil { return err } @@ -379,7 +380,7 @@ func (nim *nodeInfoManager) tryUpdateCSINode( maxAttachLimit int64, topology map[string]string) error { - nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(string(nim.nodeName), metav1.GetOptions{}) + nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if nodeInfo == nil || errors.IsNotFound(err) { nodeInfo, err = nim.CreateCSINode() } @@ -412,7 +413,7 @@ func (nim *nodeInfoManager) InitializeCSINodeWithAnnotation() error { } func (nim *nodeInfoManager) tryInitializeCSINodeWithAnnotation(csiKubeClient clientset.Interface) error { - nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(string(nim.nodeName), metav1.GetOptions{}) + nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if nodeInfo == nil || errors.IsNotFound(err) { // CreateCSINode will set the annotation _, err = nim.CreateCSINode() @@ -424,7 +425,7 @@ func (nim *nodeInfoManager) tryInitializeCSINodeWithAnnotation(csiKubeClient cli annotationModified := setMigrationAnnotation(nim.migratedPlugins, nodeInfo) if annotationModified { - _, err := csiKubeClient.StorageV1().CSINodes().Update(nodeInfo) + _, err := csiKubeClient.StorageV1().CSINodes().Update(context.TODO(), nodeInfo) return err } return nil @@ -443,7 +444,7 @@ func (nim *nodeInfoManager) CreateCSINode() (*storagev1.CSINode, error) { return nil, fmt.Errorf("error getting CSI client") } - node, err := kubeClient.CoreV1().Nodes().Get(string(nim.nodeName), metav1.GetOptions{}) + node, err := kubeClient.CoreV1().Nodes().Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if err != nil { return nil, err } @@ -467,7 +468,7 @@ func (nim *nodeInfoManager) CreateCSINode() (*storagev1.CSINode, error) { setMigrationAnnotation(nim.migratedPlugins, nodeInfo) - return csiKubeClient.StorageV1().CSINodes().Create(nodeInfo) + return csiKubeClient.StorageV1().CSINodes().Create(context.TODO(), nodeInfo) } func setMigrationAnnotation(migratedPlugins map[string](func() bool), nodeInfo *storagev1.CSINode) (modified bool) 
{ @@ -570,7 +571,7 @@ func (nim *nodeInfoManager) installDriverToCSINode( newDriverSpecs = append(newDriverSpecs, driverSpec) nodeInfo.Spec.Drivers = newDriverSpecs - _, err := csiKubeClient.StorageV1().CSINodes().Update(nodeInfo) + _, err := csiKubeClient.StorageV1().CSINodes().Update(context.TODO(), nodeInfo) return err } @@ -601,7 +602,7 @@ func (nim *nodeInfoManager) tryUninstallDriverFromCSINode( csiDriverName string) error { nodeInfoClient := csiKubeClient.StorageV1().CSINodes() - nodeInfo, err := nodeInfoClient.Get(string(nim.nodeName), metav1.GetOptions{}) + nodeInfo, err := nodeInfoClient.Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { return nil } else if err != nil { @@ -627,7 +628,7 @@ func (nim *nodeInfoManager) tryUninstallDriverFromCSINode( } nodeInfo.Spec.Drivers = drivers - _, err = nodeInfoClient.Update(nodeInfo) + _, err = nodeInfoClient.Update(context.TODO(), nodeInfo) return err // do not wrap error diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go index 7f3b9f67002..cd8e836fa2e 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go @@ -17,6 +17,7 @@ limitations under the License. package nodeinfomanager import ( + "context" "encoding/json" "fmt" "math" @@ -985,7 +986,7 @@ func TestInstallCSIDriverExistingAnnotation(t *testing.T) { } // Assert - nodeInfo, err := client.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{}) + nodeInfo, err := client.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { t.Errorf("error getting CSINode: %v", err) continue @@ -1058,7 +1059,7 @@ func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []t node, err = applyNodeStatusPatch(tc.existingNode, action.(clienttesting.PatchActionImpl).GetPatch()) assert.NoError(t, err) } else { - node, err = client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err = client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) assert.NoError(t, err) } @@ -1073,7 +1074,7 @@ func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []t if csiNodeInfoEnabled { // CSINode validation - nodeInfo, err := client.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{}) + nodeInfo, err := client.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { if !errors.IsNotFound(err) { t.Errorf("error getting CSINode: %v", err) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 8067ba83344..6b92663922d 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -17,6 +17,7 @@ limitations under the License. 
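installDriverToCSINode above is a read-modify-write on the CSINode object; a condensed sketch of that flow (helper name invented, without the retries and validation the real code performs):

```go
import (
	"context"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// addDriverToCSINode appends a driver entry to the node's CSINode spec if it
// is not already registered, then writes it back with the context-first Update.
func addDriverToCSINode(client kubernetes.Interface, nodeName, driverName, driverNodeID string) error {
	nodeInfo, err := client.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	for _, d := range nodeInfo.Spec.Drivers {
		if d.Name == driverName {
			return nil // driver already registered
		}
	}
	nodeInfo.Spec.Drivers = append(nodeInfo.Spec.Drivers, storagev1.CSINodeDriver{
		Name:   driverName,
		NodeID: driverNodeID,
	})
	_, err = client.StorageV1().CSINodes().Update(context.TODO(), nodeInfo)
	return err
}
```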
package glusterfs import ( + "context" "fmt" "math" "math/rand" @@ -152,7 +153,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu if kubeClient == nil { return nil, fmt.Errorf("failed to get kube client to initialize mounter") } - ep, err := kubeClient.CoreV1().Endpoints(epNamespace).Get(epName, metav1.GetOptions{}) + ep, err := kubeClient.CoreV1().Endpoints(epNamespace).Get(context.TODO(), epName, metav1.GetOptions{}) if err != nil { klog.Errorf("failed to get endpoint %s: %v", epName, err) return nil, err @@ -542,7 +543,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll if kubeClient == nil { return fmt.Errorf("failed to get kube client when collecting gids") } - pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pvList, err := kubeClient.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { return fmt.Errorf("failed to get existing persistent volumes") } @@ -832,14 +833,14 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi endpoint.Subsets = subset endpoint.Subsets[0].Addresses = addrlist endpoint.Subsets[0].Ports = ports - _, err = kubeClient.CoreV1().Endpoints(epNamespace).Update(endpoint) + _, err = kubeClient.CoreV1().Endpoints(epNamespace).Update(context.TODO(), endpoint) if err != nil { deleteErr := cli.VolumeDelete(volume.Id) if deleteErr != nil { klog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr) } klog.V(3).Infof("failed to update endpoint, deleting %s", endpoint) - err = kubeClient.CoreV1().Services(epNamespace).Delete(epServiceName, nil) + err = kubeClient.CoreV1().Services(epNamespace).Delete(context.TODO(), epServiceName, nil) if err != nil && errors.IsNotFound(err) { klog.V(1).Infof("service %s does not exist in namespace %s", epServiceName, epNamespace) err = nil @@ -883,7 +884,7 @@ func (p *glusterfsVolumeProvisioner) createOrGetEndpointService(namespace string if kubeClient == nil { return nil, nil, fmt.Errorf("failed to get kube client when creating endpoint service") } - _, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint) + _, err = kubeClient.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoint) if err != nil && errors.IsAlreadyExists(err) { klog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace) err = nil @@ -903,7 +904,7 @@ func (p *glusterfsVolumeProvisioner) createOrGetEndpointService(namespace string Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ {Protocol: "TCP", Port: 1}}}} - _, err = kubeClient.CoreV1().Services(namespace).Create(service) + _, err = kubeClient.CoreV1().Services(namespace).Create(context.TODO(), service) if err != nil && errors.IsAlreadyExists(err) { klog.V(1).Infof("service %s already exist in namespace %s", service, namespace) err = nil @@ -920,7 +921,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi if kubeClient == nil { return fmt.Errorf("failed to get kube client when deleting endpoint service") } - err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil) + err = kubeClient.CoreV1().Services(namespace).Delete(context.TODO(), epServiceName, nil) if err != nil { return fmt.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err) } diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index 957a90a331a..2dbab1e54f5 
100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -17,6 +17,7 @@ limitations under the License. package iscsi import ( + "context" "fmt" "os" "path/filepath" @@ -590,7 +591,7 @@ func createSecretMap(spec *volume.Spec, plugin *iscsiPlugin, namespace string) ( if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + secretObj, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) return nil, err diff --git a/pkg/volume/portworx/portworx_util.go b/pkg/volume/portworx/portworx_util.go index d6bd04d8727..e76031132ab 100644 --- a/pkg/volume/portworx/portworx_util.go +++ b/pkg/volume/portworx/portworx_util.go @@ -17,6 +17,7 @@ limitations under the License. package portworx import ( + "context" "fmt" osdapi "github.com/libopenstorage/openstorage/api" @@ -358,7 +359,7 @@ func getPortworxService(host volume.VolumeHost) (*v1.Service, error) { } opts := metav1.GetOptions{} - svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(pxServiceName, opts) + svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(context.TODO(), pxServiceName, opts) if err != nil { klog.Errorf("Failed to get service. Err: %v", err) return nil, err diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index a07fe8bf06d..eb5caa71da6 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -17,6 +17,7 @@ limitations under the License. package rbd import ( + "context" "fmt" "os" "path/filepath" @@ -271,7 +272,7 @@ func (plugin *rbdPlugin) createMounterFromVolumeSpecAndPod(spec *volume.Spec, po if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err @@ -304,7 +305,7 @@ func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.Vol if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err @@ -486,7 +487,7 @@ func (plugin *rbdPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err diff --git a/pkg/volume/storageos/storageos_test.go b/pkg/volume/storageos/storageos_test.go index d26f24af7c5..3cfb9a54715 100644 --- a/pkg/volume/storageos/storageos_test.go +++ b/pkg/volume/storageos/storageos_test.go @@ -17,6 +17,7 @@ limitations under the License. 
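The glusterfs cleanup paths above delete endpoint Services with nil options and treat a missing object as success; distilled into a standalone sketch (invented name, interim Delete signature):

```go
import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/kubernetes"
)

// deleteServiceIgnoreNotFound removes a Service and treats NotFound as success.
func deleteServiceIgnoreNotFound(client kubernetes.Interface, namespace, name string) error {
	err := client.CoreV1().Services(namespace).Delete(context.TODO(), name, nil)
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}
```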
package storageos import ( + "context" "fmt" "os" "path/filepath" @@ -173,7 +174,7 @@ func TestPlugin(t *testing.T) { client := fake.NewSimpleClientset() - client.CoreV1().Secrets("default").Create(&v1.Secret{ + client.CoreV1().Secrets("default").Create(context.TODO(), &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, Namespace: "default", diff --git a/pkg/volume/testing/testing.go b/pkg/volume/testing/testing.go index 1d45b0dcadc..c69c73f2e4d 100644 --- a/pkg/volume/testing/testing.go +++ b/pkg/volume/testing/testing.go @@ -17,6 +17,7 @@ limitations under the License. package testing import ( + "context" "fmt" "net" "os" @@ -249,7 +250,7 @@ func (f *fakeVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) { func (f *fakeVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) { return func(namespace, name string) (*v1.Secret, error) { - return f.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) + return f.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } } @@ -259,13 +260,13 @@ func (f *fakeVolumeHost) GetExec(pluginName string) exec.Interface { func (f *fakeVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) { return func(namespace, name string) (*v1.ConfigMap, error) { - return f.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) + return f.kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } } func (f *fakeVolumeHost) GetServiceAccountTokenFunc() func(string, string, *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { return func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { - return f.kubeClient.CoreV1().ServiceAccounts(namespace).CreateToken(name, tr) + return f.kubeClient.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr) } } diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index d453c049608..684ae29f6cc 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -17,6 +17,7 @@ limitations under the License. package operationexecutor import ( + "context" goerrors "errors" "fmt" "path/filepath" @@ -1274,7 +1275,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( } // Fetch current node object - node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) + node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(nodeName), metav1.GetOptions{}) if fetchErr != nil { // On failure, return error. Caller will log and retry. return volumeToMount.GenerateError("VerifyControllerAttachedVolume failed fetching node from API server", fetchErr) @@ -1316,7 +1317,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( func (og *operationGenerator) verifyVolumeIsSafeToDetach( volumeToDetach AttachedVolume) error { // Fetch current node object - node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{}) + node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(volumeToDetach.NodeName), metav1.GetOptions{}) if fetchErr != nil { if errors.IsNotFound(fetchErr) { klog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. 
DetachVolume will skip safe to detach check", "")) @@ -1547,7 +1548,7 @@ func (og *operationGenerator) nodeExpandVolume(volumeToMount VolumeToMount, rsOp expandableVolumePlugin.RequiresFSResize() && volumeToMount.VolumeSpec.PersistentVolume != nil { pv := volumeToMount.VolumeSpec.PersistentVolume - pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(pv.Spec.ClaimRef.Name, metav1.GetOptions{}) + pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(context.TODO(), pv.Spec.ClaimRef.Name, metav1.GetOptions{}) if err != nil { // Return error rather than leave the file system un-resized, caller will log and retry return false, fmt.Errorf("MountVolume.NodeExpandVolume get PVC failed : %v", err) diff --git a/pkg/volume/util/recyclerclient/recycler_client.go b/pkg/volume/util/recyclerclient/recycler_client.go index 0011c1cf493..f66e609dadd 100644 --- a/pkg/volume/util/recyclerclient/recycler_client.go +++ b/pkg/volume/util/recyclerclient/recycler_client.go @@ -17,6 +17,7 @@ limitations under the License. package recyclerclient import ( + "context" "fmt" "sync" @@ -177,15 +178,15 @@ type realRecyclerClient struct { } func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { - return c.client.CoreV1().Pods(pod.Namespace).Create(pod) + return c.client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) } func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { - return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } func (c *realRecyclerClient) DeletePod(name, namespace string) error { - return c.client.CoreV1().Pods(namespace).Delete(name, nil) + return c.client.CoreV1().Pods(namespace).Delete(context.TODO(), name, nil) } func (c *realRecyclerClient) Event(eventtype, message string) { @@ -204,13 +205,13 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s Watch: true, } - podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options) + podWatch, err := c.client.CoreV1().Pods(namespace).Watch(context.TODO(), options) if err != nil { return nil, err } eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name) - eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{ + eventWatch, err := c.client.CoreV1().Events(namespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: eventSelector.String(), Watch: true, }) diff --git a/pkg/volume/util/resize_util.go b/pkg/volume/util/resize_util.go index 0f10f1d57bb..5a988a8d385 100644 --- a/pkg/volume/util/resize_util.go +++ b/pkg/volume/util/resize_util.go @@ -17,6 +17,7 @@ limitations under the License. package util import ( + "context" "encoding/json" "fmt" @@ -76,7 +77,7 @@ func UpdatePVSize( return fmt.Errorf("error Creating two way merge patch for PV %q with error : %v", pvClone.Name, err) } - _, err = kubeClient.CoreV1().PersistentVolumes().Patch(pvClone.Name, types.StrategicMergePatchType, patchBytes) + _, err = kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, types.StrategicMergePatchType, patchBytes) if err != nil { return fmt.Errorf("error Patching PV %q with error : %v", pvClone.Name, err) } @@ -171,7 +172,7 @@ func PatchPVCStatus( } updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace). 
- Patch(oldPVC.Name, types.StrategicMergePatchType, patchBytes, "status") + Patch(context.TODO(), oldPVC.Name, types.StrategicMergePatchType, patchBytes, "status") if updateErr != nil { return nil, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, updateErr) } diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index e3baaa7c3a6..ff32e666d78 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -17,6 +17,7 @@ limitations under the License. package util import ( + "context" "fmt" "io/ioutil" "os" @@ -116,7 +117,7 @@ func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interf if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { return secret, err } @@ -132,7 +133,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { return secret, err } @@ -155,7 +156,7 @@ func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) return nil, fmt.Errorf("Volume has no storage class") } - class, err := kubeClient.StorageV1().StorageClasses().Get(className, metav1.GetOptions{}) + class, err := kubeClient.StorageV1().StorageClasses().Get(context.TODO(), className, metav1.GetOptions{}) if err != nil { return nil, err } diff --git a/plugin/pkg/admission/exec/admission.go b/plugin/pkg/admission/exec/admission.go index 4d782a53533..1a0051dd978 100644 --- a/plugin/pkg/admission/exec/admission.go +++ b/plugin/pkg/admission/exec/admission.go @@ -122,7 +122,7 @@ func (d *DenyExec) Validate(ctx context.Context, a admission.Attributes, o admis if path != "pods/exec" && path != "pods/attach" { return nil } - pod, err := d.client.CoreV1().Pods(a.GetNamespace()).Get(a.GetName(), metav1.GetOptions{}) + pod, err := d.client.CoreV1().Pods(a.GetNamespace()).Get(context.TODO(), a.GetName(), metav1.GetOptions{}) if err != nil { return admission.NewForbidden(a, err) } diff --git a/plugin/pkg/admission/limitranger/admission.go b/plugin/pkg/admission/limitranger/admission.go index 34bbb2ff504..aeec0ed50e9 100644 --- a/plugin/pkg/admission/limitranger/admission.go +++ b/plugin/pkg/admission/limitranger/admission.go @@ -167,7 +167,7 @@ func (l *LimitRanger) GetLimitRanges(a admission.Attributes) ([]*corev1.LimitRan // If there is already in-flight List() for a given namespace, we should wait until // it is finished and cache is updated instead of doing the same, also to avoid // throttling - see #22422 for details. 
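UpdatePVSize and PatchPVCStatus above build a two-way merge patch before calling the context-first Patch; a sketch of that flow for a PV, assuming apimachinery's strategicpatch helper (the wrapper name is invented):

```go
import (
	"context"
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/client-go/kubernetes"
)

// patchPV diffs the old and new PV objects and applies the result as a
// strategic merge patch.
func patchPV(client kubernetes.Interface, oldPV, newPV *v1.PersistentVolume) error {
	oldData, err := json.Marshal(oldPV)
	if err != nil {
		return err
	}
	newData, err := json.Marshal(newPV)
	if err != nil {
		return err
	}
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPV)
	if err != nil {
		return fmt.Errorf("error creating two way merge patch for PV %q: %v", oldPV.Name, err)
	}
	_, err = client.CoreV1().PersistentVolumes().Patch(context.TODO(), oldPV.Name, types.StrategicMergePatchType, patchBytes)
	return err
}
```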
- liveList, err := l.client.CoreV1().LimitRanges(a.GetNamespace()).List(metav1.ListOptions{}) + liveList, err := l.client.CoreV1().LimitRanges(a.GetNamespace()).List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, admission.NewForbidden(a, err) } diff --git a/plugin/pkg/admission/namespace/autoprovision/admission.go b/plugin/pkg/admission/namespace/autoprovision/admission.go index fd5dc275779..33a29ab28bc 100644 --- a/plugin/pkg/admission/namespace/autoprovision/admission.go +++ b/plugin/pkg/admission/namespace/autoprovision/admission.go @@ -90,7 +90,7 @@ func (p *Provision) Admit(ctx context.Context, a admission.Attributes, o admissi Status: corev1.NamespaceStatus{}, } - _, err = p.client.CoreV1().Namespaces().Create(namespace) + _, err = p.client.CoreV1().Namespaces().Create(context.TODO(), namespace) if err != nil && !errors.IsAlreadyExists(err) { return admission.NewForbidden(a, err) } diff --git a/plugin/pkg/admission/namespace/exists/admission.go b/plugin/pkg/admission/namespace/exists/admission.go index 61314f729ba..a70be147ce2 100644 --- a/plugin/pkg/admission/namespace/exists/admission.go +++ b/plugin/pkg/admission/namespace/exists/admission.go @@ -76,7 +76,7 @@ func (e *Exists) Validate(ctx context.Context, a admission.Attributes, o admissi } // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not - _, err = e.client.CoreV1().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) + _, err = e.client.CoreV1().Namespaces().Get(context.TODO(), a.GetNamespace(), metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return err diff --git a/plugin/pkg/admission/podnodeselector/admission.go b/plugin/pkg/admission/podnodeselector/admission.go index 65e3cf75ba8..ddc5fb55b4c 100644 --- a/plugin/pkg/admission/podnodeselector/admission.go +++ b/plugin/pkg/admission/podnodeselector/admission.go @@ -223,7 +223,7 @@ func (p *Plugin) ValidateInitialization() error { } func (p *Plugin) defaultGetNamespace(name string) (*corev1.Namespace, error) { - namespace, err := p.client.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) + namespace, err := p.client.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("namespace %s does not exist", name) } diff --git a/plugin/pkg/admission/podtolerationrestriction/admission.go b/plugin/pkg/admission/podtolerationrestriction/admission.go index 966a86ab73d..25bfdc37551 100644 --- a/plugin/pkg/admission/podtolerationrestriction/admission.go +++ b/plugin/pkg/admission/podtolerationrestriction/admission.go @@ -205,7 +205,7 @@ func (p *Plugin) getNamespace(nsName string) (*corev1.Namespace, error) { namespace, err := p.namespaceLister.Get(nsName) if errors.IsNotFound(err) { // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not - namespace, err = p.client.CoreV1().Namespaces().Get(nsName, metav1.GetOptions{}) + namespace, err = p.client.CoreV1().Namespaces().Get(context.TODO(), nsName, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return nil, err diff --git a/plugin/pkg/admission/resourcequota/resource_access.go b/plugin/pkg/admission/resourcequota/resource_access.go index 8ae21a80632..820212d2283 100644 --- a/plugin/pkg/admission/resourcequota/resource_access.go +++ b/plugin/pkg/admission/resourcequota/resource_access.go @@ -17,6 +17,7 @@ limitations under the License. 
package resourcequota import ( + "context" "fmt" "time" @@ -78,7 +79,7 @@ func newQuotaAccessor() (*quotaAccessor, error) { } func (e *quotaAccessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error { - updatedQuota, err := e.client.CoreV1().ResourceQuotas(newQuota.Namespace).UpdateStatus(newQuota) + updatedQuota, err := e.client.CoreV1().ResourceQuotas(newQuota.Namespace).UpdateStatus(context.TODO(), newQuota) if err != nil { return err } @@ -125,7 +126,7 @@ func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, err // If there is already in-flight List() for a given namespace, we should wait until // it is finished and cache is updated instead of doing the same, also to avoid // throttling - see #22422 for details. - liveList, err := e.client.CoreV1().ResourceQuotas(namespace).List(metav1.ListOptions{}) + liveList, err := e.client.CoreV1().ResourceQuotas(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/plugin/pkg/admission/serviceaccount/admission.go b/plugin/pkg/admission/serviceaccount/admission.go index 17f931890f4..610d39f4251 100644 --- a/plugin/pkg/admission/serviceaccount/admission.go +++ b/plugin/pkg/admission/serviceaccount/admission.go @@ -315,7 +315,7 @@ func (s *Plugin) getServiceAccount(namespace string, name string) (*corev1.Servi if i != 0 { time.Sleep(retryInterval) } - serviceAccount, err := s.client.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + serviceAccount, err := s.client.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err == nil { return serviceAccount, nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/example.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/example.go index 2496cd2ce05..d0389d42b65 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/example.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/example.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" crv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1" @@ -61,13 +62,13 @@ func NewFilteredExampleInformer(client versioned.Interface, namespace string, re if tweakListOptions != nil { tweakListOptions(&options) } - return client.CrV1().Examples(namespace).List(options) + return client.CrV1().Examples(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CrV1().Examples(namespace).Watch(options) + return client.CrV1().Examples(namespace).Watch(context.TODO(), options) }, }, &crv1.Example{}, diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/customresourcedefinition.go index 7e15a96174e..7d1b5711126 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/customresourcedefinition.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -60,13 +61,13 @@ func NewFilteredCustomResourceDefinitionInformer(client clientset.Interface, res if tweakListOptions != nil { tweakListOptions(&options) } - return client.ApiextensionsV1().CustomResourceDefinitions().List(options) + return client.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ApiextensionsV1().CustomResourceDefinitions().Watch(options) + return client.ApiextensionsV1().CustomResourceDefinitions().Watch(context.TODO(), options) }, }, &apiextensionsv1.CustomResourceDefinition{}, diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go index 05e8c421799..489c87ae905 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredCustomResourceDefinitionInformer(client clientset.Interface, res if tweakListOptions != nil { tweakListOptions(&options) } - return client.ApiextensionsV1beta1().CustomResourceDefinitions().List(options) + return client.ApiextensionsV1beta1().CustomResourceDefinitions().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ApiextensionsV1beta1().CustomResourceDefinitions().Watch(options) + return client.ApiextensionsV1beta1().CustomResourceDefinitions().Watch(context.TODO(), options) }, }, &apiextensionsv1beta1.CustomResourceDefinition{}, diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go index 3b32a0dac6b..8f7592bbdd5 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/apiapproval/apiapproval_controller.go @@ -17,6 +17,7 @@ limitations under the License. 
package apiapproval import ( + "context" "fmt" "sync" "time" @@ -158,7 +159,7 @@ func (c *KubernetesAPIApprovalPolicyConformantConditionController) sync(key stri crd := inCustomResourceDefinition.DeepCopy() apihelpers.SetCRDCondition(crd, *cond) - _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(crd) + _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go index 1d1501669bb..bc8df0391c3 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go @@ -17,6 +17,7 @@ limitations under the License. package establish import ( + "context" "fmt" "time" @@ -135,7 +136,7 @@ func (ec *EstablishingController) sync(key string) error { apiextensionshelpers.SetCRDCondition(crd, establishedCondition) // Update server with new CRD condition. - _, err = ec.crdClient.CustomResourceDefinitions().UpdateStatus(crd) + _, err = ec.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go index aac8bd64352..dd5ed3210d8 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go @@ -17,6 +17,7 @@ limitations under the License. 
package finalizer import ( + "context" "fmt" "reflect" "time" @@ -127,7 +128,7 @@ func (c *CRDFinalizer) sync(key string) error { Reason: "InstanceDeletionInProgress", Message: "CustomResource deletion is in progress", }) - crd, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(crd) + crd, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil @@ -150,7 +151,7 @@ func (c *CRDFinalizer) sync(key string) error { cond, deleteErr := c.deleteInstances(crd) apiextensionshelpers.SetCRDCondition(crd, cond) if deleteErr != nil { - if _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(crd); err != nil { + if _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd); err != nil { utilruntime.HandleError(err) } return deleteErr @@ -165,7 +166,7 @@ func (c *CRDFinalizer) sync(key string) error { } apiextensionshelpers.CRDRemoveFinalizer(crd, apiextensionsv1.CustomResourceCleanupFinalizer) - _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(crd) + _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go index 1bdd4fbd0ac..d41f653a0c1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/nonstructuralschema/nonstructuralschema_controller.go @@ -17,6 +17,7 @@ limitations under the License. package nonstructuralschema import ( + "context" "fmt" "sync" "time" @@ -159,7 +160,7 @@ func (c *ConditionController) sync(key string) error { apiextensionshelpers.SetCRDCondition(crd, *cond) } - _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(crd) + _, err = c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go index b3abd739958..9492863b711 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go @@ -17,6 +17,7 @@ limitations under the License. 
package status import ( + "context" "fmt" "reflect" "strings" @@ -261,7 +262,7 @@ func (c *NamingConditionController) sync(key string) error { apiextensionshelpers.SetCRDCondition(crd, namingCondition) apiextensionshelpers.SetCRDCondition(crd, establishedCondition) - updatedObj, err := c.crdClient.CustomResourceDefinitions().UpdateStatus(crd) + updatedObj, err := c.crdClient.CustomResourceDefinitions().UpdateStatus(context.TODO(), crd) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiapproval_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiapproval_test.go index 5b736864f36..003e507c5d0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiapproval_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/apiapproval_test.go @@ -17,6 +17,7 @@ limitations under the License. package integration import ( + "context" "testing" "time" @@ -65,7 +66,7 @@ func TestAPIApproval(t *testing.T) { t.Fatal(err) } err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { - approvedKubeAPI, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(approvedKubeAPI.Name, metav1.GetOptions{}) + approvedKubeAPI, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), approvedKubeAPI.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go index ed5269c9c53..0302a423898 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go @@ -945,14 +945,14 @@ func TestNameConflict(t *testing.T) { } noxu2Definition := fixtures.NewNoxu2CustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(noxu2Definition) + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), noxu2Definition) if err != nil { t.Fatal(err) } // A NameConflict occurs err = wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { - crd, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxu2Definition.Name, metav1.GetOptions{}) + crd, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), noxu2Definition.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -975,7 +975,7 @@ func TestNameConflict(t *testing.T) { // Names are now accepted err = wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { - crd, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxu2Definition.Name, metav1.GetOptions{}) + crd, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), noxu2Definition.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -1019,7 +1019,7 @@ func TestStatusGetAndPatch(t *testing.T) { // make sure we don't get 405 Method Not Allowed from Patching CRD/status subresource _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions(). 
- Patch(noxuDefinition.Name, types.StrategicMergePatchType, + Patch(context.TODO(), noxuDefinition.Name, types.StrategicMergePatchType, []byte(fmt.Sprintf(`{"labels":{"test-label":"dummy"}}`)), "status") if err != nil { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go index 46e56f1b426..fd335cb6dd7 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/change_test.go @@ -17,6 +17,7 @@ limitations under the License. package integration import ( + "context" "fmt" "sync" "testing" @@ -75,7 +76,7 @@ func TestChangeCRD(t *testing.T) { time.Sleep(10 * time.Millisecond) - noxuDefinitionToUpdate, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}) + noxuDefinitionToUpdate, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), noxuDefinition.Name, metav1.GetOptions{}) if err != nil { t.Error(err) continue @@ -89,7 +90,7 @@ func TestChangeCRD(t *testing.T) { } else { noxuDefinitionToUpdate.Spec.Versions = noxuDefinitionToUpdate.Spec.Versions[0:1] } - if _, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(noxuDefinitionToUpdate); err != nil && !apierrors.IsConflict(err) { + if _, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), noxuDefinitionToUpdate); err != nil && !apierrors.IsConflict(err) { t.Error(err) continue } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/conversion_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/conversion_test.go index 5c18683507b..fcce485eb9b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/conversion_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/conversion/conversion_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package conversion import ( + "context" "encoding/json" "fmt" "net/http" @@ -904,7 +905,7 @@ func newConversionTestContext(t *testing.T, apiExtensionsClient clientset.Interf if err != nil { t.Fatal(err) } - crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(v1CRD.Name, metav1.GetOptions{}) + crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), v1CRD.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -943,7 +944,7 @@ func (c *conversionTestContext) versionedClients(ns string) map[string]dynamic.R } func (c *conversionTestContext) setConversionWebhook(t *testing.T, webhookClientConfig *apiextensionsv1beta1.WebhookClientConfig, reviewVersions []string) { - crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{}) + crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), c.crd.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -952,7 +953,7 @@ func (c *conversionTestContext) setConversionWebhook(t *testing.T, webhookClient WebhookClientConfig: webhookClientConfig, ConversionReviewVersions: reviewVersions, } - crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) + crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) if err != nil { t.Fatal(err) } @@ -961,7 +962,7 @@ func (c *conversionTestContext) setConversionWebhook(t *testing.T, webhookClient } func (c *conversionTestContext) removeConversionWebhook(t *testing.T) { - crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{}) + crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), c.crd.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -969,7 +970,7 @@ func (c *conversionTestContext) removeConversionWebhook(t *testing.T) { Strategy: apiextensionsv1beta1.NoneConverter, } - crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) + crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) if err != nil { t.Fatal(err) } @@ -997,14 +998,14 @@ func (c *conversionTestContext) setAndWaitStorageVersion(t *testing.T, version s } func (c *conversionTestContext) setStorageVersion(t *testing.T, version string) { - crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{}) + crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), c.crd.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } for i, v := range crd.Spec.Versions { crd.Spec.Versions[i].Storage = v.Name == version } - crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) + crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) if err != nil { t.Fatal(err) } @@ -1026,7 +1027,7 @@ func (c *conversionTestContext) waitForStorageVersion(t *testing.T, version stri } func (c *conversionTestContext) setServed(t *testing.T, version string, served bool) { - crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{}) + crd, err := 
c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), c.crd.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -1035,7 +1036,7 @@ func (c *conversionTestContext) setServed(t *testing.T, version string, served b crd.Spec.Versions[i].Served = served } } - crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) + crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/defaulting_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/defaulting_test.go index 3505b4097bd..d5c7883bc1f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/defaulting_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/defaulting_test.go @@ -17,6 +17,7 @@ limitations under the License. package integration import ( + "context" "fmt" "reflect" "strings" @@ -206,12 +207,12 @@ func testDefaulting(t *testing.T, watchCache bool) { var err error for retry := 0; retry < 10; retry++ { var obj *apiextensionsv1.CustomResourceDefinition - obj, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + obj, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } update(obj) - obj, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(obj) + obj, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), obj) if err != nil && apierrors.IsConflict(err) { continue } else if err != nil { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go index 745aa585416..b933cdd025b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go @@ -17,6 +17,7 @@ limitations under the License. package integration import ( + "context" "testing" "time" @@ -156,7 +157,7 @@ func TestFinalizationAndDeletion(t *testing.T) { } err = wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { - _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}) + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), noxuDefinition.Name, metav1.GetOptions{}) return errors.IsNotFound(err), err }) if !errors.IsNotFound(err) { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go index 9f53896c023..a48d7f40988 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures/resources.go @@ -17,6 +17,7 @@ limitations under the License. package fixtures import ( + "context" "fmt" "time" @@ -325,7 +326,7 @@ func existsInDiscoveryV1(crd *apiextensionsv1.CustomResourceDefinition, apiExten // the created CR. Please call CreateNewCustomResourceDefinition if you need to // watch the CR. 
func CreateNewCustomResourceDefinitionWatchUnsafe(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) (*apiextensionsv1beta1.CustomResourceDefinition, error) { - crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) + crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd) if err != nil { return nil, err } @@ -374,11 +375,11 @@ func CreateNewCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceD // the created CR. Please call CreateNewV1CustomResourceDefinition if you need to // watch the CR. func CreateNewV1CustomResourceDefinitionWatchUnsafe(v1CRD *apiextensionsv1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) (*apiextensionsv1.CustomResourceDefinition, error) { - v1CRD, err := apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Create(v1CRD) + v1CRD, err := apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), v1CRD) if err != nil { return nil, err } - crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(v1CRD.Name, metav1.GetOptions{}) + crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), v1CRD.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -402,7 +403,7 @@ func CreateNewV1CustomResourceDefinition(v1CRD *apiextensionsv1.CustomResourceDe if err != nil { return nil, err } - crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(v1CRD.Name, metav1.GetOptions{}) + crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), v1CRD.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -506,7 +507,7 @@ func isWatchCachePrimed(crd *apiextensionsv1beta1.CustomResourceDefinition, dyna // DeleteCustomResourceDefinition deletes a CRD and waits until it disappears from discovery. func DeleteCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) error { - if err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(crd.Name, nil); err != nil { + if err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, nil); err != nil { return err } for _, version := range servedVersions(crd) { @@ -523,7 +524,7 @@ func DeleteCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefi // DeleteV1CustomResourceDefinition deletes a CRD and waits until it disappears from discovery. func DeleteV1CustomResourceDefinition(crd *apiextensionsv1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) error { - if err := apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Delete(crd.Name, nil); err != nil { + if err := apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Delete(context.TODO(), crd.Name, nil); err != nil { return err } for _, version := range servedV1Versions(crd) { @@ -540,11 +541,11 @@ func DeleteV1CustomResourceDefinition(crd *apiextensionsv1.CustomResourceDefinit // DeleteCustomResourceDefinitions deletes all CRD matching the provided deleteListOpts and waits until all the CRDs disappear from discovery. 
func DeleteCustomResourceDefinitions(deleteListOpts metav1.ListOptions, apiExtensionsClient clientset.Interface) error { - list, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().List(deleteListOpts) + list, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().List(context.TODO(), deleteListOpts) if err != nil { return err } - if err = apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().DeleteCollection(nil, deleteListOpts); err != nil { + if err = apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().DeleteCollection(context.TODO(), nil, deleteListOpts); err != nil { return err } for _, crd := range list.Items { @@ -563,11 +564,11 @@ func DeleteCustomResourceDefinitions(deleteListOpts metav1.ListOptions, apiExten // DeleteV1CustomResourceDefinitions deletes all CRD matching the provided deleteListOpts and waits until all the CRDs disappear from discovery. func DeleteV1CustomResourceDefinitions(deleteListOpts metav1.ListOptions, apiExtensionsClient clientset.Interface) error { - list, err := apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().List(deleteListOpts) + list, err := apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), deleteListOpts) if err != nil { return err } - if err = apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().DeleteCollection(nil, deleteListOpts); err != nil { + if err = apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().DeleteCollection(context.TODO(), nil, deleteListOpts); err != nil { return err } for _, crd := range list.Items { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go index 121f065b616..9b5e9196014 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go @@ -17,6 +17,7 @@ limitations under the License. package integration import ( + "context" "fmt" "testing" @@ -80,12 +81,12 @@ func newNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd // UpdateCustomResourceDefinitionWithRetry updates a CRD, retrying up to 5 times on version conflict errors. func UpdateCustomResourceDefinitionWithRetry(client clientset.Interface, name string, update func(*apiextensionsv1beta1.CustomResourceDefinition)) (*apiextensionsv1beta1.CustomResourceDefinition, error) { for i := 0; i < 5; i++ { - crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get CustomResourceDefinition %q: %v", name, err) } update(crd) - crd, err = client.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) + crd, err = client.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) if err == nil { return crd, nil } @@ -99,12 +100,12 @@ func UpdateCustomResourceDefinitionWithRetry(client clientset.Interface, name st // UpdateV1CustomResourceDefinitionWithRetry updates a CRD, retrying up to 5 times on version conflict errors. 
func UpdateV1CustomResourceDefinitionWithRetry(client clientset.Interface, name string, update func(*apiextensionsv1.CustomResourceDefinition)) (*apiextensionsv1.CustomResourceDefinition, error) { for i := 0; i < 5; i++ { - crd, err := client.ApiextensionsV1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + crd, err := client.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get CustomResourceDefinition %q: %v", name, err) } update(crd) - crd, err = client.ApiextensionsV1().CustomResourceDefinitions().Update(crd) + crd, err = client.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crd) if err == nil { return crd, nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go index d7ce6cf6713..5db5c1ee571 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go @@ -240,7 +240,7 @@ func TestDeRegistrationAndReRegistration(t *testing.T) { if err := fixtures.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient); err != nil { t.Fatal(err) } - if _, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}); err == nil || !errors.IsNotFound(err) { + if _, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), noxuDefinition.Name, metav1.GetOptions{}); err == nil || !errors.IsNotFound(err) { t.Fatalf("expected a NotFound error, got:%v", err) } if _, err = noxuNamespacedResourceClient.List(metav1.ListOptions{}); err == nil || !errors.IsNotFound(err) { @@ -252,7 +252,7 @@ func TestDeRegistrationAndReRegistration(t *testing.T) { }() func() { - if _, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}); err == nil || !errors.IsNotFound(err) { + if _, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), noxuDefinition.Name, metav1.GetOptions{}); err == nil || !errors.IsNotFound(err) { t.Fatalf("expected a NotFound error, got:%v", err) } noxuDefinition, err := fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go index 66df20f5336..ca250c34586 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go @@ -124,7 +124,7 @@ func TestTableGet(t *testing.T) { t.Fatal(err) } - crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -388,12 +388,12 @@ func TestColumnsPatch(t *testing.T) { // error about top-level and per-version columns being mutual exclusive. 
patch := []byte(`{"spec":{"versions":[{"name":"v1beta1","served":true,"storage":true,"additionalPrinterColumns":[{"name":"Age","type":"date","JSONPath":".metadata.creationTimestamp"}]},{"name":"v1","served":true,"storage":false,"additionalPrinterColumns":[{"name":"Age2","type":"date","JSONPath":".metadata.creationTimestamp"}]}]}}`) - _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch) + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.MergePatchType, patch) if err != nil { t.Fatal(err) } - crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -434,12 +434,12 @@ func TestPatchCleanTopLevelColumns(t *testing.T) { // the top-level columns. patch := []byte(`{"spec":{"additionalPrinterColumns":null,"versions":[{"name":"v1beta1","served":true,"storage":true,"additionalPrinterColumns":[{"name":"Age","type":"date","JSONPath":".metadata.creationTimestamp"}]},{"name":"v1","served":true,"storage":false,"additionalPrinterColumns":[{"name":"Age2","type":"date","JSONPath":".metadata.creationTimestamp"}]}]}}`) - _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch) + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.MergePatchType, patch) if err != nil { t.Fatal(err) } - crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go index 50d1a5e76cf..221e9e9cab0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package integration import ( + "context" "fmt" "strings" "testing" @@ -758,7 +759,7 @@ spec: // create CRDs t.Logf("Creating CRD %s", crd.Name) - if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil { + if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd); err != nil { t.Fatalf("unexpected create error: %v", err) } @@ -766,7 +767,7 @@ spec: t.Log("Waiting for NonStructuralSchema condition") var cond *apiextensionsv1beta1.CustomResourceDefinitionCondition err = wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) { - obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -783,12 +784,12 @@ spec: // remove schema t.Log("Remove schema") for retry := 0; retry < 5; retry++ { - crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { t.Fatalf("unexpected get error: %v", err) } crd.Spec.Validation = nil - if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd); apierrors.IsConflict(err) { + if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd); apierrors.IsConflict(err) { continue } if err != nil { @@ -802,7 +803,7 @@ spec: // wait for condition to go away t.Log("Wait for condition to disappear") err = wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) { - obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -816,12 +817,12 @@ spec: // readd schema t.Log("Readd schema") for retry := 0; retry < 5; retry++ { - crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { t.Fatalf("unexpected get error: %v", err) } crd.Spec.Validation = &apiextensionsv1beta1.CustomResourceValidation{OpenAPIV3Schema: origSchema} - if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd); apierrors.IsConflict(err) { + if _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd); apierrors.IsConflict(err) { continue } if err != nil { @@ -835,7 +836,7 @@ spec: // wait for condition with violations t.Log("Wait for condition to reappear") err = wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) { - obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -1608,7 +1609,7 @@ properties: crd.Name = fmt.Sprintf("foos.%s", crd.Spec.Group) // create CRDs - crd, err = 
apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd) if len(tst.expectedCreateErrors) > 0 && err == nil { t.Fatalf("expected create errors, got none") } else if len(tst.expectedCreateErrors) == 0 && err != nil { @@ -1633,7 +1634,7 @@ properties: // wait for condition to not appear var cond *apiextensionsv1beta1.CustomResourceDefinitionCondition err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) { - obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -1652,7 +1653,7 @@ properties: // wait for condition to appear with the given violations var cond *apiextensionsv1beta1.CustomResourceDefinitionCondition err = wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { - obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + obj, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go index 4fdccff9dad..469585be28e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package integration import ( + "context" "fmt" "net/http" "reflect" @@ -210,7 +211,7 @@ func testStoragedVersionInCRDStatus(t *testing.T, ns string, noxuDefinition *api } // The storage version list should be initilized to storage version - crd, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}) + crd, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), noxuDefinition.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -220,11 +221,11 @@ func testStoragedVersionInCRDStatus(t *testing.T, ns string, noxuDefinition *api // Changing CRD storage version should be reflected immediately crd.Spec.Versions = versionsV1Beta2Storage - _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), crd) if err != nil { t.Fatal(err) } - crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}) + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), noxuDefinition.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index 5db82404583..779ab425d82 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -154,7 +154,7 @@ func (l *Lifecycle) Admit(ctx context.Context, a admission.Attributes, o admissi // refuse to operate on non-existent namespaces if !exists || forceLiveLookup { // as a last resort, make a call directly to storage - namespace, err = l.client.CoreV1().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) + namespace, err = l.client.CoreV1().Namespaces().Get(context.TODO(), a.GetNamespace(), metav1.GetOptions{}) switch { case errors.IsNotFound(err): return err diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go index eb7d5ec4c86..183be7b39a4 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go @@ -17,6 +17,7 @@ limitations under the License. package namespace import ( + "context" "fmt" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -76,7 +77,7 @@ func (m *Matcher) GetNamespaceLabels(attr admission.Attributes) (map[string]stri } if apierrors.IsNotFound(err) { // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not - namespace, err = m.Client.CoreV1().Namespaces().Get(namespaceName, metav1.GetOptions{}) + namespace, err = m.Client.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go index 709817637fe..719c2c5f4cc 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go @@ -17,6 +17,7 @@ limitations under the License. 
package options import ( + "context" "encoding/json" "fmt" "strings" @@ -339,7 +340,7 @@ func (s *DelegatingAuthenticationOptions) createRequestHeaderConfig(client kuber return nil, fmt.Errorf("unable to create request header authentication config: %v", err) } - authConfigMap, err := client.CoreV1().ConfigMaps(authenticationConfigMapNamespace).Get(authenticationConfigMapName, metav1.GetOptions{}) + authConfigMap, err := client.CoreV1().ConfigMaps(authenticationConfigMapNamespace).Get(context.TODO(), authenticationConfigMapName, metav1.GetOptions{}) switch { case errors.IsNotFound(err): // ignore, authConfigMap is nil now diff --git a/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go b/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go index 32aadb7d99e..9d005d828cc 100644 --- a/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go +++ b/staging/src/k8s.io/client-go/examples/create-update-delete-deployment/main.go @@ -19,6 +19,7 @@ package main import ( "bufio" + "context" "flag" "fmt" "os" @@ -100,7 +101,7 @@ func main() { // Create Deployment fmt.Println("Creating deployment...") - result, err := deploymentsClient.Create(deployment) + result, err := deploymentsClient.Create(context.TODO(), deployment) if err != nil { panic(err) } @@ -125,14 +126,14 @@ func main() { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of Deployment before attempting update // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver - result, getErr := deploymentsClient.Get("demo-deployment", metav1.GetOptions{}) + result, getErr := deploymentsClient.Get(context.TODO(), "demo-deployment", metav1.GetOptions{}) if getErr != nil { panic(fmt.Errorf("Failed to get latest version of Deployment: %v", getErr)) } result.Spec.Replicas = int32Ptr(1) // reduce replica count result.Spec.Template.Spec.Containers[0].Image = "nginx:1.13" // change nginx version - _, updateErr := deploymentsClient.Update(result) + _, updateErr := deploymentsClient.Update(context.TODO(), result) return updateErr }) if retryErr != nil { @@ -143,7 +144,7 @@ func main() { // List Deployments prompt() fmt.Printf("Listing deployments in namespace %q:\n", apiv1.NamespaceDefault) - list, err := deploymentsClient.List(metav1.ListOptions{}) + list, err := deploymentsClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { panic(err) } @@ -155,7 +156,7 @@ func main() { prompt() fmt.Println("Deleting deployment...") deletePolicy := metav1.DeletePropagationForeground - if err := deploymentsClient.Delete("demo-deployment", &metav1.DeleteOptions{ + if err := deploymentsClient.Delete(context.TODO(), "demo-deployment", &metav1.DeleteOptions{ PropagationPolicy: &deletePolicy, }); err != nil { panic(err) diff --git a/staging/src/k8s.io/client-go/examples/fake-client/main_test.go b/staging/src/k8s.io/client-go/examples/fake-client/main_test.go index f478a013699..f523279ec7a 100644 --- a/staging/src/k8s.io/client-go/examples/fake-client/main_test.go +++ b/staging/src/k8s.io/client-go/examples/fake-client/main_test.go @@ -59,7 +59,7 @@ func TestFakeClient(t *testing.T) { // Inject an event into the fake client. 
p := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "my-pod"}} - _, err := client.CoreV1().Pods("test-ns").Create(p) + _, err := client.CoreV1().Pods("test-ns").Create(context.TODO(), p) if err != nil { t.Fatalf("error injecting pod add: %v", err) } diff --git a/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/main.go b/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/main.go index 80c80cfb971..a8c71612e31 100644 --- a/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/main.go +++ b/staging/src/k8s.io/client-go/examples/in-cluster-client-configuration/main.go @@ -18,6 +18,7 @@ limitations under the License. package main import ( + "context" "fmt" "time" @@ -50,7 +51,7 @@ func main() { for { // get pods in all the namespaces by omitting namespace // Or specify namespace to get pods in particular namespace - pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{}) + pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{}) if err != nil { panic(err.Error()) } @@ -59,7 +60,7 @@ func main() { // Examples for error handling: // - Use helper functions e.g. errors.IsNotFound() // - And/or cast to StatusError and use its properties like e.g. ErrStatus.Message - _, err = clientset.CoreV1().Pods("default").Get("example-xxxxx", metav1.GetOptions{}) + _, err = clientset.CoreV1().Pods("default").Get(context.TODO(), "example-xxxxx", metav1.GetOptions{}) if errors.IsNotFound(err) { fmt.Printf("Pod example-xxxxx not found in default namespace\n") } else if statusError, isStatus := err.(*errors.StatusError); isStatus { diff --git a/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go b/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go index 9718582c9d1..d5f183d3c40 100644 --- a/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go +++ b/staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go @@ -18,6 +18,7 @@ limitations under the License. package main import ( + "context" "flag" "fmt" "os" @@ -60,7 +61,7 @@ func main() { panic(err.Error()) } for { - pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{}) + pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{}) if err != nil { panic(err.Error()) } @@ -71,7 +72,7 @@ func main() { // - And/or cast to StatusError and use its properties like e.g. ErrStatus.Message namespace := "default" pod := "example-xxxxx" - _, err = clientset.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{}) + _, err = clientset.CoreV1().Pods(namespace).Get(context.TODO(), pod, metav1.GetOptions{}) if errors.IsNotFound(err) { fmt.Printf("Pod %s in namespace %s not found\n", pod, namespace) } else if statusError, isStatus := err.(*errors.StatusError); isStatus { diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go index 4fadd9a216b..b768f6f7f39 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" @@ -60,13 +61,13 @@ func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(options) + return client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(options) + return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(context.TODO(), options) }, }, &admissionregistrationv1.MutatingWebhookConfiguration{}, diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go index 1c648e6081a..8ddcdf2d905 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" @@ -60,13 +61,13 @@ func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interfa if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(options) + return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(options) + return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(context.TODO(), options) }, }, &admissionregistrationv1.ValidatingWebhookConfiguration{}, diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index a06c406c2c5..12c8ec1fbdd 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List(options) + return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(options) + return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(context.TODO(), options) }, }, &admissionregistrationv1beta1.MutatingWebhookConfiguration{}, diff --git a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 3b7fafd29c0..05eb05097f5 100644 --- a/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interfa if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(options) + return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(options) + return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(context.TODO(), options) }, }, &admissionregistrationv1beta1.ValidatingWebhookConfiguration{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go index 2f69e0df01e..31e2b74d0f3 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/controllerrevision.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" appsv1 "k8s.io/api/apps/v1" @@ -61,13 +62,13 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().ControllerRevisions(namespace).List(options) + return client.AppsV1().ControllerRevisions(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().ControllerRevisions(namespace).Watch(options) + return client.AppsV1().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, &appsv1.ControllerRevision{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go b/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go index db649ccbf27..da7fe9509b9 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/daemonset.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" appsv1 "k8s.io/api/apps/v1" @@ -61,13 +62,13 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().DaemonSets(namespace).List(options) + return client.AppsV1().DaemonSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().DaemonSets(namespace).Watch(options) + return client.AppsV1().DaemonSets(namespace).Watch(context.TODO(), options) }, }, &appsv1.DaemonSet{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go b/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go index 71cd002733f..bd639bb3d90 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/deployment.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" appsv1 "k8s.io/api/apps/v1" @@ -61,13 +62,13 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().Deployments(namespace).List(options) + return client.AppsV1().Deployments(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().Deployments(namespace).Watch(options) + return client.AppsV1().Deployments(namespace).Watch(context.TODO(), options) }, }, &appsv1.Deployment{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go b/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go index 6ee7a0537e2..6d81a471a44 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/replicaset.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" appsv1 "k8s.io/api/apps/v1" @@ -61,13 +62,13 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().ReplicaSets(namespace).List(options) + return client.AppsV1().ReplicaSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().ReplicaSets(namespace).Watch(options) + return client.AppsV1().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, &appsv1.ReplicaSet{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go b/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go index 385e6536600..c99bbb73ed8 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1/statefulset.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" appsv1 "k8s.io/api/apps/v1" @@ -61,13 +62,13 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().StatefulSets(namespace).List(options) + return client.AppsV1().StatefulSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1().StatefulSets(namespace).Watch(options) + return client.AppsV1().StatefulSets(namespace).Watch(context.TODO(), options) }, }, &appsv1.StatefulSet{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go index c7d3e30e044..cb36bd7fd84 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().ControllerRevisions(namespace).List(options) + return client.AppsV1beta1().ControllerRevisions(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().ControllerRevisions(namespace).Watch(options) + return client.AppsV1beta1().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, &appsv1beta1.ControllerRevision{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go index 03bafca6ba1..e02a13c2f4c 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta1/deployment.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().Deployments(namespace).List(options) + return client.AppsV1beta1().Deployments(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().Deployments(namespace).Watch(options) + return client.AppsV1beta1().Deployments(namespace).Watch(context.TODO(), options) }, }, &appsv1beta1.Deployment{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go index e4d1b46fa6a..b845cc99c98 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta1/statefulset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().StatefulSets(namespace).List(options) + return client.AppsV1beta1().StatefulSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta1().StatefulSets(namespace).Watch(options) + return client.AppsV1beta1().StatefulSets(namespace).Watch(context.TODO(), options) }, }, &appsv1beta1.StatefulSet{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go index 975e81077ef..4d0e91320b6 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" time "time" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -61,13 +62,13 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().ControllerRevisions(namespace).List(options) + return client.AppsV1beta2().ControllerRevisions(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().ControllerRevisions(namespace).Watch(options) + return client.AppsV1beta2().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, &appsv1beta2.ControllerRevision{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go index 99f17fa6c4c..280e2fe4656 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/daemonset.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta2 import ( + "context" time "time" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -61,13 +62,13 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().DaemonSets(namespace).List(options) + return client.AppsV1beta2().DaemonSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().DaemonSets(namespace).Watch(options) + return client.AppsV1beta2().DaemonSets(namespace).Watch(context.TODO(), options) }, }, &appsv1beta2.DaemonSet{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go index b25da82bde1..67bdb79720a 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/deployment.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" time "time" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -61,13 +62,13 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().Deployments(namespace).List(options) + return client.AppsV1beta2().Deployments(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().Deployments(namespace).Watch(options) + return client.AppsV1beta2().Deployments(namespace).Watch(context.TODO(), options) }, }, &appsv1beta2.Deployment{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go index 6ce7fcfd0d1..85d12bb65da 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/replicaset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta2 import ( + "context" time "time" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -61,13 +62,13 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().ReplicaSets(namespace).List(options) + return client.AppsV1beta2().ReplicaSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().ReplicaSets(namespace).Watch(options) + return client.AppsV1beta2().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, &appsv1beta2.ReplicaSet{}, diff --git a/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go index e77bb2f8fd6..2fab6f7b2bf 100644 --- a/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go +++ b/staging/src/k8s.io/client-go/informers/apps/v1beta2/statefulset.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta2 import ( + "context" time "time" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -61,13 +62,13 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().StatefulSets(namespace).List(options) + return client.AppsV1beta2().StatefulSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AppsV1beta2().StatefulSets(namespace).Watch(options) + return client.AppsV1beta2().StatefulSets(namespace).Watch(context.TODO(), options) }, }, &appsv1beta2.StatefulSet{}, diff --git a/staging/src/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go b/staging/src/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go index 69778ad2cfe..ef178c3aa82 100644 --- a/staging/src/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go +++ b/staging/src/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredAuditSinkInformer(client kubernetes.Interface, resyncPeriod time if tweakListOptions != nil { tweakListOptions(&options) } - return client.AuditregistrationV1alpha1().AuditSinks().List(options) + return client.AuditregistrationV1alpha1().AuditSinks().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AuditregistrationV1alpha1().AuditSinks().Watch(options) + return client.AuditregistrationV1alpha1().AuditSinks().Watch(context.TODO(), options) }, }, &auditregistrationv1alpha1.AuditSink{}, diff --git a/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go index 205e4ecd794..44f041e906b 100644 --- a/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -61,13 +62,13 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(options) + return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(options) + return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, &autoscalingv1.HorizontalPodAutoscaler{}, diff --git a/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go index 4627c5a0b5c..6385a2a190a 100644 --- a/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,6 +19,7 @@ limitations under the License. package v2beta1 import ( + "context" time "time" autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" @@ -61,13 +62,13 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).List(options) + return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(options) + return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, &autoscalingv2beta1.HorizontalPodAutoscaler{}, diff --git a/staging/src/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go b/staging/src/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go index b4863f9b74d..f1ac3f0737c 100644 --- a/staging/src/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/staging/src/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,6 +19,7 @@ limitations under the License. 
package v2beta2 import ( + "context" time "time" autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" @@ -61,13 +62,13 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).List(options) + return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Watch(options) + return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, &autoscalingv2beta2.HorizontalPodAutoscaler{}, diff --git a/staging/src/k8s.io/client-go/informers/batch/v1/job.go b/staging/src/k8s.io/client-go/informers/batch/v1/job.go index 30d41104adb..4992f522863 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v1/job.go +++ b/staging/src/k8s.io/client-go/informers/batch/v1/job.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" batchv1 "k8s.io/api/batch/v1" @@ -61,13 +62,13 @@ func NewFilteredJobInformer(client kubernetes.Interface, namespace string, resyn if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV1().Jobs(namespace).List(options) + return client.BatchV1().Jobs(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV1().Jobs(namespace).Watch(options) + return client.BatchV1().Jobs(namespace).Watch(context.TODO(), options) }, }, &batchv1.Job{}, diff --git a/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go index 0b7598e0f83..820c93eaaaf 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go +++ b/staging/src/k8s.io/client-go/informers/batch/v1beta1/cronjob.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" batchv1beta1 "k8s.io/api/batch/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV1beta1().CronJobs(namespace).List(options) + return client.BatchV1beta1().CronJobs(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV1beta1().CronJobs(namespace).Watch(options) + return client.BatchV1beta1().CronJobs(namespace).Watch(context.TODO(), options) }, }, &batchv1beta1.CronJob{}, diff --git a/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go b/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go index 20cf7d498d1..5f5b870d4b6 100644 --- a/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go +++ b/staging/src/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go @@ -19,6 +19,7 @@ limitations under the License. 
package v2alpha1 import ( + "context" time "time" batchv2alpha1 "k8s.io/api/batch/v2alpha1" @@ -61,13 +62,13 @@ func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV2alpha1().CronJobs(namespace).List(options) + return client.BatchV2alpha1().CronJobs(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.BatchV2alpha1().CronJobs(namespace).Watch(options) + return client.BatchV2alpha1().CronJobs(namespace).Watch(context.TODO(), options) }, }, &batchv2alpha1.CronJob{}, diff --git a/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go index 6472d20e296..4e167ab8b13 100644 --- a/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go +++ b/staging/src/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.CertificatesV1beta1().CertificateSigningRequests().List(options) + return client.CertificatesV1beta1().CertificateSigningRequests().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CertificatesV1beta1().CertificateSigningRequests().Watch(options) + return client.CertificatesV1beta1().CertificateSigningRequests().Watch(context.TODO(), options) }, }, &certificatesv1beta1.CertificateSigningRequest{}, diff --git a/staging/src/k8s.io/client-go/informers/coordination/v1/lease.go b/staging/src/k8s.io/client-go/informers/coordination/v1/lease.go index b8a3de471c9..e538923a86b 100644 --- a/staging/src/k8s.io/client-go/informers/coordination/v1/lease.go +++ b/staging/src/k8s.io/client-go/informers/coordination/v1/lease.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" coordinationv1 "k8s.io/api/coordination/v1" @@ -61,13 +62,13 @@ func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, res if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoordinationV1().Leases(namespace).List(options) + return client.CoordinationV1().Leases(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoordinationV1().Leases(namespace).Watch(options) + return client.CoordinationV1().Leases(namespace).Watch(context.TODO(), options) }, }, &coordinationv1.Lease{}, diff --git a/staging/src/k8s.io/client-go/informers/coordination/v1beta1/lease.go b/staging/src/k8s.io/client-go/informers/coordination/v1beta1/lease.go index bb59be13e12..5a6959c0ba9 100644 --- a/staging/src/k8s.io/client-go/informers/coordination/v1beta1/lease.go +++ b/staging/src/k8s.io/client-go/informers/coordination/v1beta1/lease.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, res if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoordinationV1beta1().Leases(namespace).List(options) + return client.CoordinationV1beta1().Leases(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoordinationV1beta1().Leases(namespace).Watch(options) + return client.CoordinationV1beta1().Leases(namespace).Watch(context.TODO(), options) }, }, &coordinationv1beta1.Lease{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go b/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go index a5ae6fc4962..ccdee535bc2 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/componentstatus.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -60,13 +61,13 @@ func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPerio if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ComponentStatuses().List(options) + return client.CoreV1().ComponentStatuses().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ComponentStatuses().Watch(options) + return client.CoreV1().ComponentStatuses().Watch(context.TODO(), options) }, }, &corev1.ComponentStatus{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/configmap.go b/staging/src/k8s.io/client-go/informers/core/v1/configmap.go index 48cb1a48e4f..6253581784e 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/configmap.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/configmap.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ConfigMaps(namespace).List(options) + return client.CoreV1().ConfigMaps(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ConfigMaps(namespace).Watch(options) + return client.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), options) }, }, &corev1.ConfigMap{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go b/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go index 77fa8cf8a01..cd0f25b7f70 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/endpoints.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Endpoints(namespace).List(options) + return client.CoreV1().Endpoints(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Endpoints(namespace).Watch(options) + return client.CoreV1().Endpoints(namespace).Watch(context.TODO(), options) }, }, &corev1.Endpoints{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/event.go b/staging/src/k8s.io/client-go/informers/core/v1/event.go index 52f4911c1df..8825e9b7a49 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/event.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/event.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Events(namespace).List(options) + return client.CoreV1().Events(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Events(namespace).Watch(options) + return client.CoreV1().Events(namespace).Watch(context.TODO(), options) }, }, &corev1.Event{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go b/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go index 7499e1869d2..4cbfda1f7a6 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/limitrange.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().LimitRanges(namespace).List(options) + return client.CoreV1().LimitRanges(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().LimitRanges(namespace).Watch(options) + return client.CoreV1().LimitRanges(namespace).Watch(context.TODO(), options) }, }, &corev1.LimitRange{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/namespace.go b/staging/src/k8s.io/client-go/informers/core/v1/namespace.go index 57a073355a0..506f930a7d5 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/namespace.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/namespace.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -60,13 +61,13 @@ func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Namespaces().List(options) + return client.CoreV1().Namespaces().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Namespaces().Watch(options) + return client.CoreV1().Namespaces().Watch(context.TODO(), options) }, }, &corev1.Namespace{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/node.go b/staging/src/k8s.io/client-go/informers/core/v1/node.go index d9b85f83c29..9939fc2cb6c 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/node.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/node.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -60,13 +61,13 @@ func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Dura if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Nodes().List(options) + return client.CoreV1().Nodes().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Nodes().Watch(options) + return client.CoreV1().Nodes().Watch(context.TODO(), options) }, }, &corev1.Node{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go index a50bcfc663f..c82445997ca 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolume.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -60,13 +61,13 @@ func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PersistentVolumes().List(options) + return client.CoreV1().PersistentVolumes().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PersistentVolumes().Watch(options) + return client.CoreV1().PersistentVolumes().Watch(context.TODO(), options) }, }, &corev1.PersistentVolume{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go index 3fb5e5f6ccb..7a7df1cff81 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, names if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PersistentVolumeClaims(namespace).List(options) + return client.CoreV1().PersistentVolumeClaims(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PersistentVolumeClaims(namespace).Watch(options) + return client.CoreV1().PersistentVolumeClaims(namespace).Watch(context.TODO(), options) }, }, &corev1.PersistentVolumeClaim{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/pod.go b/staging/src/k8s.io/client-go/informers/core/v1/pod.go index 57aadd94581..5c713a9b6f4 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/pod.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/pod.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyn if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Pods(namespace).List(options) + return client.CoreV1().Pods(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Pods(namespace).Watch(options) + return client.CoreV1().Pods(namespace).Watch(context.TODO(), options) }, }, &corev1.Pod{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go b/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go index ff47094fb8d..2a16e910dbe 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/podtemplate.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PodTemplates(namespace).List(options) + return client.CoreV1().PodTemplates(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().PodTemplates(namespace).Watch(options) + return client.CoreV1().PodTemplates(namespace).Watch(context.TODO(), options) }, }, &corev1.PodTemplate{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go index 903fe3fbad8..930beb4cd5f 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/replicationcontroller.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredReplicationControllerInformer(client kubernetes.Interface, names if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ReplicationControllers(namespace).List(options) + return client.CoreV1().ReplicationControllers(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ReplicationControllers(namespace).Watch(options) + return client.CoreV1().ReplicationControllers(namespace).Watch(context.TODO(), options) }, }, &corev1.ReplicationController{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go b/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go index 27ae53ccb4f..619262a6128 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/resourcequota.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ResourceQuotas(namespace).List(options) + return client.CoreV1().ResourceQuotas(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ResourceQuotas(namespace).Watch(options) + return client.CoreV1().ResourceQuotas(namespace).Watch(context.TODO(), options) }, }, &corev1.ResourceQuota{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/secret.go b/staging/src/k8s.io/client-go/informers/core/v1/secret.go index e13776b2b1f..a6be0706930 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/secret.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/secret.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, re if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Secrets(namespace).List(options) + return client.CoreV1().Secrets(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Secrets(namespace).Watch(options) + return client.CoreV1().Secrets(namespace).Watch(context.TODO(), options) }, }, &corev1.Secret{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/service.go b/staging/src/k8s.io/client-go/informers/core/v1/service.go index 1c758668c48..3d9ecc6e954 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/service.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/service.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Services(namespace).List(options) + return client.CoreV1().Services(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().Services(namespace).Watch(options) + return client.CoreV1().Services(namespace).Watch(context.TODO(), options) }, }, &corev1.Service{}, diff --git a/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go b/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go index c701b8f1e61..44371c9fa4f 100644 --- a/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go +++ b/staging/src/k8s.io/client-go/informers/core/v1/serviceaccount.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" corev1 "k8s.io/api/core/v1" @@ -61,13 +62,13 @@ func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace st if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ServiceAccounts(namespace).List(options) + return client.CoreV1().ServiceAccounts(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.CoreV1().ServiceAccounts(namespace).Watch(options) + return client.CoreV1().ServiceAccounts(namespace).Watch(context.TODO(), options) }, }, &corev1.ServiceAccount{}, diff --git a/staging/src/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go b/staging/src/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go index a545ce15514..c5e383c0b20 100644 --- a/staging/src/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go +++ b/staging/src/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" @@ -61,13 +62,13 @@ func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.DiscoveryV1alpha1().EndpointSlices(namespace).List(options) + return client.DiscoveryV1alpha1().EndpointSlices(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.DiscoveryV1alpha1().EndpointSlices(namespace).Watch(options) + return client.DiscoveryV1alpha1().EndpointSlices(namespace).Watch(context.TODO(), options) }, }, &discoveryv1alpha1.EndpointSlice{}, diff --git a/staging/src/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go b/staging/src/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go index f658866c2a7..69ae38a91a1 100644 --- a/staging/src/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go +++ b/staging/src/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" discoveryv1beta1 "k8s.io/api/discovery/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.DiscoveryV1beta1().EndpointSlices(namespace).List(options) + return client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.DiscoveryV1beta1().EndpointSlices(namespace).Watch(options) + return client.DiscoveryV1beta1().EndpointSlices(namespace).Watch(context.TODO(), options) }, }, &discoveryv1beta1.EndpointSlice{}, diff --git a/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go b/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go index 0ac6fa2827d..025f6a5cf36 100644 --- a/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go +++ b/staging/src/k8s.io/client-go/informers/events/v1beta1/event.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" eventsv1beta1 "k8s.io/api/events/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res if tweakListOptions != nil { tweakListOptions(&options) } - return client.EventsV1beta1().Events(namespace).List(options) + return client.EventsV1beta1().Events(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.EventsV1beta1().Events(namespace).Watch(options) + return client.EventsV1beta1().Events(namespace).Watch(context.TODO(), options) }, }, &eventsv1beta1.Event{}, diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go index 80e84eba801..050080a5986 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().DaemonSets(namespace).List(options) + return client.ExtensionsV1beta1().DaemonSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(options) + return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(context.TODO(), options) }, }, &extensionsv1beta1.DaemonSet{}, diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go index cef4b8150ff..1b16c5cc919 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/deployment.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().Deployments(namespace).List(options) + return client.ExtensionsV1beta1().Deployments(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().Deployments(namespace).Watch(options) + return client.ExtensionsV1beta1().Deployments(namespace).Watch(context.TODO(), options) }, }, &extensionsv1beta1.Deployment{}, diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go index 72a88f3138a..f01a8876174 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/ingress.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().Ingresses(namespace).List(options) + return client.ExtensionsV1beta1().Ingresses(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().Ingresses(namespace).Watch(options) + return client.ExtensionsV1beta1().Ingresses(namespace).Watch(context.TODO(), options) }, }, &extensionsv1beta1.Ingress{}, diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go index 92f4f040072..4a924619fb2 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().NetworkPolicies(namespace).List(options) + return client.ExtensionsV1beta1().NetworkPolicies(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(options) + return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(context.TODO(), options) }, }, &extensionsv1beta1.NetworkPolicy{}, diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go index 6f91e54582c..11be2751ccf 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPer if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().PodSecurityPolicies().List(options) + return client.ExtensionsV1beta1().PodSecurityPolicies().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().PodSecurityPolicies().Watch(options) + return client.ExtensionsV1beta1().PodSecurityPolicies().Watch(context.TODO(), options) }, }, &extensionsv1beta1.PodSecurityPolicy{}, diff --git a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go index e8847aa2cf2..f7e224bcfbf 100644 --- a/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go +++ b/staging/src/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().ReplicaSets(namespace).List(options) + return client.ExtensionsV1beta1().ReplicaSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(options) + return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, &extensionsv1beta1.ReplicaSet{}, diff --git a/staging/src/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go b/staging/src/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go index af1d874c72f..9a4a904481d 100644 --- a/staging/src/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go +++ b/staging/src/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().FlowSchemas().List(options) + return client.FlowcontrolV1alpha1().FlowSchemas().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().FlowSchemas().Watch(options) + return client.FlowcontrolV1alpha1().FlowSchemas().Watch(context.TODO(), options) }, }, &flowcontrolv1alpha1.FlowSchema{}, diff --git a/staging/src/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/staging/src/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go index c145b7d4114..b81f5c9c36b 100644 --- a/staging/src/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/staging/src/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1alpha1 import ( + "context" time "time" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().List(options) + return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().Watch(options) + return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, &flowcontrolv1alpha1.PriorityLevelConfiguration{}, diff --git a/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go index c2255c0dfdb..a75c9ac21f2 100644 --- a/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go +++ b/staging/src/k8s.io/client-go/informers/networking/v1/networkpolicy.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" networkingv1 "k8s.io/api/networking/v1" @@ -61,13 +62,13 @@ func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1().NetworkPolicies(namespace).List(options) + return client.NetworkingV1().NetworkPolicies(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1().NetworkPolicies(namespace).Watch(options) + return client.NetworkingV1().NetworkPolicies(namespace).Watch(context.TODO(), options) }, }, &networkingv1.NetworkPolicy{}, diff --git a/staging/src/k8s.io/client-go/informers/networking/v1beta1/ingress.go b/staging/src/k8s.io/client-go/informers/networking/v1beta1/ingress.go index 8abd00e17b0..8800d6c9cd8 100644 --- a/staging/src/k8s.io/client-go/informers/networking/v1beta1/ingress.go +++ b/staging/src/k8s.io/client-go/informers/networking/v1beta1/ingress.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" networkingv1beta1 "k8s.io/api/networking/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1beta1().Ingresses(namespace).List(options) + return client.NetworkingV1beta1().Ingresses(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1beta1().Ingresses(namespace).Watch(options) + return client.NetworkingV1beta1().Ingresses(namespace).Watch(context.TODO(), options) }, }, &networkingv1beta1.Ingress{}, diff --git a/staging/src/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go b/staging/src/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go index 31edf930ae5..d314a9573c6 100644 --- a/staging/src/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go +++ b/staging/src/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1alpha1 import ( + "context" time "time" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t if tweakListOptions != nil { tweakListOptions(&options) } - return client.NodeV1alpha1().RuntimeClasses().List(options) + return client.NodeV1alpha1().RuntimeClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NodeV1alpha1().RuntimeClasses().Watch(options) + return client.NodeV1alpha1().RuntimeClasses().Watch(context.TODO(), options) }, }, &nodev1alpha1.RuntimeClass{}, diff --git a/staging/src/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go b/staging/src/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go index 6972993ad8f..07619b2306f 100644 --- a/staging/src/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go +++ b/staging/src/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" nodev1beta1 "k8s.io/api/node/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t if tweakListOptions != nil { tweakListOptions(&options) } - return client.NodeV1beta1().RuntimeClasses().List(options) + return client.NodeV1beta1().RuntimeClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NodeV1beta1().RuntimeClasses().Watch(options) + return client.NodeV1beta1().RuntimeClasses().Watch(context.TODO(), options) }, }, &nodev1beta1.RuntimeClass{}, diff --git a/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go index dce61f7f18b..4530343ecc0 100644 --- a/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go +++ b/staging/src/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespa if tweakListOptions != nil { tweakListOptions(&options) } - return client.PolicyV1beta1().PodDisruptionBudgets(namespace).List(options) + return client.PolicyV1beta1().PodDisruptionBudgets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(options) + return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(context.TODO(), options) }, }, &policyv1beta1.PodDisruptionBudget{}, diff --git a/staging/src/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go b/staging/src/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go index 7ce5684fb0e..b87d23434ef 100644 --- a/staging/src/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go +++ b/staging/src/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPer if tweakListOptions != nil { tweakListOptions(&options) } - return client.PolicyV1beta1().PodSecurityPolicies().List(options) + return client.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.PolicyV1beta1().PodSecurityPolicies().Watch(options) + return client.PolicyV1beta1().PodSecurityPolicies().Watch(context.TODO(), options) }, }, &policyv1beta1.PodSecurityPolicy{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go index b8096e6bca7..0572be264bb 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrole.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" rbacv1 "k8s.io/api/rbac/v1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().ClusterRoles().List(options) + return client.RbacV1().ClusterRoles().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().ClusterRoles().Watch(options) + return client.RbacV1().ClusterRoles().Watch(context.TODO(), options) }, }, &rbacv1.ClusterRole{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go index 5ef3407c4ca..51026c05580 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" rbacv1 "k8s.io/api/rbac/v1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().ClusterRoleBindings().List(options) + return client.RbacV1().ClusterRoleBindings().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().ClusterRoleBindings().Watch(options) + return client.RbacV1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, &rbacv1.ClusterRoleBinding{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/role.go b/staging/src/k8s.io/client-go/informers/rbac/v1/role.go index 2d98874e5d1..986a5f29f4a 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/role.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/role.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" rbacv1 "k8s.io/api/rbac/v1" @@ -61,13 +62,13 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().Roles(namespace).List(options) + return client.RbacV1().Roles(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().Roles(namespace).Watch(options) + return client.RbacV1().Roles(namespace).Watch(context.TODO(), options) }, }, &rbacv1.Role{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go index a97107de1a7..0264049fb0e 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1/rolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" rbacv1 "k8s.io/api/rbac/v1" @@ -61,13 +62,13 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().RoleBindings(namespace).List(options) + return client.RbacV1().RoleBindings(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1().RoleBindings(namespace).Watch(options) + return client.RbacV1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, &rbacv1.RoleBinding{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go index 58c9c412594..70d9885f0a0 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().ClusterRoles().List(options) + return client.RbacV1alpha1().ClusterRoles().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().ClusterRoles().Watch(options) + return client.RbacV1alpha1().ClusterRoles().Watch(context.TODO(), options) }, }, &rbacv1alpha1.ClusterRole{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go index 759c716bf84..8c18f679284 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1alpha1 import ( + "context" time "time" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().ClusterRoleBindings().List(options) + return client.RbacV1alpha1().ClusterRoleBindings().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().ClusterRoleBindings().Watch(options) + return client.RbacV1alpha1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, &rbacv1alpha1.ClusterRoleBinding{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go index 1d1f99f0648..7dc4551d927 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/role.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -61,13 +62,13 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().Roles(namespace).List(options) + return client.RbacV1alpha1().Roles(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().Roles(namespace).Watch(options) + return client.RbacV1alpha1().Roles(namespace).Watch(context.TODO(), options) }, }, &rbacv1alpha1.Role{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go index 9fcb01d3abb..d49ec8b362b 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -61,13 +62,13 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().RoleBindings(namespace).List(options) + return client.RbacV1alpha1().RoleBindings(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1alpha1().RoleBindings(namespace).Watch(options) + return client.RbacV1alpha1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, &rbacv1alpha1.RoleBinding{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go index b82c1c740ad..e50e1d39354 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" rbacv1beta1 "k8s.io/api/rbac/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().ClusterRoles().List(options) + return client.RbacV1beta1().ClusterRoles().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().ClusterRoles().Watch(options) + return client.RbacV1beta1().ClusterRoles().Watch(context.TODO(), options) }, }, &rbacv1beta1.ClusterRole{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go index d662e7f563e..a7ea4cd38d5 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" rbacv1beta1 "k8s.io/api/rbac/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().ClusterRoleBindings().List(options) + return client.RbacV1beta1().ClusterRoleBindings().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().ClusterRoleBindings().Watch(options) + return client.RbacV1beta1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, &rbacv1beta1.ClusterRoleBinding{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go index b885beb2729..e56961e81e7 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/role.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" rbacv1beta1 "k8s.io/api/rbac/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().Roles(namespace).List(options) + return client.RbacV1beta1().Roles(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().Roles(namespace).Watch(options) + return client.RbacV1beta1().Roles(namespace).Watch(context.TODO(), options) }, }, &rbacv1beta1.Role{}, diff --git a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go index 63d9d7264ec..d893882db35 100644 --- a/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go +++ b/staging/src/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" rbacv1beta1 "k8s.io/api/rbac/v1beta1" @@ -61,13 +62,13 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().RoleBindings(namespace).List(options) + return client.RbacV1beta1().RoleBindings(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.RbacV1beta1().RoleBindings(namespace).Watch(options) + return client.RbacV1beta1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, &rbacv1beta1.RoleBinding{}, diff --git a/staging/src/k8s.io/client-go/informers/scheduling/v1/priorityclass.go b/staging/src/k8s.io/client-go/informers/scheduling/v1/priorityclass.go index a9ee6289e47..730616b4a52 100644 --- a/staging/src/k8s.io/client-go/informers/scheduling/v1/priorityclass.go +++ b/staging/src/k8s.io/client-go/informers/scheduling/v1/priorityclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" schedulingv1 "k8s.io/api/scheduling/v1" @@ -60,13 +61,13 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1().PriorityClasses().List(options) + return client.SchedulingV1().PriorityClasses().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1().PriorityClasses().Watch(options) + return client.SchedulingV1().PriorityClasses().Watch(context.TODO(), options) }, }, &schedulingv1.PriorityClass{}, diff --git a/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go index cd90dd7654b..f82b6643690 100644 --- a/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go +++ b/staging/src/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1alpha1().PriorityClasses().List(options) + return client.SchedulingV1alpha1().PriorityClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1alpha1().PriorityClasses().Watch(options) + return client.SchedulingV1alpha1().PriorityClasses().Watch(context.TODO(), options) }, }, &schedulingv1alpha1.PriorityClass{}, diff --git a/staging/src/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go b/staging/src/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go index 3c7d90938f1..fc7848891ee 100644 --- a/staging/src/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go +++ b/staging/src/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1beta1().PriorityClasses().List(options) + return client.SchedulingV1beta1().PriorityClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SchedulingV1beta1().PriorityClasses().Watch(options) + return client.SchedulingV1beta1().PriorityClasses().Watch(context.TODO(), options) }, }, &schedulingv1beta1.PriorityClass{}, diff --git a/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go b/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go index 33fcf2359e7..8c10b16c85b 100644 --- a/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go +++ b/staging/src/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" @@ -61,13 +62,13 @@ func NewFilteredPodPresetInformer(client kubernetes.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.SettingsV1alpha1().PodPresets(namespace).List(options) + return client.SettingsV1alpha1().PodPresets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SettingsV1alpha1().PodPresets(namespace).Watch(options) + return client.SettingsV1alpha1().PodPresets(namespace).Watch(context.TODO(), options) }, }, &settingsv1alpha1.PodPreset{}, diff --git a/staging/src/k8s.io/client-go/informers/storage/v1/csinode.go b/staging/src/k8s.io/client-go/informers/storage/v1/csinode.go index eed947c4a6f..96416967fb8 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1/csinode.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1/csinode.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" storagev1 "k8s.io/api/storage/v1" @@ -60,13 +61,13 @@ func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.D if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().CSINodes().List(options) + return client.StorageV1().CSINodes().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().CSINodes().Watch(options) + return client.StorageV1().CSINodes().Watch(context.TODO(), options) }, }, &storagev1.CSINode{}, diff --git a/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go b/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go index b4609b4d2f8..8cde79d9a3b 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1/storageclass.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" storagev1 "k8s.io/api/storage/v1" @@ -60,13 +61,13 @@ func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod t if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().StorageClasses().List(options) + return client.StorageV1().StorageClasses().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().StorageClasses().Watch(options) + return client.StorageV1().StorageClasses().Watch(context.TODO(), options) }, }, &storagev1.StorageClass{}, diff --git a/staging/src/k8s.io/client-go/informers/storage/v1/volumeattachment.go b/staging/src/k8s.io/client-go/informers/storage/v1/volumeattachment.go index 7ca3b86f22c..be605ff48c6 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1/volumeattachment.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1/volumeattachment.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" storagev1 "k8s.io/api/storage/v1" @@ -60,13 +61,13 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().VolumeAttachments().List(options) + return client.StorageV1().VolumeAttachments().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1().VolumeAttachments().Watch(options) + return client.StorageV1().VolumeAttachments().Watch(context.TODO(), options) }, }, &storagev1.VolumeAttachment{}, diff --git a/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go index e169c8a29cb..445496dade4 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" storagev1alpha1 "k8s.io/api/storage/v1alpha1" @@ -60,13 +61,13 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1alpha1().VolumeAttachments().List(options) + return client.StorageV1alpha1().VolumeAttachments().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1alpha1().VolumeAttachments().Watch(options) + return client.StorageV1alpha1().VolumeAttachments().Watch(context.TODO(), options) }, }, &storagev1alpha1.VolumeAttachment{}, diff --git a/staging/src/k8s.io/client-go/informers/storage/v1beta1/csidriver.go b/staging/src/k8s.io/client-go/informers/storage/v1beta1/csidriver.go index 7f7cb216df0..f138a915b88 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1beta1/csidriver.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1beta1/csidriver.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().CSIDrivers().List(options) + return client.StorageV1beta1().CSIDrivers().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().CSIDrivers().Watch(options) + return client.StorageV1beta1().CSIDrivers().Watch(context.TODO(), options) }, }, &storagev1beta1.CSIDriver{}, diff --git a/staging/src/k8s.io/client-go/informers/storage/v1beta1/csinode.go b/staging/src/k8s.io/client-go/informers/storage/v1beta1/csinode.go index 218bb118314..6ba63172a34 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1beta1/csinode.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1beta1/csinode.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.D if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().CSINodes().List(options) + return client.StorageV1beta1().CSINodes().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().CSINodes().Watch(options) + return client.StorageV1beta1().CSINodes().Watch(context.TODO(), options) }, }, &storagev1beta1.CSINode{}, diff --git a/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go index ed898a77b84..a6582bf3d61 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1beta1/storageclass.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod t if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().StorageClasses().List(options) + return client.StorageV1beta1().StorageClasses().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().StorageClasses().Watch(options) + return client.StorageV1beta1().StorageClasses().Watch(context.TODO(), options) }, }, &storagev1beta1.StorageClass{}, diff --git a/staging/src/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go b/staging/src/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go index c75fc06b15e..e8942463490 100644 --- a/staging/src/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go +++ b/staging/src/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1beta1 import ( + "context" time "time" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -60,13 +61,13 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().VolumeAttachments().List(options) + return client.StorageV1beta1().VolumeAttachments().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.StorageV1beta1().VolumeAttachments().Watch(options) + return client.StorageV1beta1().VolumeAttachments().Watch(context.TODO(), options) }, }, &storagev1beta1.VolumeAttachment{}, diff --git a/staging/src/k8s.io/client-go/kubernetes_test/timeout_test.go b/staging/src/k8s.io/client-go/kubernetes_test/timeout_test.go index 055fb4909df..eb3c5f3efe0 100644 --- a/staging/src/k8s.io/client-go/kubernetes_test/timeout_test.go +++ b/staging/src/k8s.io/client-go/kubernetes_test/timeout_test.go @@ -18,6 +18,7 @@ package kubernetes_test import ( "bytes" + "context" "io/ioutil" "net/http" "testing" @@ -55,6 +56,6 @@ func TestListTimeout(t *testing.T) { realClient := kubernetes.New(restClient) timeout := int64(21) - realClient.AppsV1().DaemonSets("").List(metav1.ListOptions{TimeoutSeconds: &timeout}) - realClient.AppsV1().DaemonSets("").Watch(metav1.ListOptions{TimeoutSeconds: &timeout}) + realClient.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{TimeoutSeconds: &timeout}) + realClient.AppsV1().DaemonSets("").Watch(context.TODO(), metav1.ListOptions{TimeoutSeconds: &timeout}) } diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index fd152b072cb..4a9c030fb0b 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -17,6 +17,7 @@ limitations under the License. 
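The generated informers above all follow the same mechanical change: the ListFunc/WatchFunc closures keep their ListOptions-only signatures, and the context is supplied inside the closure as context.TODO(). As a minimal sketch (not part of this diff; "clientset" and the namespace are illustrative assumptions), a hand-written ListWatch adapts the same way:

package main

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func newSecretInformer(clientset kubernetes.Interface, namespace string) cache.SharedIndexInformer {
	lw := &cache.ListWatch{
		// The ListWatch callbacks still take only ListOptions; the context
		// is provided inside the closure, here as context.TODO().
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return clientset.CoreV1().Secrets(namespace).List(context.TODO(), options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return clientset.CoreV1().Secrets(namespace).Watch(context.TODO(), options)
		},
	}
	return cache.NewSharedIndexInformer(lw, &corev1.Secret{}, 30*time.Second, cache.Indexers{})
}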
package resourcelock import ( + "context" "encoding/json" "errors" "fmt" @@ -44,7 +45,7 @@ type ConfigMapLock struct { func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, []byte, error) { var record LeaderElectionRecord var err error - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(cml.ConfigMapMeta.Name, metav1.GetOptions{}) + cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(context.TODO(), cml.ConfigMapMeta.Name, metav1.GetOptions{}) if err != nil { return nil, nil, err } @@ -66,7 +67,7 @@ func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error { if err != nil { return err } - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(&v1.ConfigMap{ + cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: cml.ConfigMapMeta.Name, Namespace: cml.ConfigMapMeta.Namespace, @@ -88,7 +89,7 @@ func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error { return err } cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(cml.cm) + cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(context.TODO(), cml.cm) return err } diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go index f5a8ffcc867..5127a925e76 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go @@ -17,6 +17,7 @@ limitations under the License. package resourcelock import ( + "context" "encoding/json" "errors" "fmt" @@ -39,7 +40,7 @@ type EndpointsLock struct { func (el *EndpointsLock) Get() (*LeaderElectionRecord, []byte, error) { var record LeaderElectionRecord var err error - el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{}) + el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(context.TODO(), el.EndpointsMeta.Name, metav1.GetOptions{}) if err != nil { return nil, nil, err } @@ -61,7 +62,7 @@ func (el *EndpointsLock) Create(ler LeaderElectionRecord) error { if err != nil { return err } - el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{ + el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(context.TODO(), &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: el.EndpointsMeta.Name, Namespace: el.EndpointsMeta.Namespace, @@ -86,7 +87,7 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error { el.e.Annotations = make(map[string]string) } el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) - el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(el.e) + el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(context.TODO(), el.e) return err } diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 74016b8df15..5d46ae38f4c 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -17,6 +17,7 @@ limitations under the License. 
package resourcelock import ( + "context" "encoding/json" "errors" "fmt" @@ -39,7 +40,7 @@ type LeaseLock struct { // Get returns the election record from a Lease spec func (ll *LeaseLock) Get() (*LeaderElectionRecord, []byte, error) { var err error - ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ll.LeaseMeta.Name, metav1.GetOptions{}) + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(context.TODO(), ll.LeaseMeta.Name, metav1.GetOptions{}) if err != nil { return nil, nil, err } @@ -54,7 +55,7 @@ func (ll *LeaseLock) Get() (*LeaderElectionRecord, []byte, error) { // Create attempts to create a Lease func (ll *LeaseLock) Create(ler LeaderElectionRecord) error { var err error - ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(&coordinationv1.Lease{ + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(context.TODO(), &coordinationv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Name: ll.LeaseMeta.Name, Namespace: ll.LeaseMeta.Namespace, @@ -71,7 +72,7 @@ func (ll *LeaseLock) Update(ler LeaderElectionRecord) error { } ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler) var err error - ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ll.lease) + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(context.TODO(), ll.lease) return err } diff --git a/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go b/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go index d6e1e8e223f..b56980b76d9 100644 --- a/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go +++ b/staging/src/k8s.io/client-go/tools/watch/informerwatcher_test.go @@ -226,10 +226,10 @@ func TestNewInformerWatcher(t *testing.T) { lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return fake.CoreV1().Secrets("").List(options) + return fake.CoreV1().Secrets("").List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return fake.CoreV1().Secrets("").Watch(options) + return fake.CoreV1().Secrets("").Watch(context.TODO(), options) }, } _, _, w, done := NewIndexerInformerWatcher(lw, &corev1.Secret{}) diff --git a/staging/src/k8s.io/client-go/tools/watch/until_test.go b/staging/src/k8s.io/client-go/tools/watch/until_test.go index 06231f26c24..e8ab6cab4f8 100644 --- a/staging/src/k8s.io/client-go/tools/watch/until_test.go +++ b/staging/src/k8s.io/client-go/tools/watch/until_test.go @@ -237,10 +237,10 @@ func TestUntilWithSync(t *testing.T) { return &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return fakeclient.CoreV1().Secrets("").List(options) + return fakeclient.CoreV1().Secrets("").List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return fakeclient.CoreV1().Secrets("").Watch(options) + return fakeclient.CoreV1().Secrets("").Watch(context.TODO(), options) }, } }(), @@ -267,10 +267,10 @@ func TestUntilWithSync(t *testing.T) { return &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return fakeclient.CoreV1().Secrets("").List(options) + return fakeclient.CoreV1().Secrets("").List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return fakeclient.CoreV1().Secrets("").Watch(options) + return fakeclient.CoreV1().Secrets("").Watch(context.TODO(), options) }, } }(), diff --git a/staging/src/k8s.io/client-go/util/certificate/csr/csr.go 
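The three leader-election resource locks (ConfigMapLock, EndpointsLock, LeaseLock) now thread context.TODO() through their Get/Create/Update calls. A minimal sketch of the same Lease read-modify-write cycle done directly against the clientset, assuming a "clientset", namespace, and lease name that are illustrative only:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func touchLease(clientset kubernetes.Interface, namespace, name, identity string) error {
	ctx := context.TODO()

	// Fetch the current Lease, as LeaseLock.Get does above.
	lease, err := clientset.CoordinationV1().Leases(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("getting lease: %v", err)
	}

	// Claim the lease and write it back, as LeaseLock.Update does above.
	lease.Spec.HolderIdentity = &identity
	if _, err := clientset.CoordinationV1().Leases(namespace).Update(ctx, lease); err != nil {
		return fmt.Errorf("updating lease: %v", err)
	}
	return nil
}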
b/staging/src/k8s.io/client-go/util/certificate/csr/csr.go index e36f2d3e133..63862a1361a 100644 --- a/staging/src/k8s.io/client-go/util/certificate/csr/csr.go +++ b/staging/src/k8s.io/client-go/util/certificate/csr/csr.go @@ -62,12 +62,12 @@ func RequestCertificate(client certificatesclient.CertificateSigningRequestInter csr.GenerateName = "csr-" } - req, err = client.Create(csr) + req, err = client.Create(context.TODO(), csr) switch { case err == nil: case errors.IsAlreadyExists(err) && len(name) > 0: klog.Infof("csr for this node already exists, reusing") - req, err = client.Get(name, metav1.GetOptions{}) + req, err = client.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, formatError("cannot retrieve certificate signing request: %v", err) } @@ -87,11 +87,11 @@ func WaitForCertificate(ctx context.Context, client certificatesclient.Certifica lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector - return client.List(options) + return client.List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector - return client.Watch(options) + return client.Watch(context.TODO(), options) }, } event, err := watchtools.UntilWithSync( diff --git a/staging/src/k8s.io/cloud-provider/node/helpers/labels.go b/staging/src/k8s.io/cloud-provider/node/helpers/labels.go index 80e77bb145c..eea04350e32 100644 --- a/staging/src/k8s.io/cloud-provider/node/helpers/labels.go +++ b/staging/src/k8s.io/cloud-provider/node/helpers/labels.go @@ -17,6 +17,7 @@ limitations under the License. package helpers import ( + "context" "encoding/json" "fmt" "time" @@ -64,10 +65,10 @@ func addOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. 
if firstTry { - node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -94,7 +95,7 @@ func addOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la if err != nil { return fmt.Errorf("failed to create a two-way merge patch: %v", err) } - if _, err := kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil { + if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes); err != nil { return fmt.Errorf("failed to patch the node: %v", err) } return nil diff --git a/staging/src/k8s.io/cloud-provider/node/helpers/taints.go b/staging/src/k8s.io/cloud-provider/node/helpers/taints.go index 2e3e31cce5e..6fa0a82f602 100644 --- a/staging/src/k8s.io/cloud-provider/node/helpers/taints.go +++ b/staging/src/k8s.io/cloud-provider/node/helpers/taints.go @@ -25,6 +25,7 @@ is moved to an external repository, this file should be removed and replaced wit package helpers import ( + "context" "encoding/json" "fmt" "time" @@ -58,10 +59,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. if firstTry { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -106,7 +107,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) } - _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes) return err } diff --git a/staging/src/k8s.io/cloud-provider/service/helpers/helper.go b/staging/src/k8s.io/cloud-provider/service/helpers/helper.go index f0eb5015f8d..5ec2a49fa5d 100644 --- a/staging/src/k8s.io/cloud-provider/service/helpers/helper.go +++ b/staging/src/k8s.io/cloud-provider/service/helpers/helper.go @@ -17,6 +17,7 @@ limitations under the License. 
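The cloud-provider node helpers above keep their cache-first read (ResourceVersion "0") followed by a strategic-merge Patch, only now with an explicit context argument on both calls. A minimal sketch of that pattern, assuming a "clientset" and node name and an illustrative patch body:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

func labelNode(clientset kubernetes.Interface, nodeName string) error {
	ctx := context.TODO()

	// ResourceVersion "0" lets the API server answer from its cache,
	// which is cheaper than a quorum read from etcd.
	if _, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"}); err != nil {
		return err
	}

	// Apply a strategic merge patch; the label here is purely illustrative.
	patch := []byte(`{"metadata":{"labels":{"example.com/role":"worker"}}}`)
	_, err := clientset.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patch)
	return err
}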
package helpers import ( + "context" "encoding/json" "fmt" "strings" @@ -136,7 +137,7 @@ func PatchService(c corev1.CoreV1Interface, oldSvc, newSvc *v1.Service) (*v1.Ser return nil, err } - return c.Services(oldSvc.Namespace).Patch(oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status") + return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status") } diff --git a/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go b/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go index e96b2fa8759..927013223b1 100644 --- a/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go +++ b/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go @@ -17,6 +17,7 @@ limitations under the License. package helpers import ( + "context" "reflect" "strings" "testing" @@ -287,7 +288,7 @@ func TestPatchService(t *testing.T) { // Issue a separate update and verify patch doesn't fail after this. svcToUpdate := svcOrigin.DeepCopy() addAnnotations(svcToUpdate) - if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(svcToUpdate); err != nil { + if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate); err != nil { t.Fatalf("Failed to update service: %v", err) } diff --git a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/informers/externalversions/example/v1/clustertesttype.go b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/informers/externalversions/example/v1/clustertesttype.go index bff72e69cbf..ddd960feb91 100644 --- a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/informers/externalversions/example/v1/clustertesttype.go +++ b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/informers/externalversions/example/v1/clustertesttype.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,13 +61,13 @@ func NewFilteredClusterTestTypeInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleGroupV1().ClusterTestTypes().List(options) + return client.ExampleGroupV1().ClusterTestTypes().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleGroupV1().ClusterTestTypes().Watch(options) + return client.ExampleGroupV1().ClusterTestTypes().Watch(context.TODO(), options) }, }, &examplev1.ClusterTestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/informers/externalversions/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/informers/externalversions/example/v1/testtype.go index 88002571e20..ab9ca41c1cf 100644 --- a/staging/src/k8s.io/code-generator/_examples/HyphenGroup/informers/externalversions/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/HyphenGroup/informers/externalversions/example/v1/testtype.go @@ -19,6 +19,7 @@ limitations under the License. 
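The PatchService change above sends a two-way merge patch to the Service "status" subresource with the new context-first signature. A minimal sketch of how a caller might build and send that patch (assuming oldSvc/newSvc values; this is not the exact helper implementation):

package main

import (
	"context"
	"encoding/json"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

func patchServiceStatus(c corev1client.CoreV1Interface, oldSvc, newSvc *v1.Service) (*v1.Service, error) {
	oldData, err := json.Marshal(oldSvc)
	if err != nil {
		return nil, err
	}
	newData, err := json.Marshal(newSvc)
	if err != nil {
		return nil, err
	}

	// Compute a strategic merge patch describing the old -> new change.
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Service{})
	if err != nil {
		return nil, err
	}

	// Send the patch to the status subresource, threading a context through.
	return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status")
}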
package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredTestTypeInformer(client versioned.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleGroupV1().TestTypes(namespace).List(options) + return client.ExampleGroupV1().TestTypes(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleGroupV1().TestTypes(namespace).Watch(options) + return client.ExampleGroupV1().TestTypes(namespace).Watch(context.TODO(), options) }, }, &examplev1.TestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go b/staging/src/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go index e0607c11a05..c02987a5950 100644 --- a/staging/src/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go +++ b/staging/src/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,13 +61,13 @@ func NewFilteredClusterTestTypeInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleV1().ClusterTestTypes().List(options) + return client.ExampleV1().ClusterTestTypes().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleV1().ClusterTestTypes().Watch(options) + return client.ExampleV1().ClusterTestTypes().Watch(context.TODO(), options) }, }, &examplev1.ClusterTestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go index 18f3b88d0d3..c04c4658a9a 100644 --- a/staging/src/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredTestTypeInformer(client versioned.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleV1().TestTypes(namespace).List(options) + return client.ExampleV1().TestTypes(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleV1().TestTypes(namespace).Watch(options) + return client.ExampleV1().TestTypes(namespace).Watch(context.TODO(), options) }, }, &examplev1.TestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go index 025113612cb..c70fac464e1 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example/v1/testtype.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredTestTypeInformer(client versioned.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleV1().TestTypes(namespace).List(options) + return client.ExampleV1().TestTypes(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleV1().TestTypes(namespace).Watch(options) + return client.ExampleV1().TestTypes(namespace).Watch(context.TODO(), options) }, }, &examplev1.TestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go index 979ee4d5890..5a9ddc98d2e 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2/v1/testtype.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredTestTypeInformer(client versioned.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.SecondExampleV1().TestTypes(namespace).List(options) + return client.SecondExampleV1().TestTypes(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SecondExampleV1().TestTypes(namespace).Watch(options) + return client.SecondExampleV1().TestTypes(namespace).Watch(context.TODO(), options) }, }, &example2v1.TestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1/testtype.go index dddf77204cf..6094863a59a 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1/testtype.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredTestTypeInformer(client versioned.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.ThirdExampleV1().TestTypes(namespace).List(options) + return client.ThirdExampleV1().TestTypes(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ThirdExampleV1().TestTypes(namespace).Watch(options) + return client.ThirdExampleV1().TestTypes(namespace).Watch(context.TODO(), options) }, }, &example3iov1.TestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go index 22d7610567a..12268096511 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example/internalversion/testtype.go @@ -19,6 +19,7 @@ limitations under the License. 
package internalversion import ( + "context" time "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredTestTypeInformer(client clientsetinternalversion.Interface, name if tweakListOptions != nil { tweakListOptions(&options) } - return client.Example().TestTypes(namespace).List(options) + return client.Example().TestTypes(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.Example().TestTypes(namespace).Watch(options) + return client.Example().TestTypes(namespace).Watch(context.TODO(), options) }, }, &example.TestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go index 0b68d093678..63573988d28 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2/internalversion/testtype.go @@ -19,6 +19,7 @@ limitations under the License. package internalversion import ( + "context" time "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,13 +61,13 @@ func NewFilteredTestTypeInformer(client clientsetinternalversion.Interface, resy if tweakListOptions != nil { tweakListOptions(&options) } - return client.SecondExample().TestTypes().List(options) + return client.SecondExample().TestTypes().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SecondExample().TestTypes().Watch(options) + return client.SecondExample().TestTypes().Watch(context.TODO(), options) }, }, &example2.TestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion/testtype.go b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion/testtype.go index b99a9be2e7f..1d30f4fe060 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion/testtype.go @@ -19,6 +19,7 @@ limitations under the License. 
package internalversion import ( + "context" time "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredTestTypeInformer(client clientsetinternalversion.Interface, name if tweakListOptions != nil { tweakListOptions(&options) } - return client.ThirdExample().TestTypes(namespace).List(options) + return client.ThirdExample().TestTypes(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ThirdExample().TestTypes(namespace).Watch(options) + return client.ThirdExample().TestTypes(namespace).Watch(context.TODO(), options) }, }, &example3io.TestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/clustertesttype.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/clustertesttype.go index 13c38e44fa3..ae0a3eab3e1 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/clustertesttype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/clustertesttype.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,13 +61,13 @@ func NewFilteredClusterTestTypeInformer(client versioned.Interface, resyncPeriod if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleV1().ClusterTestTypes().List(options) + return client.ExampleV1().ClusterTestTypes().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleV1().ClusterTestTypes().Watch(options) + return client.ExampleV1().ClusterTestTypes().Watch(context.TODO(), options) }, }, &examplev1.ClusterTestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go index 47ca5b28726..90713a16356 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example/v1/testtype.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredTestTypeInformer(client versioned.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleV1().TestTypes(namespace).List(options) + return client.ExampleV1().TestTypes(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ExampleV1().TestTypes(namespace).Watch(options) + return client.ExampleV1().TestTypes(namespace).Watch(context.TODO(), options) }, }, &examplev1.TestType{}, diff --git a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go index eb99c32d043..e078a6d3fd6 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/informers/externalversions/example2/v1/testtype.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredTestTypeInformer(client versioned.Interface, namespace string, r if tweakListOptions != nil { tweakListOptions(&options) } - return client.SecondExampleV1().TestTypes(namespace).List(options) + return client.SecondExampleV1().TestTypes(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SecondExampleV1().TestTypes(namespace).Watch(options) + return client.SecondExampleV1().TestTypes(namespace).Watch(context.TODO(), options) }, }, &example2v1.TestType{}, diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1/apiservice.go index 67a5d63209c..1c6a5cf20bd 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1/apiservice.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1 import ( + "context" time "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,13 +61,13 @@ func NewFilteredAPIServiceInformer(client clientset.Interface, resyncPeriod time if tweakListOptions != nil { tweakListOptions(&options) } - return client.ApiregistrationV1().APIServices().List(options) + return client.ApiregistrationV1().APIServices().List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ApiregistrationV1().APIServices().Watch(options) + return client.ApiregistrationV1().APIServices().Watch(context.TODO(), options) }, }, &apiregistrationv1.APIService{}, diff --git a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go index b96c469a558..aab505a2596 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1beta1/apiservice.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,13 +61,13 @@ func NewFilteredAPIServiceInformer(client clientset.Interface, resyncPeriod time if tweakListOptions != nil { tweakListOptions(&options) } - return client.ApiregistrationV1beta1().APIServices().List(options) + return client.ApiregistrationV1beta1().APIServices().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ApiregistrationV1beta1().APIServices().Watch(options) + return client.ApiregistrationV1beta1().APIServices().Watch(context.TODO(), options) }, }, &apiregistrationv1beta1.APIService{}, diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go index 1904714e616..2b5c58f8bbc 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go @@ -17,6 +17,7 @@ limitations under the License. 
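The generated informer changes above (the code-generator examples and the kube-aggregator APIService informers) are all the same mechanical rewrite: the ListFunc/WatchFunc closures now pass a context as the first argument to the typed client. Below is a minimal, self-contained sketch of that shape; it uses the core Pod client purely for illustration and is not part of this patch.

package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newPodInformer mirrors the generated NewFilteredXxxInformer functions above:
// the ListerWatcher closes over the typed client and threads context.TODO()
// into every List and Watch call.
func newPodInformer(client kubernetes.Interface, namespace string, resync time.Duration) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				return client.CoreV1().Pods(namespace).List(context.TODO(), options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return client.CoreV1().Pods(namespace).Watch(context.TODO(), options)
			},
		},
		&corev1.Pod{},
		resync,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
}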
package autoregister import ( + "context" "fmt" "reflect" "sync" @@ -240,7 +241,7 @@ func (c *autoRegisterController) checkAPIService(name string) (err error) { // we don't have an entry and we do want one (2B,2C) case apierrors.IsNotFound(err) && desired != nil: - _, err := c.apiServiceClient.APIServices().Create(desired) + _, err := c.apiServiceClient.APIServices().Create(context.TODO(), desired) if apierrors.IsAlreadyExists(err) { // created in the meantime, we'll get called again return nil @@ -262,7 +263,7 @@ func (c *autoRegisterController) checkAPIService(name string) (err error) { // we have a spurious APIService that we're managing, delete it (5A,6A) case desired == nil: opts := &metav1.DeleteOptions{Preconditions: metav1.NewUIDPreconditions(string(curr.UID))} - err := c.apiServiceClient.APIServices().Delete(curr.Name, opts) + err := c.apiServiceClient.APIServices().Delete(context.TODO(), curr.Name, opts) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil @@ -277,7 +278,7 @@ func (c *autoRegisterController) checkAPIService(name string) (err error) { // we have an entry and we have a desired, now we deconflict. Only a few fields matter. (5B,5C,6B,6C) apiService := curr.DeepCopy() apiService.Spec = desired.Spec - _, err = c.apiServiceClient.APIServices().Update(apiService) + _, err = c.apiServiceClient.APIServices().Update(context.TODO(), apiService) if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { // deleted or changed in the meantime, we'll get called again return nil diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go index f1f91e25fe0..73dfa12dadc 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go @@ -17,6 +17,7 @@ limitations under the License. package apiserver import ( + "context" "fmt" "net/http" "net/url" @@ -369,7 +370,7 @@ func updateAPIServiceStatus(client apiregistrationclient.APIServicesGetter, orig return newAPIService, nil } - newAPIService, err := client.APIServices().UpdateStatus(newAPIService) + newAPIService, err := client.APIServices().UpdateStatus(context.TODO(), newAPIService) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go b/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go index 604d63af647..ef22223b53c 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go @@ -17,6 +17,7 @@ limitations under the License. 
package autoscale import ( + "context" "fmt" "github.com/spf13/cobra" @@ -262,7 +263,7 @@ func (o *AutoscaleOptions) Run() error { return err } - actualHPA, err := o.HPAClient.HorizontalPodAutoscalers(o.namespace).Create(hpa) + actualHPA, err := o.HPAClient.HorizontalPodAutoscalers(o.namespace).Create(context.TODO(), hpa) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go b/staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go index cfe5e81582f..ff37674a461 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go @@ -166,7 +166,7 @@ func (o *ClusterInfoDumpOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) } func (o *ClusterInfoDumpOptions) Run() error { - nodes, err := o.CoreClient.Nodes().List(metav1.ListOptions{}) + nodes, err := o.CoreClient.Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -177,7 +177,7 @@ func (o *ClusterInfoDumpOptions) Run() error { var namespaces []string if o.AllNamespaces { - namespaceList, err := o.CoreClient.Namespaces().List(metav1.ListOptions{}) + namespaceList, err := o.CoreClient.Namespaces().List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -195,7 +195,7 @@ func (o *ClusterInfoDumpOptions) Run() error { for _, namespace := range namespaces { // TODO: this is repetitive in the extreme. Use reflection or // something to make this a for loop. - events, err := o.CoreClient.Events(namespace).List(metav1.ListOptions{}) + events, err := o.CoreClient.Events(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -203,7 +203,7 @@ func (o *ClusterInfoDumpOptions) Run() error { return err } - rcs, err := o.CoreClient.ReplicationControllers(namespace).List(metav1.ListOptions{}) + rcs, err := o.CoreClient.ReplicationControllers(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -211,7 +211,7 @@ func (o *ClusterInfoDumpOptions) Run() error { return err } - svcs, err := o.CoreClient.Services(namespace).List(metav1.ListOptions{}) + svcs, err := o.CoreClient.Services(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -219,7 +219,7 @@ func (o *ClusterInfoDumpOptions) Run() error { return err } - sets, err := o.AppsClient.DaemonSets(namespace).List(metav1.ListOptions{}) + sets, err := o.AppsClient.DaemonSets(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -227,7 +227,7 @@ func (o *ClusterInfoDumpOptions) Run() error { return err } - deps, err := o.AppsClient.Deployments(namespace).List(metav1.ListOptions{}) + deps, err := o.AppsClient.Deployments(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -235,7 +235,7 @@ func (o *ClusterInfoDumpOptions) Run() error { return err } - rps, err := o.AppsClient.ReplicaSets(namespace).List(metav1.ListOptions{}) + rps, err := o.AppsClient.ReplicaSets(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -243,7 +243,7 @@ func (o *ClusterInfoDumpOptions) Run() error { return err } - pods, err := o.CoreClient.Pods(namespace).List(metav1.ListOptions{}) + pods, err := o.CoreClient.Pods(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go index 
54369988f58..043012c3dee 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go @@ -17,6 +17,7 @@ limitations under the License. package create import ( + "context" "fmt" "strings" @@ -200,7 +201,7 @@ func (c *CreateClusterRoleOptions) RunCreateRole() error { // Create ClusterRole. if !c.DryRun { - clusterRole, err = c.Client.ClusterRoles().Create(clusterRole) + clusterRole, err = c.Client.ClusterRoles().Create(context.TODO(), clusterRole) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go index 1727b596ece..adbdac48342 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go @@ -17,6 +17,7 @@ limitations under the License. package create import ( + "context" "fmt" "github.com/spf13/cobra" @@ -164,7 +165,7 @@ func (o *CreateCronJobOptions) Run() error { if !o.DryRun { var err error - cronjob, err = o.Client.CronJobs(o.Namespace).Create(cronjob) + cronjob, err = o.Client.CronJobs(o.Namespace).Create(context.TODO(), cronjob) if err != nil { return fmt.Errorf("failed to create cronjob: %v", err) } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go index 2213670421c..003cff4744b 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go @@ -17,6 +17,7 @@ limitations under the License. package create import ( + "context" "fmt" "github.com/spf13/cobra" @@ -192,7 +193,7 @@ func (o *CreateJobOptions) Run() error { } if !o.DryRun { var err error - job, err = o.Client.Jobs(o.Namespace).Create(job) + job, err = o.Client.Jobs(o.Namespace).Create(context.TODO(), job) if err != nil { return fmt.Errorf("failed to create job: %v", err) } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go index 0a23e417044..8e540b5487d 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go @@ -17,6 +17,7 @@ limitations under the License. package create import ( + "context" "fmt" "strings" @@ -341,7 +342,7 @@ func (o *CreateRoleOptions) RunCreateRole() error { // Create role. if !o.DryRun { - role, err = o.Client.Roles(o.Namespace).Create(role) + role, err = o.Client.Roles(o.Namespace).Create(context.TODO(), role) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go b/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go index b70e3ea64f2..d4513a8e266 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go @@ -17,6 +17,7 @@ limitations under the License. package exec import ( + "context" "fmt" "io" "net/url" @@ -285,7 +286,7 @@ func (p *ExecOptions) Run() error { // since there are any other command run this function by providing Podname with PodsGetter // and without resource builder, eg: `kubectl cp`. 
if len(p.PodName) != 0 { - p.Pod, err = p.PodClient.Pods(p.Namespace).Get(p.PodName, metav1.GetOptions{}) + p.Pod, err = p.PodClient.Pods(p.Namespace).Get(context.TODO(), p.PodName, metav1.GetOptions{}) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go b/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go index 4a3aec82819..3d656a8b513 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go @@ -17,6 +17,7 @@ limitations under the License. package portforward import ( + "context" "fmt" "net/http" "net/url" @@ -311,7 +312,7 @@ func (o PortForwardOptions) Validate() error { // RunPortForward implements all the necessary functionality for port-forward cmd. func (o PortForwardOptions) RunPortForward() error { - pod, err := o.PodClient.Pods(o.Namespace).Get(o.PodName, metav1.GetOptions{}) + pod, err := o.PodClient.Pods(o.Namespace).Get(context.TODO(), o.PodName, metav1.GetOptions{}) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go index ebb077be5cc..15f1af0cb69 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go @@ -17,6 +17,7 @@ limitations under the License. package rollingupdate import ( + "context" "fmt" "io" "strconv" @@ -205,7 +206,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { // annotation if it doesn't yet exist. _, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation] if !hasOriginalAnnotation { - existing, err := r.rcClient.ReplicationControllers(oldRc.Namespace).Get(oldRc.Name, metav1.GetOptions{}) + existing, err := r.rcClient.ReplicationControllers(oldRc.Namespace).Get(context.TODO(), oldRc.Name, metav1.GetOptions{}) if err != nil { return err } @@ -415,7 +416,7 @@ func (r *RollingUpdater) scaleAndWaitWithScaler(rc *corev1.ReplicationController if err := scaler.Scale(rc.Namespace, rc.Name, uint(valOrZero(rc.Spec.Replicas)), &scale.ScalePrecondition{Size: -1, ResourceVersion: ""}, retry, wait, corev1.SchemeGroupVersion.WithResource("replicationcontrollers")); err != nil { return nil, err } - return r.rcClient.ReplicationControllers(rc.Namespace).Get(rc.Name, metav1.GetOptions{}) + return r.rcClient.ReplicationControllers(rc.Namespace).Get(context.TODO(), rc.Name, metav1.GetOptions{}) } // readyPods returns the old and new ready counts for their pods. 
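kubectl exec and port-forward above resolve their target pod the same way once the context argument is added; context.TODO() is a placeholder until a real caller context is plumbed through the command layers. A minimal sketch of that lookup follows; the helper name and the running-phase check are illustrative, not part of the patch.

package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// targetPod looks up the pod a command will attach to and rejects pods that
// are not running, mirroring the Get calls shown above.
func targetPod(client corev1client.PodsGetter, namespace, name string) (*corev1.Pod, error) {
	pod, err := client.Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if pod.Status.Phase != corev1.PodRunning {
		return nil, fmt.Errorf("pod %s/%s is in phase %s, expected Running", namespace, name, pod.Status.Phase)
	}
	return pod, nil
}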
@@ -433,7 +434,7 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *corev1.ReplicationController, m controller := controllers[i] selector := labels.Set(controller.Spec.Selector).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := r.podClient.Pods(controller.Namespace).List(options) + pods, err := r.podClient.Pods(controller.Namespace).List(context.TODO(), options) if err != nil { return 0, 0, err } @@ -482,7 +483,7 @@ func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", valOrZero(controller.Spec.Replicas)) controller.Annotations[sourceIDAnnotation] = sourceID controller.Spec.Replicas = utilpointer.Int32Ptr(0) - newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(controller) + newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(context.TODO(), controller) return newRc, false, err } // Validate and use the existing controller. @@ -502,7 +503,7 @@ func (r *RollingUpdater) existingController(controller *corev1.ReplicationContro return nil, errors.NewNotFound(corev1.Resource("replicationcontrollers"), controller.Name) } // controller name is required to get rc back - return r.rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{}) + return r.rcClient.ReplicationControllers(controller.Namespace).Get(context.TODO(), controller.Name, metav1.GetOptions{}) } // cleanupWithClients performs cleanup tasks after the rolling update. Update @@ -511,7 +512,7 @@ func (r *RollingUpdater) existingController(controller *corev1.ReplicationContro func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error { // Clean up annotations var err error - newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{}) + newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(context.TODO(), newRc.Name, metav1.GetOptions{}) if err != nil { return err } @@ -526,7 +527,7 @@ func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *corev1.ReplicationCont if err = wait.Poll(config.Interval, config.Timeout, controllerHasDesiredReplicas(r.rcClient, newRc)); err != nil { return err } - newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{}) + newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(context.TODO(), newRc.Name, metav1.GetOptions{}) if err != nil { return err } @@ -535,11 +536,11 @@ func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *corev1.ReplicationCont case DeleteRollingUpdateCleanupPolicy: // delete old rc fmt.Fprintf(config.Out, "Update succeeded. Deleting %s\n", oldRc.Name) - return r.rcClient.ReplicationControllers(r.ns).Delete(oldRc.Name, nil) + return r.rcClient.ReplicationControllers(r.ns).Delete(context.TODO(), oldRc.Name, nil) case RenameRollingUpdateCleanupPolicy: // delete old rc fmt.Fprintf(config.Out, "Update succeeded. Deleting old controller: %s\n", oldRc.Name) - if err := r.rcClient.ReplicationControllers(r.ns).Delete(oldRc.Name, nil); err != nil { + if err := r.rcClient.ReplicationControllers(r.ns).Delete(context.TODO(), oldRc.Name, nil); err != nil { return err } fmt.Fprintf(config.Out, "Renaming %s to %s\n", newRc.Name, oldRc.Name) @@ -557,12 +558,12 @@ func Rename(c corev1client.ReplicationControllersGetter, rc *corev1.ReplicationC rc.ResourceVersion = "" // First delete the oldName RC and orphan its pods. 
policy := metav1.DeletePropagationOrphan - err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &metav1.DeleteOptions{PropagationPolicy: &policy}) + err := c.ReplicationControllers(rc.Namespace).Delete(context.TODO(), oldName, &metav1.DeleteOptions{PropagationPolicy: &policy}) if err != nil && !errors.IsNotFound(err) { return err } err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { - _, err := c.ReplicationControllers(rc.Namespace).Get(oldName, metav1.GetOptions{}) + _, err := c.ReplicationControllers(rc.Namespace).Get(context.TODO(), oldName, metav1.GetOptions{}) if err == nil { return false, nil } else if errors.IsNotFound(err) { @@ -575,7 +576,7 @@ func Rename(c corev1client.ReplicationControllersGetter, rc *corev1.ReplicationC return err } // Then create the same RC with the new name. - _, err = c.ReplicationControllers(rc.Namespace).Create(rc) + _, err = c.ReplicationControllers(rc.Namespace).Create(context.TODO(), rc) return err } @@ -583,7 +584,7 @@ func LoadExistingNextReplicationController(c corev1client.ReplicationControllers if len(newName) == 0 { return nil, nil } - newRc, err := c.ReplicationControllers(namespace).Get(newName, metav1.GetOptions{}) + newRc, err := c.ReplicationControllers(namespace).Get(context.TODO(), newName, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { return nil, nil } @@ -602,7 +603,7 @@ type NewControllerConfig struct { func CreateNewControllerFromCurrentController(rcClient corev1client.ReplicationControllersGetter, codec runtime.Codec, cfg *NewControllerConfig) (*corev1.ReplicationController, error) { containerIndex := 0 // load the old RC into the "new" RC - newRc, err := rcClient.ReplicationControllers(cfg.Namespace).Get(cfg.OldName, metav1.GetOptions{}) + newRc, err := rcClient.ReplicationControllers(cfg.Namespace).Get(context.TODO(), cfg.OldName, metav1.GetOptions{}) if err != nil { return nil, err } @@ -719,7 +720,7 @@ func AddDeploymentKeyToReplicationController(oldRc *corev1.ReplicationController // TODO: extract the code from the label command and re-use it here. selector := labels.SelectorFromSet(oldRc.Spec.Selector) options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := podClient.Pods(namespace).List(options) + podList, err := podClient.Pods(namespace).List(context.TODO(), options) if err != nil { return nil, err } @@ -755,13 +756,13 @@ func AddDeploymentKeyToReplicationController(oldRc *corev1.ReplicationController // we've finished re-adopting existing pods to the rc. selector = labels.SelectorFromSet(oldRc.Spec.Selector) options = metav1.ListOptions{LabelSelector: selector.String()} - if podList, err = podClient.Pods(namespace).List(options); err != nil { + if podList, err = podClient.Pods(namespace).List(context.TODO(), options); err != nil { return nil, err } for ix := range podList.Items { pod := &podList.Items[ix] if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue { - if err := podClient.Pods(namespace).Delete(pod.Name, nil); err != nil { + if err := podClient.Pods(namespace).Delete(context.TODO(), pod.Name, nil); err != nil { return nil, err } } @@ -782,14 +783,14 @@ func updateRcWithRetries(rcClient corev1client.ReplicationControllersGetter, nam err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) { // Apply the update, then attempt to push it to the apiserver. 
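The Rename path above deletes the old replication controller (orphaning its pods) and polls until the name is free before recreating it under the new name. A small sketch of that wait with the context-first Get; the interval and timeout mirror the code above, and the function name is an assumption for illustration.

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// waitForRCDeletion polls until the named replication controller is gone,
// treating IsNotFound as success and any other error as fatal.
func waitForRCDeletion(c corev1client.ReplicationControllersGetter, namespace, name string) error {
	return wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		_, err := c.ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		switch {
		case err == nil:
			return false, nil // still present, keep polling
		case errors.IsNotFound(err):
			return true, nil // gone, done
		default:
			return false, err
		}
	})
}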
applyUpdate(rc) - if rc, e = rcClient.ReplicationControllers(namespace).Update(rc); e == nil { + if rc, e = rcClient.ReplicationControllers(namespace).Update(context.TODO(), rc); e == nil { // rc contains the latest controller post update return } updateErr := e // Update the controller with the latest resource version, if the update failed we // can't trust rc so use oldRc.Name. - if rc, e = rcClient.ReplicationControllers(namespace).Get(oldRc.Name, metav1.GetOptions{}); e != nil { + if rc, e = rcClient.ReplicationControllers(namespace).Get(context.TODO(), oldRc.Name, metav1.GetOptions{}); e != nil { // The Get failed: Value in rc cannot be trusted. rc = oldRc } @@ -813,11 +814,11 @@ func updatePodWithRetries(podClient corev1client.PodsGetter, namespace string, p err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) { // Apply the update, then attempt to push it to the apiserver. applyUpdate(pod) - if pod, e = podClient.Pods(namespace).Update(pod); e == nil { + if pod, e = podClient.Pods(namespace).Update(context.TODO(), pod); e == nil { return } updateErr := e - if pod, e = podClient.Pods(namespace).Get(oldPod.Name, metav1.GetOptions{}); e != nil { + if pod, e = podClient.Pods(namespace).Get(context.TODO(), oldPod.Name, metav1.GetOptions{}); e != nil { pod = oldPod } // Only return the error from update @@ -829,7 +830,7 @@ func updatePodWithRetries(podClient corev1client.PodsGetter, namespace string, p } func FindSourceController(r corev1client.ReplicationControllersGetter, namespace, name string) (*corev1.ReplicationController, error) { - list, err := r.ReplicationControllers(namespace).List(metav1.ListOptions{}) + list, err := r.ReplicationControllers(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, err } @@ -851,7 +852,7 @@ func controllerHasDesiredReplicas(rcClient corev1client.ReplicationControllersGe desiredGeneration := controller.Generation return func() (bool, error) { - ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{}) + ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(context.TODO(), controller.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go index b1ccd8ac9fd..f50afb4616e 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go @@ -18,6 +18,7 @@ package rollingupdate import ( "bytes" + "context" "fmt" "time" @@ -248,7 +249,7 @@ func (o *RollingUpdateOptions) Run() error { var newRc *corev1.ReplicationController // fetch rc - oldRc, err := coreClient.ReplicationControllers(o.Namespace).Get(o.OldName, metav1.GetOptions{}) + oldRc, err := coreClient.ReplicationControllers(o.Namespace).Get(context.TODO(), o.OldName, metav1.GetOptions{}) if err != nil { if !errors.IsNotFound(err) || len(o.Image) == 0 || !o.KeepOldName { return err @@ -431,7 +432,7 @@ func (o *RollingUpdateOptions) Run() error { if err != nil { return err } - coreClient.ReplicationControllers(config.NewRc.Namespace).Update(config.NewRc) + coreClient.ReplicationControllers(config.NewRc.Namespace).Update(context.TODO(), config.NewRc) } err = updater.Update(config) if err != nil { @@ -444,7 +445,7 @@ func (o *RollingUpdateOptions) Run() error { } else { message = fmt.Sprintf("rolling updated to %q", newRc.Name) } - newRc, err = 
coreClient.ReplicationControllers(o.Namespace).Get(newRc.Name, metav1.GetOptions{}) + newRc, err = coreClient.ReplicationControllers(o.Namespace).Get(context.TODO(), newRc.Name, metav1.GetOptions{}) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go b/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go index 849f236d335..721b21aa0a9 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go @@ -474,11 +474,11 @@ func waitForPod(podClient corev1client.PodsGetter, ns, name string, exitConditio lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector - return podClient.Pods(ns).List(options) + return podClient.Pods(ns).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector - return podClient.Pods(ns).Watch(options) + return podClient.Pods(ns).Watch(context.TODO(), options) }, } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/env/env_resolve.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/env/env_resolve.go index 2328ef7d3bd..93e28f98a26 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/env/env_resolve.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/env/env_resolve.go @@ -17,6 +17,7 @@ limitations under the License. package env import ( + "context" "fmt" "math" "strconv" @@ -51,7 +52,7 @@ func getSecretRefValue(client kubernetes.Interface, namespace string, store *Res secret, ok := store.SecretStore[secretSelector.Name] if !ok { var err error - secret, err = client.CoreV1().Secrets(namespace).Get(secretSelector.Name, metav1.GetOptions{}) + secret, err = client.CoreV1().Secrets(namespace).Get(context.TODO(), secretSelector.Name, metav1.GetOptions{}) if err != nil { return "", err } @@ -69,7 +70,7 @@ func getConfigMapRefValue(client kubernetes.Interface, namespace string, store * configMap, ok := store.ConfigMapStore[configMapSelector.Name] if !ok { var err error - configMap, err = client.CoreV1().ConfigMaps(namespace).Get(configMapSelector.Name, metav1.GetOptions{}) + configMap, err = client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMapSelector.Name, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go b/staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go index b13ea75130d..767a4dc8c2a 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go @@ -17,6 +17,7 @@ limitations under the License. 
package top import ( + "context" "errors" "github.com/spf13/cobra" @@ -199,13 +200,13 @@ func (o TopNodeOptions) RunTopNode() error { var nodes []v1.Node if len(o.ResourceName) > 0 { - node, err := o.NodeClient.Nodes().Get(o.ResourceName, metav1.GetOptions{}) + node, err := o.NodeClient.Nodes().Get(context.TODO(), o.ResourceName, metav1.GetOptions{}) if err != nil { return err } nodes = append(nodes, *node) } else { - nodeList, err := o.NodeClient.Nodes().List(metav1.ListOptions{ + nodeList, err := o.NodeClient.Nodes().List(context.TODO(), metav1.ListOptions{ LabelSelector: selector.String(), }) if err != nil { @@ -229,13 +230,13 @@ func getNodeMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, reso mc := metricsClient.MetricsV1beta1() nm := mc.NodeMetricses() if resourceName != "" { - m, err := nm.Get(resourceName, metav1.GetOptions{}) + m, err := nm.Get(context.TODO(), resourceName, metav1.GetOptions{}) if err != nil { return nil, err } versionedMetrics.Items = []metricsV1beta1api.NodeMetrics{*m} } else { - versionedMetrics, err = nm.List(metav1.ListOptions{LabelSelector: selector.String()}) + versionedMetrics, err = nm.List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go b/staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go index 04ab569fb78..3af15bca1be 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go @@ -17,6 +17,7 @@ limitations under the License. package top import ( + "context" "errors" "fmt" "time" @@ -212,13 +213,13 @@ func getMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, namespac } versionedMetrics := &metricsv1beta1api.PodMetricsList{} if resourceName != "" { - m, err := metricsClient.MetricsV1beta1().PodMetricses(ns).Get(resourceName, metav1.GetOptions{}) + m, err := metricsClient.MetricsV1beta1().PodMetricses(ns).Get(context.TODO(), resourceName, metav1.GetOptions{}) if err != nil { return nil, err } versionedMetrics.Items = []metricsv1beta1api.PodMetrics{*m} } else { - versionedMetrics, err = metricsClient.MetricsV1beta1().PodMetricses(ns).List(metav1.ListOptions{LabelSelector: selector.String()}) + versionedMetrics, err = metricsClient.MetricsV1beta1().PodMetricses(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, err } @@ -233,7 +234,7 @@ func getMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, namespac func verifyEmptyMetrics(o TopPodOptions, selector labels.Selector) error { if len(o.ResourceName) > 0 { - pod, err := o.PodClient.Pods(o.Namespace).Get(o.ResourceName, metav1.GetOptions{}) + pod, err := o.PodClient.Pods(o.Namespace).Get(context.TODO(), o.ResourceName, metav1.GetOptions{}) if err != nil { return err } @@ -241,7 +242,7 @@ func verifyEmptyMetrics(o TopPodOptions, selector labels.Selector) error { return err } } else { - pods, err := o.PodClient.Pods(o.Namespace).List(metav1.ListOptions{ + pods, err := o.PodClient.Pods(o.Namespace).List(context.TODO(), metav1.ListOptions{ LabelSelector: selector.String(), }) if err != nil { diff --git a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go index c73280300bd..dc3f838f008 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/versioned/describe.go @@ -18,6 +18,7 @@ package versioned import ( 
"bytes" + "context" "crypto/x509" "fmt" "io" @@ -363,11 +364,11 @@ type NamespaceDescriber struct { } func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - ns, err := d.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) + ns, err := d.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } - resourceQuotaList, err := d.CoreV1().ResourceQuotas(name).List(metav1.ListOptions{}) + resourceQuotaList, err := d.CoreV1().ResourceQuotas(name).List(context.TODO(), metav1.ListOptions{}) if err != nil { if errors.IsNotFound(err) { // Server does not support resource quotas. @@ -377,7 +378,7 @@ func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings return "", err } } - limitRangeList, err := d.CoreV1().LimitRanges(name).List(metav1.ListOptions{}) + limitRangeList, err := d.CoreV1().LimitRanges(name).List(context.TODO(), metav1.ListOptions{}) if err != nil { if errors.IsNotFound(err) { // Server does not support limit ranges. @@ -538,7 +539,7 @@ type LimitRangeDescriber struct { func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { lr := d.CoreV1().LimitRanges(namespace) - limitRange, err := lr.Get(name, metav1.GetOptions{}) + limitRange, err := lr.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -565,7 +566,7 @@ type ResourceQuotaDescriber struct { func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { rq := d.CoreV1().ResourceQuotas(namespace) - resourceQuota, err := rq.Get(name, metav1.GetOptions{}) + resourceQuota, err := rq.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -633,13 +634,13 @@ type PodDescriber struct { } func (d *PodDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - pod, err := d.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + pod, err := d.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { if describerSettings.ShowEvents { eventsInterface := d.CoreV1().Events(namespace) selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) options := metav1.ListOptions{FieldSelector: selector.String()} - events, err2 := eventsInterface.List(options) + events, err2 := eventsInterface.List(context.TODO(), options) if describerSettings.ShowEvents && err2 == nil && len(events.Items) > 0 { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) @@ -1326,7 +1327,7 @@ type PersistentVolumeDescriber struct { func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.CoreV1().PersistentVolumes() - pv, err := c.Get(name, metav1.GetOptions{}) + pv, err := c.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -1474,7 +1475,7 @@ type PersistentVolumeClaimDescriber struct { func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.CoreV1().PersistentVolumeClaims(namespace) - pvc, err := c.Get(name, metav1.GetOptions{}) + pvc, err := c.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -1492,7 +1493,7 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri } func 
getMountPods(c corev1client.PodInterface, pvcName string) ([]corev1.Pod, error) { - nsPods, err := c.List(metav1.ListOptions{}) + nsPods, err := c.List(context.TODO(), metav1.ListOptions{}) if err != nil { return []corev1.Pod{}, err } @@ -1948,7 +1949,7 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string, descri rc := d.CoreV1().ReplicationControllers(namespace) pc := d.CoreV1().Pods(namespace) - controller, err := rc.Get(name, metav1.GetOptions{}) + controller, err := rc.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2023,7 +2024,7 @@ func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings rsc := d.AppsV1().ReplicaSets(namespace) pc := d.CoreV1().Pods(namespace) - rs, err := rsc.Get(name, metav1.GetOptions{}) + rs, err := rsc.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2082,7 +2083,7 @@ type JobDescriber struct { } func (d *JobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - job, err := d.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{}) + job, err := d.BatchV1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2143,7 +2144,7 @@ type CronJobDescriber struct { } func (d *CronJobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - cronJob, err := d.client.BatchV1beta1().CronJobs(namespace).Get(name, metav1.GetOptions{}) + cronJob, err := d.client.BatchV1beta1().CronJobs(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2245,7 +2246,7 @@ func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings dc := d.AppsV1().DaemonSets(namespace) pc := d.CoreV1().Pods(namespace) - daemon, err := dc.Get(name, metav1.GetOptions{}) + daemon, err := dc.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2302,7 +2303,7 @@ type SecretDescriber struct { func (d *SecretDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.CoreV1().Secrets(namespace) - secret, err := c.Get(name, metav1.GetOptions{}) + secret, err := c.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2341,7 +2342,7 @@ type IngressDescriber struct { func (i *IngressDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := i.NetworkingV1beta1().Ingresses(namespace) - ing, err := c.Get(name, metav1.GetOptions{}) + ing, err := c.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2349,8 +2350,8 @@ func (i *IngressDescriber) Describe(namespace, name string, describerSettings de } func (i *IngressDescriber) describeBackend(ns string, backend *networkingv1beta1.IngressBackend) string { - endpoints, _ := i.CoreV1().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{}) - service, _ := i.CoreV1().Services(ns).Get(backend.ServiceName, metav1.GetOptions{}) + endpoints, _ := i.CoreV1().Endpoints(ns).Get(context.TODO(), backend.ServiceName, metav1.GetOptions{}) + service, _ := i.CoreV1().Services(ns).Get(context.TODO(), backend.ServiceName, metav1.GetOptions{}) spName := "" for i := range service.Spec.Ports { sp := &service.Spec.Ports[i] @@ -2448,12 +2449,12 @@ type ServiceDescriber struct { func (d *ServiceDescriber) Describe(namespace, name string, describerSettings 
describe.DescriberSettings) (string, error) { c := d.CoreV1().Services(namespace) - service, err := c.Get(name, metav1.GetOptions{}) + service, err := c.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } - endpoints, _ := d.CoreV1().Endpoints(namespace).Get(name, metav1.GetOptions{}) + endpoints, _ := d.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{}) var events *corev1.EventList if describerSettings.ShowEvents { events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, service) @@ -2551,7 +2552,7 @@ type EndpointsDescriber struct { func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.CoreV1().Endpoints(namespace) - ep, err := c.Get(name, metav1.GetOptions{}) + ep, err := c.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2626,7 +2627,7 @@ type EndpointSliceDescriber struct { func (d *EndpointSliceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.DiscoveryV1beta1().EndpointSlices(namespace) - eps, err := c.Get(name, metav1.GetOptions{}) + eps, err := c.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2719,7 +2720,7 @@ type ServiceAccountDescriber struct { func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.CoreV1().ServiceAccounts(namespace) - serviceAccount, err := c.Get(name, metav1.GetOptions{}) + serviceAccount, err := c.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2729,7 +2730,7 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett // missingSecrets is the set of all secrets present in the // serviceAccount but not present in the set of existing secrets. missingSecrets := sets.NewString() - secrets, err := d.CoreV1().Secrets(namespace).List(metav1.ListOptions{}) + secrets, err := d.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{}) // errors are tolerated here in order to describe the serviceAccount with all // of the secrets that it references, even if those secrets cannot be fetched. 
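The describers above share one pattern: Get the primary object with the placeholder context, then List related objects (events, pods, secrets) the same way, often tolerating list failures so the describe output degrades gracefully. A compressed sketch of that pattern; the names are illustrative and this is not the describer framework itself.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// describeServiceAccount fetches the ServiceAccount and, best effort, the
// secrets in its namespace; a failed secret list is tolerated, as in the
// ServiceAccount describer above.
func describeServiceAccount(client kubernetes.Interface, namespace, name string) (*corev1.ServiceAccount, []corev1.Secret, error) {
	sa, err := client.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return nil, nil, err
	}
	var secrets []corev1.Secret
	if list, err := client.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{}); err == nil {
		secrets = list.Items
	}
	return sa, secrets, nil
}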
@@ -2834,7 +2835,7 @@ type RoleDescriber struct { } func (d *RoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - role, err := d.RbacV1().Roles(namespace).Get(name, metav1.GetOptions{}) + role, err := d.RbacV1().Roles(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2873,7 +2874,7 @@ type ClusterRoleDescriber struct { } func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - role, err := d.RbacV1().ClusterRoles().Get(name, metav1.GetOptions{}) + role, err := d.RbacV1().ClusterRoles().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2929,7 +2930,7 @@ type RoleBindingDescriber struct { } func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - binding, err := d.RbacV1().RoleBindings(namespace).Get(name, metav1.GetOptions{}) + binding, err := d.RbacV1().RoleBindings(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2961,7 +2962,7 @@ type ClusterRoleBindingDescriber struct { } func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - binding, err := d.RbacV1().ClusterRoleBindings().Get(name, metav1.GetOptions{}) + binding, err := d.RbacV1().ClusterRoleBindings().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -2994,12 +2995,12 @@ type NodeDescriber struct { func (d *NodeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { mc := d.CoreV1().Nodes() - node, err := mc.Get(name, metav1.GetOptions{}) + node, err := mc.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } - lease, err := d.CoordinationV1().Leases(corev1.NamespaceNodeLease).Get(name, metav1.GetOptions{}) + lease, err := d.CoordinationV1().Leases(corev1.NamespaceNodeLease).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { if !errors.IsNotFound(err) { return "", err @@ -3015,7 +3016,7 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings descr // in a policy aware setting, users may have access to a node, but not all pods // in that case, we note that the user does not have access to the pods canViewPods := true - nodeNonTerminatedPodsList, err := d.CoreV1().Pods(namespace).List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) + nodeNonTerminatedPodsList, err := d.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{FieldSelector: fieldSelector.String()}) if err != nil { if !errors.IsForbidden(err) { return "", err @@ -3149,7 +3150,7 @@ type StatefulSetDescriber struct { } func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - ps, err := p.client.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{}) + ps, err := p.client.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3207,7 +3208,7 @@ type CertificateSigningRequestDescriber struct { } func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - csr, err := p.client.CertificatesV1beta1().CertificateSigningRequests().Get(name, metav1.GetOptions{}) + csr, err := 
p.client.CertificatesV1beta1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3294,7 +3295,7 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, desc // autoscaling/v2beta2 is introduced since v1.12 and autoscaling/v1 does not have full backward compatibility // with autoscaling/v2beta2, so describer will try to get and describe hpa v2beta2 object firstly, if it fails, // describer will fall back to do with hpa v1 object - hpaV2beta2, err := d.client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{}) + hpaV2beta2, err := d.client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err == nil { if describerSettings.ShowEvents { events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpaV2beta2) @@ -3302,7 +3303,7 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, desc return describeHorizontalPodAutoscalerV2beta2(hpaV2beta2, events, d) } - hpaV1, err := d.client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{}) + hpaV1, err := d.client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err == nil { if describerSettings.ShowEvents { events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpaV1) @@ -3598,7 +3599,7 @@ type DeploymentDescriber struct { } func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - d, err := dd.client.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) + d, err := dd.client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3671,7 +3672,7 @@ func printReplicaSetsByLabels(matchingRSs []*appsv1.ReplicaSet) string { func getPodStatusForController(c corev1client.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) { options := metav1.ListOptions{LabelSelector: selector.String()} - rcPods, err := c.List(options) + rcPods, err := c.List(context.TODO(), options) if err != nil { return } @@ -3703,7 +3704,7 @@ type ConfigMapDescriber struct { func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.CoreV1().ConfigMaps(namespace) - configMap, err := c.Get(name, metav1.GetOptions{}) + configMap, err := c.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3741,7 +3742,7 @@ type NetworkPolicyDescriber struct { func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.NetworkingV1().NetworkPolicies(namespace) - networkPolicy, err := c.Get(name, metav1.GetOptions{}) + networkPolicy, err := c.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3895,7 +3896,7 @@ type StorageClassDescriber struct { } func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - sc, err := s.StorageV1().StorageClasses().Get(name, metav1.GetOptions{}) + sc, err := s.StorageV1().StorageClasses().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3947,7 +3948,7 @@ type CSINodeDescriber struct { } func (c *CSINodeDescriber) Describe(namespace, name string, 
describerSettings describe.DescriberSettings) (string, error) { - csi, err := c.StorageV1().CSINodes().Get(name, metav1.GetOptions{}) + csi, err := c.StorageV1().CSINodes().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -4022,7 +4023,7 @@ type PodDisruptionBudgetDescriber struct { } func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - pdb, err := p.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{}) + pdb, err := p.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -4071,7 +4072,7 @@ type PriorityClassDescriber struct { } func (s *PriorityClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - pc, err := s.SchedulingV1().PriorityClasses().Get(name, metav1.GetOptions{}) + pc, err := s.SchedulingV1().PriorityClasses().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } @@ -4107,7 +4108,7 @@ type PodSecurityPolicyDescriber struct { } func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - psp, err := d.PolicyV1beta1().PodSecurityPolicies().Get(name, metav1.GetOptions{}) + psp, err := d.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/staging/src/k8s.io/kubectl/pkg/drain/cordon.go b/staging/src/k8s.io/kubectl/pkg/drain/cordon.go index 8f0f56d2412..7ce6bc13247 100644 --- a/staging/src/k8s.io/kubectl/pkg/drain/cordon.go +++ b/staging/src/k8s.io/kubectl/pkg/drain/cordon.go @@ -17,6 +17,7 @@ limitations under the License. 
package drain import ( + "context" "fmt" corev1 "k8s.io/api/core/v1" @@ -87,9 +88,9 @@ func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface) (error, er patchBytes, patchErr := strategicpatch.CreateTwoWayMergePatch(oldData, newData, c.node) if patchErr == nil { - _, err = client.Patch(c.node.Name, types.StrategicMergePatchType, patchBytes) + _, err = client.Patch(context.TODO(), c.node.Name, types.StrategicMergePatchType, patchBytes) } else { - _, err = client.Update(c.node) + _, err = client.Update(context.TODO(), c.node) } return err, patchErr } diff --git a/staging/src/k8s.io/kubectl/pkg/drain/drain.go b/staging/src/k8s.io/kubectl/pkg/drain/drain.go index 0ade8ebeb5c..ae6f166a0dd 100644 --- a/staging/src/k8s.io/kubectl/pkg/drain/drain.go +++ b/staging/src/k8s.io/kubectl/pkg/drain/drain.go @@ -130,7 +130,7 @@ func (d *Helper) makeDeleteOptions() *metav1.DeleteOptions { // DeletePod will delete the given pod, or return an error if it couldn't func (d *Helper) DeletePod(pod corev1.Pod) error { - return d.Client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, d.makeDeleteOptions()) + return d.Client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, d.makeDeleteOptions()) } // EvictPod will evict the give pod, or return an error if it couldn't @@ -160,7 +160,7 @@ func (d *Helper) GetPodsForDeletion(nodeName string) (*podDeleteList, []error) { return nil, []error{err} } - podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ + podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ LabelSelector: labelSelector.String(), FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()}) if err != nil { @@ -205,7 +205,7 @@ func (d *Helper) DeleteOrEvictPods(pods []corev1.Pod) error { // TODO(justinsb): unnecessary? 
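The drain helper above selects the pods scheduled to a node with a field selector on spec.nodeName and then deletes or evicts them. A sketch of just the selection step with the context-first List call; the label-selector handling, filters, and eviction logic are omitted, and the helper name is an assumption.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// podsOnNode lists every pod scheduled to nodeName across all namespaces,
// mirroring GetPodsForDeletion above.
func podsOnNode(client kubernetes.Interface, nodeName string) ([]corev1.Pod, error) {
	list, err := client.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String(),
	})
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}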
getPodFn := func(namespace, name string) (*corev1.Pod, error) { - return d.Client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + return d.Client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } if !d.DisableEviction { diff --git a/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go b/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go index a63c3c8c120..b8795d421ee 100644 --- a/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go +++ b/staging/src/k8s.io/kubectl/pkg/drain/drain_test.go @@ -241,7 +241,7 @@ func addEvictionSupport(t *testing.T, k *fake.Clientset) { eviction := *action.(ktest.CreateAction).GetObject().(*policyv1beta1.Eviction) // Avoid the lock go func() { - err := k.CoreV1().Pods(eviction.Namespace).Delete(eviction.Name, &metav1.DeleteOptions{}) + err := k.CoreV1().Pods(eviction.Namespace).Delete(context.TODO(), eviction.Name, &metav1.DeleteOptions{}) if err != nil { // Errorf because we can't call Fatalf from another goroutine t.Errorf("failed to delete pod: %s/%s", eviction.Namespace, eviction.Name) @@ -356,7 +356,7 @@ func TestDeleteOrEvict(t *testing.T) { // Test that other pods are still there var remainingPods []string { - podList, err := k.CoreV1().Pods("").List(metav1.ListOptions{}) + podList, err := k.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("error listing pods: %v", err) } diff --git a/staging/src/k8s.io/kubectl/pkg/drain/filters.go b/staging/src/k8s.io/kubectl/pkg/drain/filters.go index 29e3c21ede4..1ffbbbe5330 100644 --- a/staging/src/k8s.io/kubectl/pkg/drain/filters.go +++ b/staging/src/k8s.io/kubectl/pkg/drain/filters.go @@ -17,6 +17,7 @@ limitations under the License. package drain import ( + "context" "fmt" "strings" "time" @@ -172,7 +173,7 @@ func (d *Helper) daemonSetFilter(pod corev1.Pod) podDeleteStatus { return makePodDeleteStatusOkay() } - if _, err := d.Client.AppsV1().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil { + if _, err := d.Client.AppsV1().DaemonSets(pod.Namespace).Get(context.TODO(), controllerRef.Name, metav1.GetOptions{}); err != nil { // remove orphaned pods with a warning if --force is used if apierrors.IsNotFound(err) && d.Force { return makePodDeleteStatusWithWarning(true, err.Error()) diff --git a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go index 27286e71d5e..762d953f2c6 100644 --- a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go +++ b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/helpers.go @@ -41,7 +41,7 @@ import ( func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string, timeout time.Duration, sortBy func([]*corev1.Pod) sort.Interface) (*corev1.Pod, int, error) { options := metav1.ListOptions{LabelSelector: selector} - podList, err := client.Pods(namespace).List(options) + podList, err := client.Pods(namespace).List(context.TODO(), options) if err != nil { return nil, 0, err } @@ -57,7 +57,7 @@ func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string // Watch until we observe a pod options.ResourceVersion = podList.ResourceVersion - w, err := client.Pods(namespace).Watch(options) + w, err := client.Pods(namespace).Watch(context.TODO(), options) if err != nil { return nil, 0, err } diff --git a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history.go b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history.go index 2efc828131f..28cc6a81c20 100644 --- 
a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history.go +++ b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history.go @@ -18,6 +18,7 @@ package polymorphichelpers import ( "bytes" + "context" "fmt" "io" "text/tabwriter" @@ -101,7 +102,7 @@ type DeploymentHistoryViewer struct { // TODO: this should be a describer func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { versionedAppsClient := h.c.AppsV1() - deployment, err := versionedAppsClient.Deployments(namespace).Get(name, metav1.GetOptions{}) + deployment, err := versionedAppsClient.Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err) } @@ -265,7 +266,7 @@ func controlledHistoryV1( selector labels.Selector, accessor metav1.Object) ([]*appsv1.ControllerRevision, error) { var result []*appsv1.ControllerRevision - historyList, err := apps.ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) + historyList, err := apps.ControllerRevisions(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, err } @@ -286,7 +287,7 @@ func controlledHistory( selector labels.Selector, accessor metav1.Object) ([]*appsv1.ControllerRevision, error) { var result []*appsv1.ControllerRevision - historyList, err := apps.ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) + historyList, err := apps.ControllerRevisions(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, err } @@ -304,7 +305,7 @@ func controlledHistory( func daemonSetHistory( apps clientappsv1.AppsV1Interface, namespace, name string) (*appsv1.DaemonSet, []*appsv1.ControllerRevision, error) { - ds, err := apps.DaemonSets(namespace).Get(name, metav1.GetOptions{}) + ds, err := apps.DaemonSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, nil, fmt.Errorf("failed to retrieve DaemonSet %s: %v", name, err) } @@ -327,7 +328,7 @@ func daemonSetHistory( func statefulSetHistory( apps clientappsv1.AppsV1Interface, namespace, name string) (*appsv1.StatefulSet, []*appsv1.ControllerRevision, error) { - sts, err := apps.StatefulSets(namespace).Get(name, metav1.GetOptions{}) + sts, err := apps.StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, nil, fmt.Errorf("failed to retrieve Statefulset %s: %s", name, err.Error()) } diff --git a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history_test.go b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history_test.go index 3bef0bc3678..45bb569e008 100644 --- a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history_test.go +++ b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/history_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package polymorphichelpers import ( + "context" "reflect" "testing" @@ -84,7 +85,7 @@ func TestViewHistory(t *testing.T) { ) fakeClientSet := fake.NewSimpleClientset(ssStub) - _, err := fakeClientSet.AppsV1().ControllerRevisions("default").Create(ssStub1) + _, err := fakeClientSet.AppsV1().ControllerRevisions("default").Create(context.TODO(), ssStub1) if err != nil { t.Fatalf("create controllerRevisions error %v occurred ", err) } diff --git a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go index e3e64be8fa1..4ea1dc5cee6 100644 --- a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go +++ b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go @@ -18,6 +18,7 @@ package polymorphichelpers import ( "bytes" + "context" "fmt" "sort" @@ -109,7 +110,7 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m // to the external appsv1 Deployment without round-tripping through an internal version of Deployment. We're // currently getting rid of all internal versions of resources. So we specifically request the appsv1 version // here. This follows the same pattern as for DaemonSet and StatefulSet. - deployment, err := r.c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) + deployment, err := r.c.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("failed to retrieve Deployment %s: %v", name, err) } @@ -153,7 +154,7 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m } // Restore revision - if _, err = r.c.AppsV1().Deployments(namespace).Patch(name, patchType, patch); err != nil { + if _, err = r.c.AppsV1().Deployments(namespace).Patch(context.TODO(), name, patchType, patch); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } return rollbackSuccess, nil @@ -293,7 +294,7 @@ func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations ma } // Restore revision - if _, err = r.c.AppsV1().DaemonSets(accessor.GetNamespace()).Patch(accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { + if _, err = r.c.AppsV1().DaemonSets(accessor.GetNamespace()).Patch(context.TODO(), accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } @@ -380,7 +381,7 @@ func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations } // Restore revision - if _, err = r.c.AppsV1().StatefulSets(sts.Namespace).Patch(sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { + if _, err = r.c.AppsV1().StatefulSets(sts.Namespace).Patch(context.TODO(), sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } diff --git a/staging/src/k8s.io/kubectl/pkg/util/deployment/deployment.go b/staging/src/k8s.io/kubectl/pkg/util/deployment/deployment.go index 586ba5deeee..d44bf3b3463 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/deployment/deployment.go +++ b/staging/src/k8s.io/kubectl/pkg/util/deployment/deployment.go @@ -17,6 +17,7 @@ limitations under the License. package deployment import ( + "context" "sort" "strconv" @@ -95,7 +96,7 @@ func GetAllReplicaSets(deployment *appsv1.Deployment, c appsclient.AppsV1Interfa // RsListFromClient returns an rsListFunc that wraps the given client. 
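// ---------------------------------------------------------------------------
// Illustrative aside, not part of this diff: rsListFromClient (directly below)
// keeps its existing signature and buries context.TODO() inside the closure.
// A ctx-aware variant would capture a caller-supplied context instead. The
// sketch is an assumption; rsListFromClientWithContext and the example
// package name are not part of the kubectl codebase.
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// rsListFromClientWithContext returns a lister closure that uses the given ctx
// for every List call instead of context.TODO().
func rsListFromClientWithContext(ctx context.Context, c appsclient.AppsV1Interface) func(string, metav1.ListOptions) ([]*appsv1.ReplicaSet, error) {
	return func(namespace string, options metav1.ListOptions) ([]*appsv1.ReplicaSet, error) {
		rsList, err := c.ReplicaSets(namespace).List(ctx, options)
		if err != nil {
			return nil, err
		}
		ret := make([]*appsv1.ReplicaSet, 0, len(rsList.Items))
		for i := range rsList.Items {
			ret = append(ret, &rsList.Items[i])
		}
		return ret, nil
	}
}
// ---------------------------------------------------------------------------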
func rsListFromClient(c appsclient.AppsV1Interface) rsListFunc { return func(namespace string, options metav1.ListOptions) ([]*appsv1.ReplicaSet, error) { - rsList, err := c.ReplicaSets(namespace).List(options) + rsList, err := c.ReplicaSets(namespace).List(context.TODO(), options) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go b/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go index 252c70d471e..8e67eb7e459 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go @@ -2084,7 +2084,7 @@ func (d *awsDisk) modifyVolume(requestGiB int64) (int64, error) { // applyUnSchedulableTaint applies a unschedulable taint to a node after verifying // if node has become unusable because of volumes getting stuck in attaching state. func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) { - node, fetchErr := c.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) + node, fetchErr := c.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(nodeName), metav1.GetOptions{}) if fetchErr != nil { klog.Errorf("Error fetching node %s with %v", nodeName, fetchErr) return diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config.go index 7f094e47e0e..cf953b331a2 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config.go @@ -19,6 +19,7 @@ limitations under the License. package azure import ( + "context" "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -68,7 +69,7 @@ func (az *Cloud) getConfigFromSecret() (*Config, error) { return nil, nil } - secret, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Get(cloudConfigSecretName, metav1.GetOptions{}) + secret, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Get(context.TODO(), cloudConfigSecretName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get secret %s: %v", cloudConfigSecretName, err) } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go index 8d713595f1d..b5a2c8d19e1 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config_test.go @@ -19,6 +19,7 @@ limitations under the License. package azure import ( + "context" "testing" "github.com/Azure/go-autorest/autorest/to" @@ -153,7 +154,7 @@ func TestGetConfigFromSecret(t *testing.T) { "cloud-config": secretData, } } - _, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Create(secret) + _, err := az.kubeClient.CoreV1().Secrets(cloudConfigNamespace).Create(context.TODO(), secret) assert.NoError(t, err, test.name) } diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go index b2e2a1deaf9..7bb10208c3f 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go @@ -19,6 +19,7 @@ limitations under the License. 
package gce import ( + "context" "crypto/rand" "encoding/hex" "errors" @@ -199,7 +200,7 @@ func (ci *ClusterID) getOrInitialize() error { UIDProvider: newID, } - if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(cfg); err != nil { + if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(context.TODO(), cfg); err != nil { klog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) return err } diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal_test.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal_test.go index 4b054794e59..1a75bc072d0 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal_test.go @@ -157,7 +157,7 @@ func TestEnsureInternalLoadBalancer(t *testing.T) { gce, err := fakeGCECloud(vals) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -182,7 +182,7 @@ func TestEnsureInternalLoadBalancerDeprecatedAnnotation(t *testing.T) { } svc := fakeLoadBalancerServiceDeprecatedAnnotation(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) if err != nil { t.Errorf("Failed to create service %s, err %v", svc.Name, err) } @@ -220,7 +220,7 @@ func TestEnsureInternalLoadBalancerWithExistingResources(t *testing.T) { gce, err := fakeGCECloud(vals) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) // Create the expected resources necessary for an Internal Load Balancer nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace} @@ -257,7 +257,7 @@ func TestEnsureInternalLoadBalancerClearPreviousResources(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -336,7 +336,7 @@ func TestEnsureInternalLoadBalancerHealthCheckConfigurable(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -368,7 +368,7 @@ func TestUpdateInternalLoadBalancerBackendServices(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) _, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, 
vals.ZoneName) assert.NoError(t, err) @@ -424,7 +424,7 @@ func TestUpdateInternalLoadBalancerNodes(t *testing.T) { node1Name := []string{"test-node-1"} svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) nodes, err := createAndInsertNodes(gce, node1Name, vals.ZoneName) require.NoError(t, err) @@ -491,7 +491,7 @@ func TestEnsureInternalLoadBalancerDeleted(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) _, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -509,7 +509,7 @@ func TestEnsureInternalLoadBalancerDeletedTwiceDoesNotError(t *testing.T) { gce, err := fakeGCECloud(vals) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) _, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName) @@ -535,7 +535,7 @@ func TestEnsureInternalLoadBalancerWithSpecialHealthCheck(t *testing.T) { svc.Spec.HealthCheckNodePort = healthCheckNodePort svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, []string{nodeName}, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -554,7 +554,7 @@ func TestClearPreviousInternalResources(t *testing.T) { svc := fakeLoadbalancerService(string(LBTypeInternal)) gce, err := fakeGCECloud(vals) require.NoError(t, err) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) loadBalancerName := gce.GetLoadBalancerName(context.TODO(), "", svc) nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace} @@ -619,7 +619,7 @@ func TestEnsureInternalFirewallDeletesLegacyFirewall(t *testing.T) { require.NoError(t, err) vals := DefaultTestClusterValues() svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) fwName := MakeFirewallName(lbName) @@ -695,7 +695,7 @@ func TestEnsureInternalFirewallSucceedsOnXPN(t *testing.T) { require.NoError(t, err) vals := DefaultTestClusterValues() svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) fwName := MakeFirewallName(lbName) @@ -772,7 +772,7 @@ func 
TestEnsureLoadBalancerDeletedSucceedsOnXPN(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) _, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -796,7 +796,7 @@ func TestEnsureInternalInstanceGroupsDeleted(t *testing.T) { igName := makeInstanceGroupName(vals.ClusterID) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) _, err = createInternalLoadBalancer(gce, svc, nil, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.NoError(t, err) @@ -916,7 +916,7 @@ func TestEnsureInternalLoadBalancerErrors(t *testing.T) { if tc.injectMock != nil { tc.injectMock(gce.c.(*cloud.MockGCE)) } - _, err = gce.client.CoreV1().Services(params.service.Namespace).Create(params.service) + _, err = gce.client.CoreV1().Services(params.service.Namespace).Create(context.TODO(), params.service) require.NoError(t, err) status, err := gce.ensureInternalLoadBalancer( params.clusterName, @@ -1029,7 +1029,7 @@ func TestEnsureInternalLoadBalancerSubsetting(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.EqualError(t, err, cloudprovider.ImplementedElsewhere.Error()) @@ -1064,7 +1064,7 @@ func TestEnsureInternalLoadBalancerDeletedSubsetting(t *testing.T) { nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) @@ -1095,7 +1095,7 @@ func TestEnsureInternalLoadBalancerGlobalAccess(t *testing.T) { nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -1160,7 +1160,7 @@ func TestEnsureInternalLoadBalancerDisableGlobalAccess(t *testing.T) { nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) svc.Annotations[ServiceAnnotationILBAllowGlobalAccess] = "true" lbName := 
gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -1211,7 +1211,7 @@ func TestGlobalAccessChangeScheme(t *testing.T) { nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -1372,7 +1372,7 @@ func TestEnsureInternalLoadBalancerCustomSubnet(t *testing.T) { nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName) require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) lbName := gce.GetLoadBalancerName(context.TODO(), "", svc) @@ -1526,13 +1526,13 @@ func TestEnsureInternalLoadBalancerFinalizer(t *testing.T) { require.NoError(t, err) svc := fakeLoadbalancerService(string(LBTypeInternal)) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) require.NoError(t, err) assert.NotEmpty(t, status.Ingress) assertInternalLbResources(t, gce, svc, vals, nodeNames) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) require.NoError(t, err) if !hasFinalizer(svc, ILBFinalizerV1) { t.Errorf("Expected finalizer '%s' not found in Finalizer list - %v", ILBFinalizerV1, svc.Finalizers) @@ -1542,7 +1542,7 @@ func TestEnsureInternalLoadBalancerFinalizer(t *testing.T) { err = gce.EnsureLoadBalancerDeleted(context.Background(), vals.ClusterName, svc) require.NoError(t, err) assertInternalLbResourcesDeleted(t, gce, svc, vals, true) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) require.NoError(t, err) if hasFinalizer(svc, ILBFinalizerV1) { t.Errorf("Finalizer '%s' not deleted as part of ILB delete", ILBFinalizerV1) @@ -1564,7 +1564,7 @@ func TestEnsureLoadBalancerSkipped(t *testing.T) { svc := fakeLoadbalancerService(string(LBTypeInternal)) // Add the V2 finalizer svc.Finalizers = append(svc.Finalizers, ILBFinalizerV2) - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) require.NoError(t, err) status, err := createInternalLoadBalancer(gce, svc, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) assert.EqualError(t, err, cloudprovider.ImplementedElsewhere.Error()) diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_test.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_test.go index 99d48522c74..df9862feee6 100644 --- 
a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_test.go @@ -83,7 +83,7 @@ func TestEnsureLoadBalancerCreatesInternalLb(t *testing.T) { require.NoError(t, err) apiService := fakeLoadbalancerService(string(LBTypeInternal)) - apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(apiService) + apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(context.TODO(), apiService) require.NoError(t, err) status, err := gce.EnsureLoadBalancer(context.Background(), vals.ClusterName, apiService, nodes) assert.NoError(t, err) @@ -128,7 +128,7 @@ func TestEnsureLoadBalancerDeletesExistingExternalLb(t *testing.T) { createExternalLoadBalancer(gce, apiService, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) apiService = fakeLoadbalancerService(string(LBTypeInternal)) - apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(apiService) + apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(context.TODO(), apiService) require.NoError(t, err) status, err := gce.EnsureLoadBalancer(context.Background(), vals.ClusterName, apiService, nodes) assert.NoError(t, err) @@ -169,7 +169,7 @@ func TestEnsureLoadBalancerDeletedDeletesInternalLb(t *testing.T) { require.NoError(t, err) apiService := fakeLoadbalancerService(string(LBTypeInternal)) - apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(apiService) + apiService, err = gce.client.CoreV1().Services(apiService.Namespace).Create(context.TODO(), apiService) require.NoError(t, err) createInternalLoadBalancer(gce, apiService, nil, nodeNames, vals.ClusterName, vals.ClusterID, vals.ZoneName) diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go index 48ad9fe88b0..1a0937698b4 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util.go @@ -413,7 +413,7 @@ func patchService(c v1core.CoreV1Interface, oldSvc *v1.Service, newSvc *v1.Servi return nil, err } - return c.Services(oldSvc.Namespace).Patch(oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status") + return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status") } func getPatchBytes(oldSvc *v1.Service, newSvc *v1.Service) ([]byte, error) { diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util_test.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util_test.go index bd545fe684f..65ff1072ef5 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_util_test.go @@ -19,6 +19,7 @@ limitations under the License. 
package gce import ( + "context" "net" "reflect" "testing" @@ -127,7 +128,7 @@ func TestAddRemoveFinalizer(t *testing.T) { if err != nil { t.Fatalf("Failed to get GCE client, err %v", err) } - svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(svc) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc) if err != nil { t.Errorf("Failed to create service %s, err %v", svc.Name, err) } @@ -136,7 +137,7 @@ func TestAddRemoveFinalizer(t *testing.T) { if err != nil { t.Fatalf("Failed to add finalizer, err %v", err) } - svc, err = gce.client.CoreV1().Services(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Failed to get service, err %v", err) } @@ -147,7 +148,7 @@ func TestAddRemoveFinalizer(t *testing.T) { if err != nil { t.Fatalf("Failed to remove finalizer, err %v", err) } - svc, err = gce.client.CoreV1().Services(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) + svc, err = gce.client.CoreV1().Services(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Failed to get service, err %v", err) } diff --git a/staging/src/k8s.io/legacy-cloud-providers/openstack/openstack.go b/staging/src/k8s.io/legacy-cloud-providers/openstack/openstack.go index c3db860b5e4..2c2d25251d1 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/openstack/openstack.go +++ b/staging/src/k8s.io/legacy-cloud-providers/openstack/openstack.go @@ -295,7 +295,7 @@ func setConfigFromSecret(cfg *Config) error { return fmt.Errorf("failed to get kubernetes client: %v", err) } - secret, err := k8sClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + secret, err := k8sClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { klog.Warningf("Cannot get secret %s in namespace %s. error: %q", secretName, secretNamespace, err) return err diff --git a/staging/src/k8s.io/metrics/pkg/client/clientset_test/clientset_test.go b/staging/src/k8s.io/metrics/pkg/client/clientset_test/clientset_test.go index 38632b8aaf8..db77233c111 100644 --- a/staging/src/k8s.io/metrics/pkg/client/clientset_test/clientset_test.go +++ b/staging/src/k8s.io/metrics/pkg/client/clientset_test/clientset_test.go @@ -17,6 +17,7 @@ limitations under the License. package clientset_test import ( + "context" "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,16 +27,16 @@ import ( // TestFakeList is a basic sanity check that makes sure the fake Clientset is working properly. 
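// ---------------------------------------------------------------------------
// Illustrative aside, not part of this diff: TestFakeList (directly below)
// only needs context.TODO() added to each List call. The same ctx-first
// pattern applies to any fake clientset in unit tests; this sketch uses the
// core client-go fake rather than the metrics fake under test below, and the
// test name and seeded pod are hypothetical.
package example

import (
	"context"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestFakeGetWithContext(t *testing.T) {
	// Seed the fake clientset with one pod, then read it back with a context.
	client := fake.NewSimpleClientset(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pod", Namespace: "default"},
	})
	if _, err := client.CoreV1().Pods("default").Get(context.TODO(), "example-pod", metav1.GetOptions{}); err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
}
// ---------------------------------------------------------------------------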
func TestFakeList(t *testing.T) { client := fake.NewSimpleClientset() - if _, err := client.MetricsV1alpha1().PodMetricses("").List(metav1.ListOptions{}); err != nil { + if _, err := client.MetricsV1alpha1().PodMetricses("").List(context.TODO(), metav1.ListOptions{}); err != nil { t.Errorf("Unexpected error: %v", err) } - if _, err := client.MetricsV1alpha1().NodeMetricses().List(metav1.ListOptions{}); err != nil { + if _, err := client.MetricsV1alpha1().NodeMetricses().List(context.TODO(), metav1.ListOptions{}); err != nil { t.Errorf("Unexpected error: %v", err) } - if _, err := client.MetricsV1beta1().PodMetricses("").List(metav1.ListOptions{}); err != nil { + if _, err := client.MetricsV1beta1().PodMetricses("").List(context.TODO(), metav1.ListOptions{}); err != nil { t.Errorf("Unexpected error: %v", err) } - if _, err := client.MetricsV1beta1().NodeMetricses().List(metav1.ListOptions{}); err != nil { + if _, err := client.MetricsV1beta1().NodeMetricses().List(context.TODO(), metav1.ListOptions{}); err != nil { t.Errorf("Unexpected error: %v", err) } } diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1alpha1/fischer.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1alpha1/fischer.go index c748ace4e1c..74f3425bba2 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1alpha1/fischer.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1alpha1/fischer.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,13 +61,13 @@ func NewFilteredFischerInformer(client versioned.Interface, resyncPeriod time.Du if tweakListOptions != nil { tweakListOptions(&options) } - return client.WardleV1alpha1().Fischers().List(options) + return client.WardleV1alpha1().Fischers().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.WardleV1alpha1().Fischers().Watch(options) + return client.WardleV1alpha1().Fischers().Watch(context.TODO(), options) }, }, &wardlev1alpha1.Fischer{}, diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1alpha1/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1alpha1/flunder.go index c6f4f387578..7a432b914eb 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1alpha1/flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1alpha1/flunder.go @@ -19,6 +19,7 @@ limitations under the License. 
package v1alpha1 import ( + "context" time "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredFlunderInformer(client versioned.Interface, namespace string, re if tweakListOptions != nil { tweakListOptions(&options) } - return client.WardleV1alpha1().Flunders(namespace).List(options) + return client.WardleV1alpha1().Flunders(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.WardleV1alpha1().Flunders(namespace).Watch(options) + return client.WardleV1alpha1().Flunders(namespace).Watch(context.TODO(), options) }, }, &wardlev1alpha1.Flunder{}, diff --git a/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1beta1/flunder.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1beta1/flunder.go index 368b14e1b7d..4bba41dd5a8 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1beta1/flunder.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/informers/externalversions/wardle/v1beta1/flunder.go @@ -19,6 +19,7 @@ limitations under the License. package v1beta1 import ( + "context" time "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredFlunderInformer(client versioned.Interface, namespace string, re if tweakListOptions != nil { tweakListOptions(&options) } - return client.WardleV1beta1().Flunders(namespace).List(options) + return client.WardleV1beta1().Flunders(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.WardleV1beta1().Flunders(namespace).Watch(options) + return client.WardleV1beta1().Flunders(namespace).Watch(context.TODO(), options) }, }, &wardlev1beta1.Flunder{}, diff --git a/staging/src/k8s.io/sample-controller/controller.go b/staging/src/k8s.io/sample-controller/controller.go index b3447505b57..4b8d29f0209 100644 --- a/staging/src/k8s.io/sample-controller/controller.go +++ b/staging/src/k8s.io/sample-controller/controller.go @@ -17,6 +17,7 @@ limitations under the License. package main import ( + "context" "fmt" "time" @@ -273,7 +274,7 @@ func (c *Controller) syncHandler(key string) error { deployment, err := c.deploymentsLister.Deployments(foo.Namespace).Get(deploymentName) // If the resource doesn't exist, we'll create it if errors.IsNotFound(err) { - deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Create(newDeployment(foo)) + deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Create(context.TODO(), newDeployment(foo)) } // If an error occurs during Get/Create, we'll requeue the item so we can @@ -296,7 +297,7 @@ func (c *Controller) syncHandler(key string) error { // should update the Deployment resource. 
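// ---------------------------------------------------------------------------
// Illustrative aside, not part of this diff: the sample-controller keeps
// context.TODO() at each call site. A controller that owns a long-lived
// context could pass it down instead, so in-flight writes are cancelled on
// shutdown. The ctx-taking syncDeployment helper is hypothetical; the Update
// signature (context plus the object) matches this diff.
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/client-go/kubernetes"
)

// syncDeployment writes the desired Deployment using the caller's context.
func syncDeployment(ctx context.Context, client kubernetes.Interface, desired *appsv1.Deployment) (*appsv1.Deployment, error) {
	return client.AppsV1().Deployments(desired.Namespace).Update(ctx, desired)
}
// ---------------------------------------------------------------------------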
if foo.Spec.Replicas != nil && *foo.Spec.Replicas != *deployment.Spec.Replicas { klog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas) - deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(newDeployment(foo)) + deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(context.TODO(), newDeployment(foo)) } // If an error occurs during Update, we'll requeue the item so we can @@ -327,7 +328,7 @@ func (c *Controller) updateFooStatus(foo *samplev1alpha1.Foo, deployment *appsv1 // we must use Update instead of UpdateStatus to update the Status block of the Foo resource. // UpdateStatus will not allow changes to the Spec of the resource, // which is ideal for ensuring nothing other than resource status has been updated. - _, err := c.sampleclientset.SamplecontrollerV1alpha1().Foos(foo.Namespace).Update(fooCopy) + _, err := c.sampleclientset.SamplecontrollerV1alpha1().Foos(foo.Namespace).Update(context.TODO(), fooCopy) return err } diff --git a/staging/src/k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller/v1alpha1/foo.go b/staging/src/k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller/v1alpha1/foo.go index 21fda895bd5..44f3487cf21 100644 --- a/staging/src/k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller/v1alpha1/foo.go +++ b/staging/src/k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller/v1alpha1/foo.go @@ -19,6 +19,7 @@ limitations under the License. package v1alpha1 import ( + "context" time "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,13 +62,13 @@ func NewFilteredFooInformer(client versioned.Interface, namespace string, resync if tweakListOptions != nil { tweakListOptions(&options) } - return client.SamplecontrollerV1alpha1().Foos(namespace).List(options) + return client.SamplecontrollerV1alpha1().Foos(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.SamplecontrollerV1alpha1().Foos(namespace).Watch(options) + return client.SamplecontrollerV1alpha1().Foos(namespace).Watch(context.TODO(), options) }, }, &samplecontrollerv1alpha1.Foo{}, diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index 9916c27ae81..d200bcaeb94 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -102,16 +102,16 @@ var _ = SIGDescribe("Aggregator", func() { func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientset, namespace string) { // delete the APIService first to avoid causing discovery errors - _ = aggrclient.ApiregistrationV1().APIServices().Delete("v1alpha1.wardle.example.com", nil) + _ = aggrclient.ApiregistrationV1().APIServices().Delete(context.TODO(), "v1alpha1.wardle.example.com", nil) - _ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver-deployment", nil) - _ = client.CoreV1().Secrets(namespace).Delete("sample-apiserver-secret", nil) - _ = client.CoreV1().Services(namespace).Delete("sample-api", nil) - _ = client.CoreV1().ServiceAccounts(namespace).Delete("sample-apiserver", nil) - _ = client.RbacV1().RoleBindings("kube-system").Delete("wardler-auth-reader", nil) - _ = client.RbacV1().ClusterRoleBindings().Delete("wardler:"+namespace+":auth-delegator", nil) - _ = 
client.RbacV1().ClusterRoles().Delete("sample-apiserver-reader", nil) - _ = client.RbacV1().ClusterRoleBindings().Delete("wardler:"+namespace+":sample-apiserver-reader", nil) + _ = client.AppsV1().Deployments(namespace).Delete(context.TODO(), "sample-apiserver-deployment", nil) + _ = client.CoreV1().Secrets(namespace).Delete(context.TODO(), "sample-apiserver-secret", nil) + _ = client.CoreV1().Services(namespace).Delete(context.TODO(), "sample-api", nil) + _ = client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), "sample-apiserver", nil) + _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), "wardler-auth-reader", nil) + _ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":auth-delegator", nil) + _ = client.RbacV1().ClusterRoles().Delete(context.TODO(), "sample-apiserver-reader", nil) + _ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":sample-apiserver-reader", nil) } // TestSampleAPIServer is a basic test if the sample-apiserver code from 1.10 and compiled against 1.10 @@ -139,12 +139,12 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl "tls.key": certCtx.key, }, } - _, err := client.CoreV1().Secrets(namespace).Create(secret) + _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret) framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace) // kubectl create -f clusterrole.yaml - _, err = client.RbacV1().ClusterRoles().Create(&rbacv1.ClusterRole{ - // role for listing ValidatingWebhookConfiguration/MutatingWebhookConfiguration/Namespaces + _, err = client.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver-reader"}, Rules: []rbacv1.PolicyRule{ rbacv1helpers.NewRule("get", "list", "watch").Groups("").Resources("namespaces").RuleOrDie(), @@ -153,7 +153,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl }) framework.ExpectNoError(err, "creating cluster role %s", "sample-apiserver-reader") - _, err = client.RbacV1().ClusterRoleBindings().Create(&rbacv1.ClusterRoleBinding{ + _, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "wardler:" + namespace + ":sample-apiserver-reader", }, @@ -174,7 +174,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":sample-apiserver-reader") // kubectl create -f authDelegator.yaml - _, err = client.RbacV1().ClusterRoleBindings().Create(&rbacv1.ClusterRoleBinding{ + _, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "wardler:" + namespace + ":auth-delegator", }, @@ -272,7 +272,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl }, }, } - deployment, err := client.AppsV1().Deployments(namespace).Create(d) + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace) @@ -298,16 +298,16 @@ func TestSampleAPIServer(f 
*framework.Framework, aggrclient *aggregatorclient.Cl }, }, } - _, err = client.CoreV1().Services(namespace).Create(service) + _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service) framework.ExpectNoError(err, "creating service %s in namespace %s", "sample-apiserver", namespace) // kubectl create -f serviceAccount.yaml sa := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver"}} - _, err = client.CoreV1().ServiceAccounts(namespace).Create(sa) + _, err = client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), sa) framework.ExpectNoError(err, "creating service account %s in namespace %s", "sample-apiserver", namespace) // kubectl create -f auth-reader.yaml - _, err = client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{ + _, err = client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "wardler-auth-reader", Annotations: map[string]string{ @@ -322,7 +322,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: "default", // "sample-apiserver", + Name: "default", Namespace: namespace, }, }, @@ -337,7 +337,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace) // kubectl create -f apiservice.yaml - _, err = aggrclient.ApiregistrationV1().APIServices().Create(&apiregistrationv1.APIService{ + _, err = aggrclient.ApiregistrationV1().APIServices().Create(context.TODO(), &apiregistrationv1.APIService{ ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.wardle.example.com"}, Spec: apiregistrationv1.APIServiceSpec{ Service: &apiregistrationv1.ServiceReference{ @@ -361,8 +361,8 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl err = pollTimed(100*time.Millisecond, 60*time.Second, func() (bool, error) { - currentAPIService, _ = aggrclient.ApiregistrationV1().APIServices().Get("v1alpha1.wardle.example.com", metav1.GetOptions{}) - currentPods, _ = client.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + currentAPIService, _ = aggrclient.ApiregistrationV1().APIServices().Get(context.TODO(), "v1alpha1.wardle.example.com", metav1.GetOptions{}) + currentPods, _ = client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) request := restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders") request.SetHeader("Accept", "application/json") @@ -421,7 +421,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl framework.ExpectEqual(u.GetKind(), "Flunder") framework.ExpectEqual(u.GetName(), flunderName) - pods, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + pods, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "getting pods for flunders service") // kubectl get flunders -v 9 @@ -519,11 +519,11 @@ func validateErrorWithDebugInfo(f *framework.Framework, err error, pods *v1.PodL msg := fmt.Sprintf(msg, fields...) 
msg += fmt.Sprintf(" but received unexpected error:\n%v", err) client := f.ClientSet - ep, err := client.CoreV1().Endpoints(namespace).Get("sample-api", metav1.GetOptions{}) + ep, err := client.CoreV1().Endpoints(namespace).Get(context.TODO(), "sample-api", metav1.GetOptions{}) if err == nil { msg += fmt.Sprintf("\nFound endpoints for sample-api:\n%v", ep) } - pds, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + pds, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) if err == nil { msg += fmt.Sprintf("\nFound pods in %s:\n%v", namespace, pds) msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods) diff --git a/test/e2e/apimachinery/chunking.go b/test/e2e/apimachinery/chunking.go index 3e0b87a74ff..62f07a524fd 100644 --- a/test/e2e/apimachinery/chunking.go +++ b/test/e2e/apimachinery/chunking.go @@ -53,7 +53,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { ginkgo.By("creating a large number of resources") workqueue.ParallelizeUntil(context.TODO(), 20, numberOfTotalResources, func(i int) { for tries := 3; tries >= 0; tries-- { - _, err := client.Create(&v1.PodTemplate{ + _, err := client.Create(context.TODO(), &v1.PodTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("template-%04d", i), }, @@ -85,7 +85,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { var lastRV string for { opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1) - list, err := client.List(opts) + list, err := client.List(context.TODO(), opts) framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit) framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue) gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit)) @@ -116,7 +116,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { ginkgo.By("retrieving those results all at once") opts := metav1.ListOptions{Limit: numberOfTotalResources + 1} - list, err := client.List(opts) + list, err := client.List(context.TODO(), opts) framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit) gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources)) }) @@ -130,7 +130,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { oneTenth := int64(numberOfTotalResources / 10) opts := metav1.ListOptions{} opts.Limit = oneTenth - list, err := client.List(opts) + list, err := client.List(context.TODO(), opts) framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit) firstToken := list.Continue firstRV := list.ResourceVersion @@ -148,7 +148,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { opts.Continue = firstToken var inconsistentToken string wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) { - _, err := client.List(opts) + _, err := client.List(context.TODO(), opts) if err == nil { framework.Logf("Token %s has not expired yet", firstToken) return false, nil @@ -171,7 +171,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { ginkgo.By("retrieving the second page again with the token received with the error message") opts.Continue = inconsistentToken - list, err = client.List(opts) + list, err = client.List(context.TODO(), opts) framework.ExpectNoError(err, "failed to list pod templates in 
namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit) framework.ExpectNotEqual(list.ResourceVersion, firstRV) gomega.Expect(len(list.Items)).To(gomega.BeNumerically("==", opts.Limit)) @@ -194,7 +194,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { opts.Continue = list.Continue lastRV := list.ResourceVersion for { - list, err := client.List(opts) + list, err := client.List(context.TODO(), opts) framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit) if shouldCheckRemainingItem() { if list.GetContinue() == "" { diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go index 6fbac4a9d09..ad8a4fdf1f7 100644 --- a/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/test/e2e/apimachinery/crd_conversion_webhook.go @@ -17,6 +17,7 @@ limitations under the License. package apimachinery import ( + "context" "fmt" "time" @@ -208,17 +209,17 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]", }) func cleanCRDWebhookTest(client clientset.Interface, namespaceName string) { - _ = client.CoreV1().Services(namespaceName).Delete(serviceCRDName, nil) - _ = client.AppsV1().Deployments(namespaceName).Delete(deploymentCRDName, nil) - _ = client.CoreV1().Secrets(namespaceName).Delete(secretCRDName, nil) - _ = client.RbacV1().RoleBindings("kube-system").Delete(roleBindingCRDName, nil) + _ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceCRDName, nil) + _ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentCRDName, nil) + _ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretCRDName, nil) + _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingCRDName, nil) } func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespace string) { ginkgo.By("Create role binding to let cr conversion webhook read extension-apiserver-authentication") client := f.ClientSet // Create the role binding to allow the webhook read the extension-apiserver-authentication configmap - _, err := client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{ + _, err := client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: roleBindingCRDName, }, @@ -227,7 +228,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa Kind: "Role", Name: "extension-apiserver-authentication-reader", }, - // Webhook uses the default service account. 
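// ---------------------------------------------------------------------------
// Illustrative aside, not part of this diff: e2e helpers such as the role
// binding creation above follow the same rule, context first and then the
// object. A hypothetical idempotent variant that tolerates an existing
// binding could look like this; ensureRoleBinding is not a name from the
// test framework, and the Create signature (context plus the object) is the
// one used in this diff.
package example

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/kubernetes"
)

// ensureRoleBinding creates the binding and ignores "already exists" so the
// helper can be re-run safely.
func ensureRoleBinding(ctx context.Context, client kubernetes.Interface, ns string, rb *rbacv1.RoleBinding) error {
	if _, err := client.RbacV1().RoleBindings(ns).Create(ctx, rb); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	return nil
}
// ---------------------------------------------------------------------------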
+ Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", @@ -259,7 +260,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string, }, } namespace := f.Namespace.Name - _, err := client.CoreV1().Secrets(namespace).Create(secret) + _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret) framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace) // Create the deployment of the webhook @@ -335,7 +336,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string, }, }, } - deployment, err := client.AppsV1().Deployments(namespace).Create(d) + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace) ginkgo.By("Wait for the deployment to be ready") err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image) @@ -363,7 +364,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string, }, }, } - _, err = client.CoreV1().Services(namespace).Create(service) + _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service) framework.ExpectNoError(err, "creating service %s in namespace %s", serviceCRDName, namespace) ginkgo.By("Verifying the service has paired with the endpoint") diff --git a/test/e2e/apimachinery/crd_publish_openapi.go b/test/e2e/apimachinery/crd_publish_openapi.go index d727c861ca2..20058a0d02e 100644 --- a/test/e2e/apimachinery/crd_publish_openapi.go +++ b/test/e2e/apimachinery/crd_publish_openapi.go @@ -17,6 +17,7 @@ limitations under the License. package apimachinery import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -392,7 +393,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu {"op":"test","path":"/spec/versions/1/name","value":"v3"}, {"op": "replace", "path": "/spec/versions/1/name", "value": "v4"} ]`) - crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(crdMultiVer.Crd.Name, types.JSONPatchType, patch) + crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crdMultiVer.Crd.Name, types.JSONPatchType, patch) if err != nil { framework.Failf("%v", err) } @@ -440,12 +441,12 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu } ginkgo.By("mark a version not serverd") - crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(crd.Crd.Name, metav1.GetOptions{}) + crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Crd.Name, metav1.GetOptions{}) if err != nil { framework.Failf("%v", err) } crd.Crd.Spec.Versions[1].Served = false - crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(crd.Crd) + crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crd.Crd) if err != nil { framework.Failf("%v", err) } diff --git a/test/e2e/apimachinery/custom_resource_definition.go b/test/e2e/apimachinery/custom_resource_definition.go index c209d855ab6..b0609cec8ce 100644 --- a/test/e2e/apimachinery/custom_resource_definition.go +++ b/test/e2e/apimachinery/custom_resource_definition.go @@ -110,7 +110,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin }() selectorListOpts := 
metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID} - list, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().List(selectorListOpts) + list, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), selectorListOpts) framework.ExpectNoError(err, "listing CustomResourceDefinitions") framework.ExpectEqual(len(list.Items), testListSize) for _, actual := range list.Items { @@ -130,7 +130,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin // Use delete collection to remove the CRDs err = fixtures.DeleteV1CustomResourceDefinitions(selectorListOpts, apiExtensionClient) framework.ExpectNoError(err, "deleting CustomResourceDefinitions") - _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "getting remaining CustomResourceDefinition") }) @@ -170,15 +170,14 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin framework.Failf("Expected CustomResourceDefinition Spec to match status sub-resource Spec, but got:\n%s", diff.ObjectReflectDiff(status.Spec, crd.Spec)) } status.Status.Conditions = append(status.Status.Conditions, updateCondition) - updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(status) + updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), status) return err }) framework.ExpectNoError(err, "updating CustomResourceDefinition status") expectCondition(updated.Status.Conditions, updateCondition) patchCondition := v1.CustomResourceDefinitionCondition{Message: "patched"} - patched, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch( - crd.GetName(), + patched, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.GetName(), types.JSONPatchType, []byte(`[{"op": "add", "path": "/status/conditions", "value": [{"message": "patched"}]}]`), "status") @@ -305,7 +304,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin framework.ExpectNoError(err, "creating CR") // Setting default for a to "A" and waiting for the CR to get defaulted on read - crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(crd.Name, types.JSONPatchType, []byte(`[ + crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.JSONPatchType, []byte(`[ {"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default", "value": "A"} ]`)) framework.ExpectNoError(err, "setting default for a to \"A\" in schema") @@ -344,7 +343,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin framework.ExpectEqual(v, "A", "\"a\" is defaulted to \"A\"") // Deleting default for a, adding default "B" for b and waiting for the CR to get defaulted on read for b - crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(crd.Name, types.JSONPatchType, []byte(`[ + crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.JSONPatchType, []byte(`[ {"op":"remove","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default"}, 
{"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/b/default", "value": "B"} ]`)) diff --git a/test/e2e/apimachinery/etcd_failure.go b/test/e2e/apimachinery/etcd_failure.go index f6b21dc2be2..5d2875038dc 100644 --- a/test/e2e/apimachinery/etcd_failure.go +++ b/test/e2e/apimachinery/etcd_failure.go @@ -17,6 +17,7 @@ limitations under the License. package apimachinery import ( + "context" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -115,7 +116,7 @@ func checkExistingRCRecovers(f *framework.Framework) { ginkgo.By("deleting pods from existing replication controller") framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { options := metav1.ListOptions{LabelSelector: rcSelector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) if err != nil { framework.Logf("apiserver returned error, as expected before recovery: %v", err) return false, nil @@ -124,7 +125,7 @@ func checkExistingRCRecovers(f *framework.Framework) { return false, nil } for _, pod := range pods.Items { - err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name) } framework.Logf("apiserver has recovered") @@ -134,7 +135,7 @@ func checkExistingRCRecovers(f *framework.Framework) { ginkgo.By("waiting for replication controller to recover") framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { options := metav1.ListOptions{LabelSelector: rcSelector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String()) for _, pod := range pods.Items { if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) { diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index 4e218f97999..0822287858f 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -17,6 +17,7 @@ limitations under the License. 
package apimachinery import ( + "context" "encoding/json" "fmt" "sync/atomic" @@ -183,7 +184,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo for object, num := range objects { switch object { case "Pods": - pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list pods: %v", err) } @@ -192,7 +193,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo ginkgo.By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items))) } case "Deployments": - deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(metav1.ListOptions{}) + deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list deployments: %v", err) } @@ -201,7 +202,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo ginkgo.By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items))) } case "ReplicaSets": - rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{}) + rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list rs: %v", err) } @@ -210,7 +211,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo ginkgo.By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items))) } case "ReplicationControllers": - rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(metav1.ListOptions{}) + rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list replication controllers: %v", err) } @@ -219,7 +220,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo ginkgo.By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items))) } case "CronJobs": - cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{}) + cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list cronjobs: %v", err) } @@ -228,7 +229,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo ginkgo.By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items))) } case "Jobs": - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list jobs: %v", err) } @@ -320,13 +321,13 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "delete_pods") rc := newOwnerRC(f, rcName, 2, uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(rc) + rc, err := rcClient.Create(context.TODO(), rc) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } // wait for rc to create some pods if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - pods, err := podClient.List(metav1.ListOptions{}) + pods, err := podClient.List(context.TODO(), 
metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list pods: %v", err) } @@ -345,7 +346,7 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the rc") deleteOptions := getBackgroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) - if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil { + if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the rc: %v", err) } ginkgo.By("wait for all pods to be garbage collected") @@ -355,7 +356,7 @@ var _ = SIGDescribe("Garbage collector", func() { return verifyRemainingObjects(f, objects) }); err != nil { framework.Failf("failed to wait for all pods to be deleted: %v", err) - remainingPods, err := podClient.List(metav1.ListOptions{}) + remainingPods, err := podClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Failf("failed to list pods post mortem: %v", err) } else { @@ -378,13 +379,13 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "orphan_pods") rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(rc) + rc, err := rcClient.Create(context.TODO(), rc) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } // wait for rc to create pods if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - rc, err := rcClient.Get(rc.Name, metav1.GetOptions{}) + rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get rc: %v", err) } @@ -399,7 +400,7 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the rc") deleteOptions := getOrphanOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) - if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil { + if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the rc: %v", err) } ginkgo.By("wait for the rc to be deleted") @@ -411,7 +412,7 @@ var _ = SIGDescribe("Garbage collector", func() { // parallel, the GC controller might get distracted by other tests. // According to the test logs, 120s is enough time. 
if err := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) { - rcs, err := rcClient.List(metav1.ListOptions{}) + rcs, err := rcClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list rcs: %v", err) } @@ -424,7 +425,7 @@ var _ = SIGDescribe("Garbage collector", func() { } ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods") time.Sleep(30 * time.Second) - pods, err := podClient.List(metav1.ListOptions{}) + pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Failf("Failed to list pods: %v", err) } @@ -444,13 +445,13 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "orphan_pods_nil_option") rc := newOwnerRC(f, rcName, 2, uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(rc) + rc, err := rcClient.Create(context.TODO(), rc) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } // wait for rc to create some pods if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - rc, err := rcClient.Get(rc.Name, metav1.GetOptions{}) + rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get rc: %v", err) } @@ -464,12 +465,12 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the rc") deleteOptions := &metav1.DeleteOptions{} deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) - if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil { + if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the rc: %v", err) } ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods") time.Sleep(30 * time.Second) - pods, err := podClient.List(metav1.ListOptions{}) + pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Failf("Failed to list pods: %v", err) } @@ -492,14 +493,14 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "delete_rs") deployment := newOwnerDeployment(f, deploymentName, uniqLabels) ginkgo.By("create the deployment") - createdDeployment, err := deployClient.Create(deployment) + createdDeployment, err := deployClient.Create(context.TODO(), deployment) if err != nil { framework.Failf("Failed to create deployment: %v", err) } // wait for deployment to create some rs ginkgo.By("Wait for the Deployment to create new ReplicaSet") err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { - rsList, err := rsClient.List(metav1.ListOptions{}) + rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list rs: %v", err) } @@ -513,7 +514,7 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the deployment") deleteOptions := getBackgroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID)) - if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil { + if err := deployClient.Delete(context.TODO(), deployment.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the deployment: %v", err) } ginkgo.By("wait for all rs to be garbage collected") @@ -524,7 +525,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err != nil { errList := make([]error, 0) errList = 
append(errList, err) - remainingRSs, err := rsClient.List(metav1.ListOptions{}) + remainingRSs, err := rsClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err)) } else { @@ -551,14 +552,14 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "orphan_rs") deployment := newOwnerDeployment(f, deploymentName, uniqLabels) ginkgo.By("create the deployment") - createdDeployment, err := deployClient.Create(deployment) + createdDeployment, err := deployClient.Create(context.TODO(), deployment) if err != nil { framework.Failf("Failed to create deployment: %v", err) } // wait for deployment to create some rs ginkgo.By("Wait for the Deployment to create new ReplicaSet") err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { - rsList, err := rsClient.List(metav1.ListOptions{}) + rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list rs: %v", err) } @@ -572,12 +573,12 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the deployment") deleteOptions := getOrphanOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID)) - if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil { + if err := deployClient.Delete(context.TODO(), deployment.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the deployment: %v", err) } ginkgo.By("wait for deployment deletion to see if the garbage collector mistakenly deletes the rs") err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) { - dList, err := deployClient.List(metav1.ListOptions{}) + dList, err := deployClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list deployments: %v", err) } @@ -594,13 +595,13 @@ var _ = SIGDescribe("Garbage collector", func() { } if !ok { errList := make([]error, 0) - remainingRSs, err := rsClient.List(metav1.ListOptions{}) + remainingRSs, err := rsClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err)) } else { errList = append(errList, fmt.Errorf("remaining rs post mortem: %#v", remainingRSs)) } - remainingDSs, err := deployClient.List(metav1.ListOptions{}) + remainingDSs, err := deployClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %v", err)) } else { @@ -609,7 +610,7 @@ var _ = SIGDescribe("Garbage collector", func() { aggregatedError := utilerrors.NewAggregate(errList) framework.Failf("Failed to verify remaining deployments, rs, and pods: %v", aggregatedError) } - rs, err := clientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{}) + rs, err := clientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Failf("Failed to list ReplicaSet %v", err) } @@ -635,13 +636,13 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabels := getUniqLabel("gctest", "delete_pods_foreground") rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels) ginkgo.By("create the rc") - rc, err := rcClient.Create(rc) + rc, err := rcClient.Create(context.TODO(), rc) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } // wait 
for rc to create pods if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - rc, err := rcClient.Get(rc.Name, metav1.GetOptions{}) + rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get rc: %v", err) } @@ -655,7 +656,7 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("delete the rc") deleteOptions := getForegroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID)) - if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil { + if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the rc: %v", err) } ginkgo.By("wait for the rc to be deleted") @@ -665,9 +666,9 @@ var _ = SIGDescribe("Garbage collector", func() { // deletion and dependent deletion processing. For now, increase the timeout // and investigate the processing delay. if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { - _, err := rcClient.Get(rc.Name, metav1.GetOptions{}) + _, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err == nil { - pods, _ := podClient.List(metav1.ListOptions{}) + pods, _ := podClient.List(context.TODO(), metav1.ListOptions{}) framework.Logf("%d pods remaining", len(pods.Items)) count := 0 for _, pod := range pods.Items { @@ -684,7 +685,7 @@ var _ = SIGDescribe("Garbage collector", func() { } return false, err }); err != nil { - pods, err2 := podClient.List(metav1.ListOptions{}) + pods, err2 := podClient.List(context.TODO(), metav1.ListOptions{}) if err2 != nil { framework.Failf("%v", err2) } @@ -696,7 +697,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("failed to delete the rc: %v", err) } // There shouldn't be any pods - pods, err := podClient.List(metav1.ListOptions{}) + pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Failf("%v", err) } @@ -722,7 +723,7 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabelsDeleted := getUniqLabel("gctest_d", "valid_and_pending_owners_d") rc1 := newOwnerRC(f, rc1Name, replicas, uniqLabelsDeleted) ginkgo.By("create the rc1") - rc1, err := rcClient.Create(rc1) + rc1, err := rcClient.Create(context.TODO(), rc1) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } @@ -730,13 +731,13 @@ var _ = SIGDescribe("Garbage collector", func() { uniqLabelsStay := getUniqLabel("gctest_s", "valid_and_pending_owners_s") rc2 := newOwnerRC(f, rc2Name, 0, uniqLabelsStay) ginkgo.By("create the rc2") - rc2, err = rcClient.Create(rc2) + rc2, err = rcClient.Create(context.TODO(), rc2) if err != nil { framework.Failf("Failed to create replication controller: %v", err) } // wait for rc1 to be stable if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { - rc1, err := rcClient.Get(rc1.Name, metav1.GetOptions{}) + rc1, err := rcClient.Get(context.TODO(), rc1.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get rc: %v", err) } @@ -748,28 +749,28 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err) } ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name)) - pods, err := podClient.List(metav1.ListOptions{}) + pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list pods in namespace: 
%s", f.Namespace.Name) patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID) for i := 0; i < halfReplicas; i++ { pod := pods.Items[i] - _, err := podClient.Patch(pod.Name, types.StrategicMergePatchType, []byte(patch)) + _, err := podClient.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, []byte(patch)) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch) } ginkgo.By(fmt.Sprintf("delete the rc %s", rc1Name)) deleteOptions := getForegroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc1.UID)) - if err := rcClient.Delete(rc1.ObjectMeta.Name, deleteOptions); err != nil { + if err := rcClient.Delete(context.TODO(), rc1.ObjectMeta.Name, deleteOptions); err != nil { framework.Failf("failed to delete the rc: %v", err) } ginkgo.By("wait for the rc to be deleted") // TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient. // Tracked at https://github.com/kubernetes/kubernetes/issues/50046. if err := wait.Poll(5*time.Second, 90*time.Second, func() (bool, error) { - _, err := rcClient.Get(rc1.Name, metav1.GetOptions{}) + _, err := rcClient.Get(context.TODO(), rc1.Name, metav1.GetOptions{}) if err == nil { - pods, _ := podClient.List(metav1.ListOptions{}) + pods, _ := podClient.List(context.TODO(), metav1.ListOptions{}) framework.Logf("%d pods remaining", len(pods.Items)) count := 0 for _, pod := range pods.Items { @@ -786,7 +787,7 @@ var _ = SIGDescribe("Garbage collector", func() { } return false, err }); err != nil { - pods, err2 := podClient.List(metav1.ListOptions{}) + pods, err2 := podClient.List(context.TODO(), metav1.ListOptions{}) if err2 != nil { framework.Failf("%v", err2) } @@ -798,7 +799,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("failed to delete rc %s, err: %v", rc1Name, err) } // half of the pods should still exist, - pods, err = podClient.List(metav1.ListOptions{}) + pods, err = podClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Failf("%v", err) } @@ -828,43 +829,43 @@ var _ = SIGDescribe("Garbage collector", func() { podClient := clientSet.CoreV1().Pods(f.Namespace.Name) pod1Name := "pod1" pod1 := newGCPod(pod1Name) - pod1, err := podClient.Create(pod1) + pod1, err := podClient.Create(context.TODO(), pod1) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name) pod2Name := "pod2" pod2 := newGCPod(pod2Name) - pod2, err = podClient.Create(pod2) + pod2, err = podClient.Create(context.TODO(), pod2) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name) pod3Name := "pod3" pod3 := newGCPod(pod3Name) - pod3, err = podClient.Create(pod3) + pod3, err = podClient.Create(context.TODO(), pod3) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name) // create circular dependency addRefPatch := func(name string, uid types.UID) []byte { return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid)) } patch1 := addRefPatch(pod3.Name, pod3.UID) - pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1) + pod1, err = podClient.Patch(context.TODO(), pod1.Name, types.StrategicMergePatchType, patch1) 
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1) framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences) patch2 := addRefPatch(pod1.Name, pod1.UID) - pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2) + pod2, err = podClient.Patch(context.TODO(), pod2.Name, types.StrategicMergePatchType, patch2) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2) framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences) patch3 := addRefPatch(pod2.Name, pod2.UID) - pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3) + pod3, err = podClient.Patch(context.TODO(), pod3.Name, types.StrategicMergePatchType, patch3) framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3) framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences) // delete one pod, should result in the deletion of all pods deleteOptions := getForegroundOptions() deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID)) - err = podClient.Delete(pod1.ObjectMeta.Name, deleteOptions) + err = podClient.Delete(context.TODO(), pod1.ObjectMeta.Name, deleteOptions) framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name) var pods *v1.PodList var err2 error // TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient. // Tracked at https://github.com/kubernetes/kubernetes/issues/50046. if err := wait.Poll(5*time.Second, 90*time.Second, func() (bool, error) { - pods, err2 = podClient.List(metav1.ListOptions{}) + pods, err2 = podClient.List(context.TODO(), metav1.ListOptions{}) if err2 != nil { return false, fmt.Errorf("failed to list pods: %v", err) } @@ -1124,12 +1125,12 @@ var _ = SIGDescribe("Garbage collector", func() { ginkgo.By("Create the cronjob") cronJob := newCronJob("simple", "*/1 * * * ?") - cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(cronJob) + cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(context.TODO(), cronJob) framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name) ginkgo.By("Wait for the CronJob to create new Job") err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) { - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list jobs: %v", err) } @@ -1140,7 +1141,7 @@ var _ = SIGDescribe("Garbage collector", func() { } ginkgo.By("Delete the cronjob") - if err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Delete(cronJob.Name, getBackgroundOptions()); err != nil { + if err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Delete(context.TODO(), cronJob.Name, getBackgroundOptions()); err != nil { framework.Failf("Failed to delete the CronJob: %v", err) } ginkgo.By("Verify if cronjob does not leave jobs nor pods behind") diff --git a/test/e2e/apimachinery/generated_clientset.go b/test/e2e/apimachinery/generated_clientset.go index d5f9ed21807..8b248be0396 100644 --- a/test/e2e/apimachinery/generated_clientset.go +++ 
b/test/e2e/apimachinery/generated_clientset.go @@ -17,6 +17,7 @@ limitations under the License. package apimachinery import ( + "context" "strconv" "time" @@ -110,7 +111,7 @@ var _ = SIGDescribe("Generated clientset", func() { ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String() options := metav1.ListOptions{LabelSelector: selector} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) if err != nil { framework.Failf("Failed to query for pods: %v", err) } @@ -119,13 +120,13 @@ var _ = SIGDescribe("Generated clientset", func() { LabelSelector: selector, ResourceVersion: pods.ListMeta.ResourceVersion, } - w, err := podClient.Watch(options) + w, err := podClient.Watch(context.TODO(), options) if err != nil { framework.Failf("Failed to set up watch: %v", err) } ginkgo.By("creating the pod") - pod, err = podClient.Create(pod) + pod, err = podClient.Create(context.TODO(), pod) if err != nil { framework.Failf("Failed to create pod: %v", err) } @@ -135,7 +136,7 @@ var _ = SIGDescribe("Generated clientset", func() { LabelSelector: selector, ResourceVersion: pod.ResourceVersion, } - pods, err = podClient.List(options) + pods, err = podClient.List(context.TODO(), options) if err != nil { framework.Failf("Failed to query for pods: %v", err) } @@ -150,7 +151,7 @@ var _ = SIGDescribe("Generated clientset", func() { ginkgo.By("deleting the pod gracefully") gracePeriod := int64(31) - if err := podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil { + if err := podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil { framework.Failf("Failed to delete pod: %v", err) } @@ -226,7 +227,7 @@ var _ = SIGDescribe("Generated clientset", func() { ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String() options := metav1.ListOptions{LabelSelector: selector} - cronJobs, err := cronJobClient.List(options) + cronJobs, err := cronJobClient.List(context.TODO(), options) if err != nil { framework.Failf("Failed to query for cronJobs: %v", err) } @@ -235,13 +236,13 @@ var _ = SIGDescribe("Generated clientset", func() { LabelSelector: selector, ResourceVersion: cronJobs.ListMeta.ResourceVersion, } - w, err := cronJobClient.Watch(options) + w, err := cronJobClient.Watch(context.TODO(), options) if err != nil { framework.Failf("Failed to set up watch: %v", err) } ginkgo.By("creating the cronJob") - cronJob, err = cronJobClient.Create(cronJob) + cronJob, err = cronJobClient.Create(context.TODO(), cronJob) if err != nil { framework.Failf("Failed to create cronJob: %v", err) } @@ -251,7 +252,7 @@ var _ = SIGDescribe("Generated clientset", func() { LabelSelector: selector, ResourceVersion: cronJob.ResourceVersion, } - cronJobs, err = cronJobClient.List(options) + cronJobs, err = cronJobClient.List(context.TODO(), options) if err != nil { framework.Failf("Failed to query for cronJobs: %v", err) } @@ -263,12 +264,12 @@ var _ = SIGDescribe("Generated clientset", func() { ginkgo.By("deleting the cronJob") // Use DeletePropagationBackground so the CronJob is really gone when the call returns. 
propagationPolicy := metav1.DeletePropagationBackground - if err := cronJobClient.Delete(cronJob.Name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { + if err := cronJobClient.Delete(context.TODO(), cronJob.Name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { framework.Failf("Failed to delete cronJob: %v", err) } options = metav1.ListOptions{LabelSelector: selector} - cronJobs, err = cronJobClient.List(options) + cronJobs, err = cronJobClient.List(context.TODO(), options) if err != nil { framework.Failf("Failed to list cronJobs to verify deletion: %v", err) } diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index b0068b68a99..e99f4461032 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -17,6 +17,7 @@ limitations under the License. package apimachinery import ( + "context" "encoding/json" "fmt" "strings" @@ -67,7 +68,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, func() (bool, error) { var cnt = 0 - nsList, err := f.ClientSet.CoreV1().Namespaces().List(metav1.ListOptions{}) + nsList, err := f.ClientSet.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -109,21 +110,21 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }, }, } - pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name) ginkgo.By("Waiting for the pod to have running status") framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) ginkgo.By("Deleting the namespace") - err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil) + err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, nil) framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name) ginkgo.By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { - _, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespace.Name, metav1.GetOptions{}) if err != nil && apierrors.IsNotFound(err) { return true, nil } @@ -135,7 +136,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) ginkgo.By("Verifying there are no pods in the namespace") - _, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectError(err, "failed to get pod %s in namespace: %s", pod.Name, namespace.Name) } @@ -169,18 +170,18 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }}, }, } - service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service) + service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(context.TODO(), service) framework.ExpectNoError(err, "failed to create service %s in namespace %s", serviceName, namespace.Name) 
ginkgo.By("Deleting the namespace") - err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil) + err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, nil) framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name) ginkgo.By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { - _, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespace.Name, metav1.GetOptions{}) if err != nil && apierrors.IsNotFound(err) { return true, nil } @@ -192,7 +193,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) ginkgo.By("Verifying there is no service in the namespace") - _, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(context.TODO(), service.Name, metav1.GetOptions{}) framework.ExpectError(err, "failed to get service %s in namespace: %s", service.Name, namespace.Name) } @@ -270,11 +271,11 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { }, }) framework.ExpectNoError(err, "failed to marshal JSON patch data") - _, err = f.ClientSet.CoreV1().Namespaces().Patch(namespaceName, types.StrategicMergePatchType, []byte(nspatch)) + _, err = f.ClientSet.CoreV1().Namespaces().Patch(context.TODO(), namespaceName, types.StrategicMergePatchType, []byte(nspatch)) framework.ExpectNoError(err, "failed to patch Namespace") ginkgo.By("get the Namespace and ensuring it has the label") - namespace, err := f.ClientSet.CoreV1().Namespaces().Get(namespaceName, metav1.GetOptions{}) + namespace, err := f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get Namespace") framework.ExpectEqual(namespace.ObjectMeta.Labels["testLabel"], "testValue", "namespace not patched") }) diff --git a/test/e2e/apimachinery/protocol.go b/test/e2e/apimachinery/protocol.go index a2c001e62a2..eb0d48ca64b 100644 --- a/test/e2e/apimachinery/protocol.go +++ b/test/e2e/apimachinery/protocol.go @@ -17,6 +17,7 @@ limitations under the License. package apimachinery import ( + "context" "fmt" "strconv" @@ -49,11 +50,11 @@ var _ = SIGDescribe("client-go should negotiate", func() { cfg.AcceptContentTypes = accept c := kubernetes.NewForConfigOrDie(cfg) - svcs, err := c.CoreV1().Services("default").Get("kubernetes", metav1.GetOptions{}) + svcs, err := c.CoreV1().Services("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{}) framework.ExpectNoError(err) rv, err := strconv.Atoi(svcs.ResourceVersion) framework.ExpectNoError(err) - w, err := c.CoreV1().Services("default").Watch(metav1.ListOptions{ResourceVersion: strconv.Itoa(rv - 1)}) + w, err := c.CoreV1().Services("default").Watch(context.TODO(), metav1.ListOptions{ResourceVersion: strconv.Itoa(rv - 1)}) framework.ExpectNoError(err) defer w.Stop() diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index 95baf48d30b..26857f29cfb 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -17,6 +17,7 @@ limitations under the License. 
package apimachinery import ( + "context" "fmt" "strconv" "time" @@ -100,7 +101,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a Service") service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP) - service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(service) + service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), service) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures service creation") @@ -111,7 +112,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a Service") - err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(service.Name, nil) + err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), service.Name, nil) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -133,7 +134,7 @@ var _ = SIGDescribe("ResourceQuota", func() { // On contended servers the service account controller can slow down, leading to the count changing during a run. // Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely. wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) { - secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(metav1.ListOptions{}) + secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) if len(secrets.Items) == found { // loop until the number of secrets has stabilized for 5 seconds @@ -167,7 +168,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a Secret") secret := newTestSecretForQuota("test-secret") - secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret) + secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures secret creation") @@ -179,7 +180,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a secret") - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, nil) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -224,7 +225,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") pod := newTestPodForQuota(f, podName, requests, limits) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) podToUpdate := pod @@ -243,7 +244,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceCPU] = resource.MustParse("600m") requests[v1.ResourceMemory] = resource.MustParse("100Mi") pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectError(err) ginkgo.By("Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)") @@ -255,7 +256,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") 
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2") pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectError(err) ginkgo.By("Ensuring a pod cannot update its resource requirements") @@ -265,7 +266,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceMemory] = resource.MustParse("100Mi") requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi") podToUpdate.Spec.Containers[0].Resources.Requests = requests - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), podToUpdate) framework.ExpectError(err) ginkgo.By("Ensuring attempts to update pod resource requirements did not change quota usage") @@ -273,7 +274,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -298,7 +299,7 @@ var _ = SIGDescribe("ResourceQuota", func() { // On contended servers the service account controller can slow down, leading to the count changing during a run. // Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely. wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) { - configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(metav1.ListOptions{}) + configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) if len(configmaps.Items) == found { // loop until the number of configmaps has stabilized for 5 seconds @@ -331,7 +332,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a ConfigMap") configMap := newTestConfigMapForQuota("test-configmap") - configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap) + configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures configMap creation") @@ -345,7 +346,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a ConfigMap") - err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, nil) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -381,7 +382,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a ReplicationController") replicationController := newTestReplicationControllerForQuota("test-rc", "nginx", 0) - replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(replicationController) + replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), replicationController) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures replication controller creation") @@ -396,7 +397,7 @@ var _ = 
SIGDescribe("ResourceQuota", func() { // detached. ReplicationControllers default to "orphan", which // is different from most resources. (Why? To preserve a common // workflow from prior to the GC's introduction.) - err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(replicationController.Name, &metav1.DeleteOptions{ + err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(context.TODO(), replicationController.Name, &metav1.DeleteOptions{ PropagationPolicy: func() *metav1.DeletionPropagation { p := metav1.DeletePropagationBackground return &p @@ -437,7 +438,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a ReplicaSet") replicaSet := newTestReplicaSetForQuota("test-rs", "nginx", 0) - replicaSet, err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(replicaSet) + replicaSet, err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), replicaSet) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures replicaset creation") @@ -447,7 +448,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a ReplicaSet") - err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(replicaSet.Name, nil) + err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(context.TODO(), replicaSet.Name, nil) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -485,7 +486,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a PersistentVolumeClaim") pvc := newTestPersistentVolumeClaimForQuota("test-claim") - pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(pvc) + pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures persistent volume claim creation") @@ -496,7 +497,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a PersistentVolumeClaim") - err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil) + err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, nil) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -539,7 +540,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a PersistentVolumeClaim with storage class") pvc := newTestPersistentVolumeClaimForQuota("test-claim") pvc.Spec.StorageClassName = &classGold - pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(pvc) + pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status captures persistent volume claim creation") @@ -553,7 +554,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting a PersistentVolumeClaim") - err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil) + err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, nil) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released usage") @@ -587,7 +588,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) err = updateResourceQuotaUntilUsageAppears(f.ClientSet, f.Namespace.Name, quotaName, v1.ResourceName(countResourceName)) 
framework.ExpectNoError(err) - err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(quotaName, nil) + err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(context.TODO(), quotaName, nil) framework.ExpectNoError(err) ginkgo.By("Counting existing ResourceQuota") @@ -689,7 +690,7 @@ var _ = SIGDescribe("ResourceQuota", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod := newTestPodForQuota(f, podName, requests, limits) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage") @@ -711,7 +712,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -728,7 +729,7 @@ var _ = SIGDescribe("ResourceQuota", func() { pod = newTestPodForQuota(f, podName, requests, limits) activeDeadlineSeconds := int64(3600) pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage") @@ -750,7 +751,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -793,7 +794,7 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Creating a best-effort pod") pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage") @@ -807,7 +808,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -823,7 +824,7 @@ var _ = SIGDescribe("ResourceQuota", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod = newTestPodForQuota(f, "burstable-pod", requests, limits) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage") @@ -837,7 +838,7 @@ var _ = 
SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -871,7 +872,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Getting a ResourceQuota") - resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], resource.MustParse("1")) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("500Mi")) @@ -879,13 +880,13 @@ var _ = SIGDescribe("ResourceQuota", func() { ginkgo.By("Updating a ResourceQuota") resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("2") resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("1Gi") - resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(resourceQuota) + resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota) framework.ExpectNoError(err) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], resource.MustParse("2")) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("1Gi")) ginkgo.By("Verifying a ResourceQuota was modified") - resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], resource.MustParse("2")) framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("1Gi")) @@ -895,7 +896,7 @@ var _ = SIGDescribe("ResourceQuota", func() { framework.ExpectNoError(err) ginkgo.By("Verifying the deleted ResourceQuota") - _, err = client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + _, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) framework.ExpectEqual(apierrors.IsNotFound(err), true) }) }) @@ -923,7 +924,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { ginkgo.By("Creating a best-effort pod") pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage") @@ -937,7 +938,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -953,7 +954,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { limits[v1.ResourceCPU] = 
resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod = newTestPodForQuota(f, "burstable-pod", requests, limits) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage") @@ -967,7 +968,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1005,7 +1006,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod := newTestPodForQuota(f, podName, requests, limits) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage") @@ -1027,7 +1028,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1044,7 +1045,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { pod = newTestPodForQuota(f, podName, requests, limits) activeDeadlineSeconds := int64(3600) pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage") @@ -1066,7 +1067,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1085,7 +1086,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1104,7 +1105,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", 
func() { ginkgo.By("Creating a pod with priority class") podName := "testpod-pclass1" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass1") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage") @@ -1113,7 +1114,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1124,7 +1125,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1143,7 +1144,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating first pod with priority class should pass") podName := "testpod-pclass2-1" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass2") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage") @@ -1154,11 +1155,11 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating 2nd pod with priority class should fail") podName2 := "testpod-pclass2-2" pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2") - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2) framework.ExpectError(err) ginkgo.By("Deleting first pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1169,7 +1170,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}) framework.ExpectEqual(err == nil || 
apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1188,7 +1189,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a pod with priority class with pclass3") podName := "testpod-pclass3-1" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass3") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope remains same") @@ -1199,7 +1200,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a 2nd pod with priority class pclass3") podName2 := "testpod-pclass2-2" pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass3") - pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2) + pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope remains same") @@ -1208,17 +1209,17 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting both pods") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod2.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) }) ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) - _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}) + _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1237,7 +1238,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a pod with priority class pclass5") podName := "testpod-pclass5" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass5") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage") @@ -1248,7 +1249,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating 2nd pod with priority class pclass6") podName2 := "testpod-pclass6" pod2 := newTestPodForQuotaWithPriority(f, 
podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass6") - pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2) + pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope is updated with the pod usage") @@ -1257,9 +1258,9 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting both pods") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod2.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1270,7 +1271,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1289,7 +1290,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a pod with priority class pclass7") podName := "testpod-pclass7" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass7") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class is not used") @@ -1298,13 +1299,13 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) }) ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1323,7 +1324,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.By("Creating a pod with priority class pclass8") podName := "testpod-pclass8" pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass8") - pod, err = 
f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage") @@ -1332,7 +1333,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1343,7 +1344,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) hard := v1.ResourceList{} @@ -1377,7 +1378,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { limit[v1.ResourceMemory] = resource.MustParse("2Gi") pod := newTestPodForQuotaWithPriority(f, podName, request, limit, "pclass9") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage") @@ -1390,7 +1391,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { framework.ExpectNoError(err) ginkgo.By("Deleting the pod") - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Ensuring resource quota status released the pod usage") @@ -1656,12 +1657,12 @@ func newTestSecretForQuota(name string) *v1.Secret { // createResourceQuota in the specified namespace func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) { - return c.CoreV1().ResourceQuotas(namespace).Create(resourceQuota) + return c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), resourceQuota) } // deleteResourceQuota with the specified name func deleteResourceQuota(c clientset.Interface, namespace, name string) error { - return c.CoreV1().ResourceQuotas(namespace).Delete(name, nil) + return c.CoreV1().ResourceQuotas(namespace).Delete(context.TODO(), name, nil) } // countResourceQuota counts the number of ResourceQuota in the specified namespace @@ -1670,7 +1671,7 @@ func deleteResourceQuota(c clientset.Interface, namespace, name string) error { func countResourceQuota(c clientset.Interface, namespace string) (int, error) { found, unchanged := 0, 0 return found, wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) { - resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(metav1.ListOptions{}) + resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(context.TODO(), 
metav1.ListOptions{}) framework.ExpectNoError(err) if len(resourceQuotas.Items) == found { // loop until the number of resource quotas has stabilized for 5 seconds @@ -1686,7 +1687,7 @@ func countResourceQuota(c clientset.Interface, namespace string) (int, error) { // wait for resource quota status to show the expected used resources value func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.ResourceList) error { return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) { - resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1709,7 +1710,7 @@ func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.R // for the specific resource name. func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName string, resourceName v1.ResourceName) error { return wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) { - resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1722,7 +1723,7 @@ func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName s current := resourceQuota.Spec.Hard[resourceName] current.Add(resource.MustParse("1")) resourceQuota.Spec.Hard[resourceName] = current - _, err = c.CoreV1().ResourceQuotas(ns).Update(resourceQuota) + _, err = c.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota) // ignoring conflicts since someone else may already updated it. if apierrors.IsConflict(err) { return false, nil diff --git a/test/e2e/apimachinery/table_conversion.go b/test/e2e/apimachinery/table_conversion.go index 4a3d0a9a1a5..0ac564ef60c 100644 --- a/test/e2e/apimachinery/table_conversion.go +++ b/test/e2e/apimachinery/table_conversion.go @@ -55,7 +55,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { podName := "pod-1" framework.Logf("Creating pod %s", podName) - _, err := c.CoreV1().Pods(ns).Create(newTablePod(podName)) + _, err := c.CoreV1().Pods(ns).Create(context.TODO(), newTablePod(podName)) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns) table := &metav1beta1.Table{} @@ -83,7 +83,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { ginkgo.By("creating a large number of resources") workqueue.ParallelizeUntil(context.TODO(), 5, 20, func(i int) { for tries := 3; tries >= 0; tries-- { - _, err := client.Create(&v1.PodTemplate{ + _, err := client.Create(context.TODO(), &v1.PodTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("template-%04d", i), }, diff --git a/test/e2e/apimachinery/watch.go b/test/e2e/apimachinery/watch.go index 18a4c18a9e2..b51c8d34cd1 100644 --- a/test/e2e/apimachinery/watch.go +++ b/test/e2e/apimachinery/watch.go @@ -17,6 +17,7 @@ limitations under the License. 
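The e2e hunks above all converge on the same call shape: a context.Context becomes the first argument of every typed client method, temporarily satisfied with context.TODO(). A minimal compilable sketch of that shape, assuming the intermediate client-go signatures shown in this patch (option arguments unchanged) and using hypothetical helper names:

package e2esketch // illustration only, not part of this patch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createThenDeletePod mirrors the test pattern above: create a pod, then
// delete it with a zero grace period, threading one context through both calls.
func createThenDeletePod(c kubernetes.Interface, ns string, pod *v1.Pod) error {
	// A real deadline can replace context.TODO() once callers are migrated.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	created, err := c.CoreV1().Pods(ns).Create(ctx, pod)
	if err != nil {
		return err
	}
	return c.CoreV1().Pods(ns).Delete(ctx, created.Name, metav1.NewDeleteOptions(0))
}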
package apimachinery import ( + "context" "fmt" "math/rand" "time" @@ -84,7 +85,7 @@ var _ = SIGDescribe("Watchers", func() { } ginkgo.By("creating a configmap with label A and ensuring the correct watchers observe the notification") - testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA) + testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapA) framework.ExpectNoError(err, "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns) expectEvent(watchA, watch.Added, testConfigMapA) expectEvent(watchAB, watch.Added, testConfigMapA) @@ -109,21 +110,21 @@ var _ = SIGDescribe("Watchers", func() { expectNoEvent(watchB, watch.Modified, testConfigMapA) ginkgo.By("deleting configmap A and ensuring the correct watchers observe the notification") - err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil) + err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapA.GetName(), nil) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns) expectEvent(watchA, watch.Deleted, nil) expectEvent(watchAB, watch.Deleted, nil) expectNoEvent(watchB, watch.Deleted, nil) ginkgo.By("creating a configmap with label B and ensuring the correct watchers observe the notification") - testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB) + testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapB) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMapB, ns) expectEvent(watchB, watch.Added, testConfigMapB) expectEvent(watchAB, watch.Added, testConfigMapB) expectNoEvent(watchA, watch.Added, testConfigMapB) ginkgo.By("deleting configmap B and ensuring the correct watchers observe the notification") - err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil) + err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapB.GetName(), nil) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns) expectEvent(watchB, watch.Deleted, nil) expectEvent(watchAB, watch.Deleted, nil) @@ -149,7 +150,7 @@ var _ = SIGDescribe("Watchers", func() { } ginkgo.By("creating a new configmap") - testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap) + testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns) ginkgo.By("modifying the configmap once") @@ -165,7 +166,7 @@ var _ = SIGDescribe("Watchers", func() { framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns) ginkgo.By("deleting the configmap") - err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil) + err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), nil) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns) ginkgo.By("creating a watch on configmaps from the resource version returned by the first update") @@ -202,7 +203,7 @@ var _ = SIGDescribe("Watchers", func() { framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", watchRestartedLabelValue) ginkgo.By("creating a new configmap") - testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap) + testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap) 
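Watch gets the same treatment; as a rough sketch (the helper below is an assumption, not code from this patch), a watch opened with the new (ctx, options) signature can be drained until the caller's context ends or the server closes the stream:

package e2esketch // illustration only

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchConfigMapsUntilDone opens a watch with the context-aware call and
// consumes events until the context is cancelled or the channel closes.
func watchConfigMapsUntilDone(ctx context.Context, c kubernetes.Interface, ns string) error {
	w, err := c.CoreV1().ConfigMaps(ns).Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case _, ok := <-w.ResultChan():
			if !ok {
				return nil // server closed the watch stream
			}
			// Handle the watch event (Added/Modified/Deleted) here.
		}
	}
}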
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns) ginkgo.By("modifying the configmap once") @@ -234,7 +235,7 @@ var _ = SIGDescribe("Watchers", func() { framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion) ginkgo.By("deleting the configmap") - err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil) + err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), nil) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns) ginkgo.By("Expecting to observe notifications for all changes to the configmap since the first watch closed") @@ -267,7 +268,7 @@ var _ = SIGDescribe("Watchers", func() { framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", toBeChangedLabelValue) ginkgo.By("creating a new configmap") - testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap) + testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns) ginkgo.By("modifying the configmap once") @@ -309,7 +310,7 @@ var _ = SIGDescribe("Watchers", func() { framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a third time", configMapName, ns) ginkgo.By("deleting the configmap") - err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil) + err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), nil) framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns) ginkgo.By("Expecting to observe an add notification for the watched object when the label value was restored") @@ -344,7 +345,7 @@ var _ = SIGDescribe("Watchers", func() { wcs := []watch.Interface{} resourceVersion := "0" for i := 0; i < iterations; i++ { - wc, err := c.CoreV1().ConfigMaps(ns).Watch(metav1.ListOptions{ResourceVersion: resourceVersion}) + wc, err := c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), metav1.ListOptions{ResourceVersion: resourceVersion}) framework.ExpectNoError(err, "Failed to watch configmaps in the namespace %s", ns) wcs = append(wcs, wc) resourceVersion = waitForNextConfigMapEvent(wcs[0]).ResourceVersion @@ -378,7 +379,7 @@ func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...s }, }), } - return c.CoreV1().ConfigMaps(ns).Watch(opts) + return c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), opts) } func int64ptr(i int) *int64 { @@ -470,18 +471,18 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa switch op { case createEvent: cm.Name = name(i) - _, err := c.CoreV1().ConfigMaps(ns).Create(cm) + _, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), cm) framework.ExpectNoError(err, "Failed to create configmap %s in namespace %s", cm.Name, ns) existing = append(existing, i) i++ case updateEvent: idx := rand.Intn(len(existing)) cm.Name = name(existing[idx]) - _, err := c.CoreV1().ConfigMaps(ns).Update(cm) + _, err := c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm) framework.ExpectNoError(err, "Failed to update configmap %s in namespace %s", cm.Name, ns) case deleteEvent: idx := rand.Intn(len(existing)) - err := c.CoreV1().ConfigMaps(ns).Delete(name(existing[idx]), &metav1.DeleteOptions{}) + err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name(existing[idx]), 
&metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns) existing = append(existing[:idx], existing[idx+1:]...) default: diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index cd0a2f4b4dc..8292ec87e27 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -423,7 +423,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { }) framework.ExpectNoError(err, "Creating validating webhook configuration") defer func() { - err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(hook.Name, nil) + err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), hook.Name, nil) framework.ExpectNoError(err, "Deleting validating webhook configuration") }() @@ -434,9 +434,9 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) if err == nil { - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") return false, nil } @@ -448,10 +448,10 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Updating a validating webhook configuration's rules to not include the create operation") err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - h, err := admissionClient.ValidatingWebhookConfigurations().Get(f.UniqueName, metav1.GetOptions{}) + h, err := admissionClient.ValidatingWebhookConfigurations().Get(context.TODO(), f.UniqueName, metav1.GetOptions{}) framework.ExpectNoError(err, "Getting validating webhook configuration") h.Webhooks[0].Rules[0].Operations = []admissionregistrationv1.OperationType{admissionregistrationv1.Update} - _, err = admissionClient.ValidatingWebhookConfigurations().Update(h) + _, err = admissionClient.ValidatingWebhookConfigurations().Update(context.TODO(), h) return err }) framework.ExpectNoError(err, "Updating validating webhook configuration") @@ -459,22 +459,21 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) if err != nil { if !strings.Contains(err.Error(), "denied") { return false, err } return false, nil } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") return true, nil }) framework.ExpectNoError(err, "Waiting for configMap in namespace %s to be allowed creation since webhook was updated to not validate create", f.Namespace.Name) ginkgo.By("Patching a validating 
webhook configuration's rules to include the create operation") - hook, err = admissionClient.ValidatingWebhookConfigurations().Patch( - f.UniqueName, + hook, err = admissionClient.ValidatingWebhookConfigurations().Patch(context.TODO(), f.UniqueName, types.JSONPatchType, []byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`)) framework.ExpectNoError(err, "Patching validating webhook configuration") @@ -482,9 +481,9 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) if err == nil { - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") return false, nil } @@ -519,7 +518,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { }) framework.ExpectNoError(err, "Creating mutating webhook configuration") defer func() { - err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(hook.Name, nil) + err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), hook.Name, nil) framework.ExpectNoError(err, "Deleting mutating webhook configuration") }() @@ -527,21 +526,21 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") - hook, err = admissionClient.MutatingWebhookConfigurations().Get(f.UniqueName, metav1.GetOptions{}) + hook, err = admissionClient.MutatingWebhookConfigurations().Get(context.TODO(), f.UniqueName, metav1.GetOptions{}) framework.ExpectNoError(err, "Getting mutating webhook configuration") ginkgo.By("Updating a mutating webhook configuration's rules to not include the create operation") hook.Webhooks[0].Rules[0].Operations = []admissionregistrationv1.OperationType{admissionregistrationv1.Update} - hook, err = admissionClient.MutatingWebhookConfigurations().Update(hook) + hook, err = admissionClient.MutatingWebhookConfigurations().Update(context.TODO(), hook) framework.ExpectNoError(err, "Updating mutating webhook configuration") ginkgo.By("Creating a configMap that should not be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return !ok, nil @@ -549,8 +548,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { framework.ExpectNoError(err, "Waiting for configMap in namespace %s this is not mutated", f.Namespace.Name) ginkgo.By("Patching a mutating webhook 
configuration's rules to include the create operation") - hook, err = admissionClient.MutatingWebhookConfigurations().Patch( - f.UniqueName, + hook, err = admissionClient.MutatingWebhookConfigurations().Patch(context.TODO(), f.UniqueName, types.JSONPatchType, []byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`)) framework.ExpectNoError(err, "Patching mutating webhook configuration") @@ -558,11 +556,11 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that should be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return ok, nil @@ -599,7 +597,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { selectorListOpts := metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID} ginkgo.By("Listing all of the created validation webhooks") - list, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(selectorListOpts) + list, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(context.TODO(), selectorListOpts) framework.ExpectNoError(err, "Listing validating webhook configurations") framework.ExpectEqual(len(list.Items), testListSize) @@ -610,9 +608,9 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) if err == nil { - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") return false, nil } @@ -624,20 +622,20 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { framework.ExpectNoError(err, "Waiting for configMap in namespace %s to be denied creation by validating webhook", f.Namespace.Name) ginkgo.By("Deleting the collection of validation webhooks") - err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().DeleteCollection(nil, selectorListOpts) + err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().DeleteCollection(context.TODO(), nil, selectorListOpts) framework.ExpectNoError(err, "Deleting collection of validating webhook configurations") ginkgo.By("Creating a configMap that does not comply to the validation webhook rules") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm) + _, err = 
client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) if err != nil { if !strings.Contains(err.Error(), "denied") { return false, err } return false, nil } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") return true, nil }) @@ -673,7 +671,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { selectorListOpts := metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID} ginkgo.By("Listing all of the created validation webhooks") - list, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(selectorListOpts) + list, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(context.TODO(), selectorListOpts) framework.ExpectNoError(err, "Listing mutating webhook configurations") framework.ExpectEqual(len(list.Items), testListSize) @@ -684,11 +682,11 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("Creating a configMap that should be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return ok, nil @@ -696,17 +694,17 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { framework.ExpectNoError(err, "Waiting for configMap in namespace %s to be mutated", f.Namespace.Name) ginkgo.By("Deleting the collection of validation webhooks") - err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().DeleteCollection(nil, selectorListOpts) + err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().DeleteCollection(context.TODO(), nil, selectorListOpts) framework.ExpectNoError(err, "Deleting collection of mutating webhook configurations") ginkgo.By("Creating a configMap that should not be mutated") err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f) - created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm) + created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm) if err != nil { return false, err } - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil) framework.ExpectNoError(err, "Deleting successfully created configMap") _, ok := created.Data["mutation-stage-1"] return !ok, nil @@ -719,7 +717,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) { ginkgo.By("Create role binding to let webhook read extension-apiserver-authentication") client := f.ClientSet // Create the role binding to allow the webhook read the extension-apiserver-authentication configmap - _, err := client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{ + _, err := 
client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: roleBindingName, Annotations: map[string]string{ @@ -731,7 +729,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) { Kind: "Role", Name: "extension-apiserver-authentication-reader", }, - // Webhook uses the default service account. + Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", @@ -763,7 +761,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert }, } namespace := f.Namespace.Name - _, err := client.CoreV1().Secrets(namespace).Create(secret) + _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret) framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace) // Create the deployment of the webhook @@ -839,7 +837,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert }, }, } - deployment, err := client.AppsV1().Deployments(namespace).Create(d) + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) ginkgo.By("Wait for the deployment to be ready") err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) @@ -867,7 +865,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert }, }, } - _, err = client.CoreV1().Services(namespace).Create(service) + _, err = client.CoreV1().Services(namespace).Create(context.TODO(), service) framework.ExpectNoError(err, "creating service %s in namespace %s", serviceName, namespace) ginkgo.By("Verifying the service has paired with the endpoint") @@ -911,7 +909,7 @@ func registerWebhook(f *framework.Framework, configName string, certCtx *certCon framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil) + client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) } } @@ -963,7 +961,7 @@ func registerWebhookForAttachingPod(f *framework.Framework, configName string, c framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil) + client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) } } @@ -988,14 +986,16 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, configName stri err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") - return func() { client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(configName, nil) } + return func() { + client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + } } func testMutatingConfigMapWebhook(f *framework.Framework) { ginkgo.By("create a configmap that should be updated by the webhook") client := f.ClientSet configMap := toBeMutatedConfigMap(f) - mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap) + mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) framework.ExpectNoError(err) expectedConfigMapData := map[string]string{ "mutation-start": "yes", @@ -1054,14 +1054,16 @@ func 
registerMutatingWebhookForPod(f *framework.Framework, configName string, ce err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") - return func() { client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(configName, nil) } + return func() { + client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + } } func testMutatingPodWebhook(f *framework.Framework) { ginkgo.By("create a pod that should be updated by the webhook") client := f.ClientSet pod := toBeMutatedPod(f) - mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod) + mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) gomega.Expect(err).To(gomega.BeNil()) if len(mutatedPod.Spec.InitContainers) != 1 { framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers) @@ -1095,7 +1097,7 @@ func testWebhook(f *framework.Framework) { client := f.ClientSet // Creating the pod, the request should be rejected pod := nonCompliantPod(f) - _, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectError(err, "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name) expectedErrMsg1 := "the pod contains unwanted container name" if !strings.Contains(err.Error(), expectedErrMsg1) { @@ -1110,7 +1112,7 @@ func testWebhook(f *framework.Framework) { client = f.ClientSet // Creating the pod, the request should be rejected pod = hangingPod(f) - _, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectError(err, "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name) // ensure the error is webhook-related, not client-side if !strings.Contains(err.Error(), "webhook") { @@ -1121,14 +1123,14 @@ func testWebhook(f *framework.Framework) { framework.Failf("expect error %q, got %q", "deadline", err.Error()) } // ensure the pod was not actually created - if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) { + if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) { framework.Failf("expect notfound error looking for rejected pod, got %v", err) } ginkgo.By("create a configmap that should be denied by the webhook") // Creating the configmap, the request should be rejected configmap := nonCompliantConfigMap(f) - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap) framework.ExpectError(err, "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name) expectedErrMsg := "the configmap contains unwanted key and value" if !strings.Contains(err.Error(), expectedErrMsg) { @@ -1145,7 +1147,7 @@ func testWebhook(f *framework.Framework) { "admit": "this", }, } - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name) ginkgo.By("update (PUT) the admitted configmap to a non-compliant one should be 
rejected by the webhook") @@ -1163,7 +1165,7 @@ func testWebhook(f *framework.Framework) { ginkgo.By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook") patch := nonCompliantConfigMapPatch() - _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(allowedConfigMapName, types.StrategicMergePatchType, []byte(patch)) + _, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(context.TODO(), allowedConfigMapName, types.StrategicMergePatchType, []byte(patch)) framework.ExpectError(err, "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch) if !strings.Contains(err.Error(), expectedErrMsg) { framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error()) @@ -1179,11 +1181,11 @@ func testWebhook(f *framework.Framework) { }}) framework.ExpectNoError(err, "creating namespace %q", skippedNamespaceName) // clean up the namespace - defer client.CoreV1().Namespaces().Delete(skippedNamespaceName, nil) + defer client.CoreV1().Namespaces().Delete(context.TODO(), skippedNamespaceName, nil) ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace") configmap = nonCompliantConfigMap(f) - _, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap) + _, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(context.TODO(), configmap) framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName) } @@ -1191,7 +1193,7 @@ func testAttachingPodWebhook(f *framework.Framework) { ginkgo.By("create a pod") client := f.ClientSet pod := toBeAttachedPod(f) - _, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name) err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name) framework.ExpectNoError(err, "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name) @@ -1272,7 +1274,7 @@ func registerFailClosedWebhook(f *framework.Framework, configName string, certCt err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil) + f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) } } @@ -1287,7 +1289,7 @@ func testFailClosedWebhook(f *framework.Framework) { }, }}) framework.ExpectNoError(err, "creating namespace %q", failNamespaceName) - defer client.CoreV1().Namespaces().Delete(failNamespaceName, nil) + defer client.CoreV1().Namespaces().Delete(context.TODO(), failNamespaceName, nil) ginkgo.By("create a configmap should be unconditionally rejected by the webhook") configmap := &v1.ConfigMap{ @@ -1295,7 +1297,7 @@ func testFailClosedWebhook(f *framework.Framework) { Name: "foo", }, } - _, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap) + _, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(context.TODO(), configmap) framework.ExpectError(err, "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName) if !apierrors.IsInternalError(err) { framework.Failf("expect an 
internal error, got %#v", err) @@ -1358,7 +1360,7 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil) + err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) } } @@ -1419,7 +1421,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(configName, nil) + err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil) framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) } } @@ -1487,7 +1489,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str ginkgo.By("Deleting the validating-webhook-configuration, which should be possible to remove") - err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil) + err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) ginkgo.By("Creating a dummy mutating-webhook-configuration object") @@ -1543,13 +1545,13 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str ginkgo.By("Deleting the mutating-webhook-configuration, which should be possible to remove") - err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(configName, nil) + err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil) framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) } func createNamespace(f *framework.Framework, ns *v1.Namespace) error { return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { - _, err := f.ClientSet.CoreV1().Namespaces().Create(ns) + _, err := f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), ns) if err != nil { if strings.HasPrefix(err.Error(), "object is being deleted:") { return false, nil @@ -1654,11 +1656,11 @@ func updateConfigMap(c clientset.Interface, ns, name string, update updateConfig var cm *v1.ConfigMap pollErr := wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) { var err error - if cm, err = c.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{}); err != nil { + if cm, err = c.CoreV1().ConfigMaps(ns).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { return false, err } update(cm) - if cm, err = c.CoreV1().ConfigMaps(ns).Update(cm); err == nil { + if cm, err = c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm); err == nil { return true, nil } // Only retry update on conflict @@ -1693,10 +1695,10 @@ func updateCustomResource(c dynamic.ResourceInterface, ns, name string, update u } func cleanWebhookTest(client clientset.Interface, namespaceName string) { - _ = client.CoreV1().Services(namespaceName).Delete(serviceName, nil) - _ = 
client.AppsV1().Deployments(namespaceName).Delete(deploymentName, nil) - _ = client.CoreV1().Secrets(namespaceName).Delete(secretName, nil) - _ = client.RbacV1().RoleBindings("kube-system").Delete(roleBindingName, nil) + _ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceName, nil) + _ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentName, nil) + _ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretName, nil) + _ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingName, nil) } func registerWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) func() { @@ -1746,7 +1748,7 @@ func registerWebhookForCustomResource(f *framework.Framework, configName string, err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil) + client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) } } @@ -1823,7 +1825,9 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, configName err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") - return func() { client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(configName, nil) } + return func() { + client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil) + } } func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) { @@ -1983,7 +1987,7 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd. 
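The webhook tests above also exercise Patch, which keeps its JSON patch arguments and only gains the leading context. A hedged sketch of that call shape (the configuration-name parameter is an assumption; the patch body is the one used above):

package e2esketch // illustration only

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// enableCreateOperation re-adds the CREATE operation to the first rule of a
// validating webhook configuration, mirroring the Patch calls above.
func enableCreateOperation(ctx context.Context, c kubernetes.Interface, configName string) error {
	patch := []byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`)
	_, err := c.AdmissionregistrationV1().ValidatingWebhookConfigurations().Patch(ctx, configName, types.JSONPatchType, patch)
	return err
}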
] } }` - _, err = testcrd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch)) + _, err = testcrd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch)) framework.ExpectNoError(err, "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name) ginkgo.By("Patching the custom resource while v2 is storage version") @@ -2054,7 +2058,7 @@ func registerValidatingWebhookForCRD(f *framework.Framework, configName string, err = waitWebhookConfigurationReady(f) framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil) + client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) } } @@ -2112,7 +2116,7 @@ func testCRDDenyWebhook(f *framework.Framework) { } // create CRD - _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Create(crd) + _, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd) framework.ExpectError(err, "create custom resource definition %s should be denied by webhook", crd.Name) expectedErrMsg := "the crd contains unwanted label" if !strings.Contains(err.Error(), expectedErrMsg) { @@ -2124,13 +2128,13 @@ func labelNamespace(f *framework.Framework, namespace string) { client := f.ClientSet // Add a unique label to the namespace - ns, err := client.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}) + ns, err := client.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting namespace %s", namespace) if ns.Labels == nil { ns.Labels = map[string]string{} } ns.Labels[f.UniqueName] = "true" - _, err = client.CoreV1().Namespaces().Update(ns) + _, err = client.CoreV1().Namespaces().Update(context.TODO(), ns) framework.ExpectNoError(err, "error labeling namespace %s", namespace) } @@ -2184,7 +2188,7 @@ func registerSlowWebhook(f *framework.Framework, configName string, certCtx *cer framework.ExpectNoError(err, "waiting for webhook configuration to be ready") return func() { - client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil) + client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil) } } @@ -2192,7 +2196,7 @@ func testSlowWebhookTimeoutFailEarly(f *framework.Framework) { ginkgo.By("Request fails when timeout (1s) is shorter than slow webhook latency (5s)") client := f.ClientSet name := "e2e-test-slow-webhook-configmap" - _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}) + _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}) framework.ExpectError(err, "create configmap in namespace %s should have timed-out reaching slow webhook", f.Namespace.Name) // http timeout message: context deadline exceeded // dial timeout message: dial tcp {address}: i/o timeout @@ -2206,9 +2210,9 @@ func testSlowWebhookTimeoutFailEarly(f *framework.Framework) { func testSlowWebhookTimeoutNoError(f *framework.Framework) { client := f.ClientSet name := "e2e-test-slow-webhook-configmap" - _, err := 
client.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}) + _, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}) gomega.Expect(err).To(gomega.BeNil()) - err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(name, &metav1.DeleteOptions{}) + err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, &metav1.DeleteOptions{}) gomega.Expect(err).To(gomega.BeNil()) } @@ -2267,7 +2271,7 @@ func createValidatingWebhookConfiguration(f *framework.Framework, config *admiss } framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName) } - return f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(config) + return f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.TODO(), config) } // createMutatingWebhookConfiguration ensures the webhook config scopes object or namespace selection @@ -2282,7 +2286,7 @@ func createMutatingWebhookConfiguration(f *framework.Framework, config *admissio } framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName) } - return f.ClientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(config) + return f.ClientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), config) } func newDenyPodWebhookFixture(f *framework.Framework, certCtx *certContext, servicePort int32) admissionregistrationv1.ValidatingWebhook { @@ -2385,7 +2389,7 @@ func newMutateConfigMapWebhookFixture(f *framework.Framework, certCtx *certConte // createWebhookConfigurationReadyNamespace creates a separate namespace for webhook configuration ready markers to // prevent cross-talk with webhook configurations being tested. func createWebhookConfigurationReadyNamespace(f *framework.Framework) { - ns, err := f.ClientSet.CoreV1().Namespaces().Create(&v1.Namespace{ + ns, err := f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: f.Namespace.Name + "-markers", Labels: map[string]string{f.UniqueName + "-markers": "true"}, @@ -2409,7 +2413,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error { }, }, } - _, err := cmClient.Create(marker) + _, err := cmClient.Create(context.TODO(), marker) if err != nil { // The always-deny webhook does not provide a reason, so check for the error string we expect if strings.Contains(err.Error(), "denied") { @@ -2418,7 +2422,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error { return false, err } // best effort cleanup of markers that are no longer needed - _ = cmClient.Delete(marker.GetName(), nil) + _ = cmClient.Delete(context.TODO(), marker.GetName(), nil) framework.Logf("Waiting for webhook configuration to be ready...") return false, nil }) diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index d7823c6fcfb..4877984fc0e 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -17,6 +17,7 @@ limitations under the License. 
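The waitWebhookConfigurationReady hunk above polls by creating a marker ConfigMap and treating a "denied" admission error as readiness. A simplified, framework-free sketch of that loop under the same intermediate client signatures; the namespace argument, the GenerateName prefix, and the 1s/1m poll settings are assumptions standing in for the framework's values:

package example

import (
	"context"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForAlwaysDenyWebhook polls until creating a marker ConfigMap is rejected,
// which signals that the always-deny webhook is intercepting requests.
func waitForAlwaysDenyWebhook(client clientset.Interface, markerNS string) error {
	cmClient := client.CoreV1().ConfigMaps(markerNS)
	return wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		created, err := cmClient.Create(context.TODO(), &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{GenerateName: "webhook-marker-"},
		})
		if err != nil {
			// The always-deny webhook gives no structured reason, so match on the message.
			if strings.Contains(err.Error(), "denied") {
				return true, nil
			}
			return false, err
		}
		// Not intercepted yet: best-effort cleanup of the marker, then retry.
		_ = cmClient.Delete(context.TODO(), created.GetName(), nil)
		return false, nil
	})
}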
package apps import ( + "context" "fmt" "time" @@ -70,7 +71,7 @@ var _ = SIGDescribe("CronJob", func() { framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name) ginkgo.By("Ensuring at least two running jobs exists by listing jobs explicitly") - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) activeJobs, _ := filterActiveJobs(jobs) gomega.Expect(len(activeJobs)).To(gomega.BeNumerically(">=", 2)) @@ -95,7 +96,7 @@ var _ = SIGDescribe("CronJob", func() { framework.ExpectError(err) ginkgo.By("Ensuring no job exists by listing jobs explicitly") - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) gomega.Expect(jobs.Items).To(gomega.HaveLen(0)) @@ -122,7 +123,7 @@ var _ = SIGDescribe("CronJob", func() { gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly") - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name) activeJobs, _ := filterActiveJobs(jobs) gomega.Expect(activeJobs).To(gomega.HaveLen(1)) @@ -154,7 +155,7 @@ var _ = SIGDescribe("CronJob", func() { gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1)) ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly") - jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{}) + jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list the jobs in namespace %s", f.Namespace.Name) activeJobs, _ := filterActiveJobs(jobs) gomega.Expect(activeJobs).To(gomega.HaveLen(1)) @@ -266,7 +267,7 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1beta1 framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists in namespace %s", ns) ginkgo.By("Ensuring a finished job exists by listing jobs explicitly") - jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{}) + jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", ns) activeJobs, finishedJobs := filterActiveJobs(jobs) if len(finishedJobs) != 1 { @@ -282,7 +283,7 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1beta1 framework.ExpectNoError(err, "Failed to ensure that pods for job does not exists anymore in namespace %s", ns) ginkgo.By("Ensuring there is 1 finished job by listing jobs explicitly") - jobs, err = c.BatchV1().Jobs(ns).List(metav1.ListOptions{}) + jobs, err = c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to ensure there is one finished job by listing job explicitly in namespace %s", ns) activeJobs, finishedJobs = filterActiveJobs(jobs) if len(finishedJobs) 
!= 1 { @@ -354,16 +355,16 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.Concur } func createCronJob(c clientset.Interface, ns string, cronJob *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) { - return c.BatchV1beta1().CronJobs(ns).Create(cronJob) + return c.BatchV1beta1().CronJobs(ns).Create(context.TODO(), cronJob) } func getCronJob(c clientset.Interface, ns, name string) (*batchv1beta1.CronJob, error) { - return c.BatchV1beta1().CronJobs(ns).Get(name, metav1.GetOptions{}) + return c.BatchV1beta1().CronJobs(ns).Get(context.TODO(), name, metav1.GetOptions{}) } func deleteCronJob(c clientset.Interface, ns, name string) error { propagationPolicy := metav1.DeletePropagationBackground // Also delete jobs and pods related to cronjob - return c.BatchV1beta1().CronJobs(ns).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + return c.BatchV1beta1().CronJobs(ns).Delete(context.TODO(), name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) } // Wait for at least given amount of active jobs. @@ -415,7 +416,7 @@ func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string) // Wait for a job to disappear by listing them explicitly. func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{}) + jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -433,7 +434,7 @@ func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1. func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { options := metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", targetJob.UID)} - pods, err := c.CoreV1().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) if err != nil { return false, err } @@ -444,7 +445,7 @@ func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batc // Wait for a job to be replaced with a new one. func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{}) + jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -463,7 +464,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error // waitForJobsAtLeast waits for at least a number of jobs to appear. func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{}) + jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -474,7 +475,7 @@ func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error { // waitForAnyFinishedJob waits for any completed job to appear. 
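The cronjob helpers above all share one shape: wait.Poll around a Jobs List call that now takes a context. A condensed sketch of that shape, with the framework's Poll interval and cronJobTimeout replaced by placeholder durations and an illustrative function name:

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForJobCount polls the Jobs API until at least atLeast jobs exist in ns.
func waitForJobCount(c clientset.Interface, ns string, atLeast int) error {
	return wait.Poll(time.Second, 5*time.Minute, func() (bool, error) {
		jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(jobs.Items) >= atLeast, nil
	})
}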
func waitForAnyFinishedJob(c clientset.Interface, ns string) error { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { - jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{}) + jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index eb158ca3f19..e49670115b7 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -17,6 +17,7 @@ limitations under the License. package apps import ( + "context" "fmt" "strconv" "time" @@ -177,7 +178,7 @@ func replacePods(pods []*v1.Pod, store cache.Store) { // and a list of nodenames across which these containers restarted. func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) { options := metav1.ListOptions{LabelSelector: labelSelector.String()} - pods, err := c.CoreV1().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) framework.ExpectNoError(err) failedContainers := 0 containerRestartNodes := sets.NewString() @@ -227,12 +228,12 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labelSelector.String() - obj, err := f.ClientSet.CoreV1().Pods(ns).List(options) + obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector.String() - return f.ClientSet.CoreV1().Pods(ns).Watch(options) + return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options) }, }, &v1.Pod{}, diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index afd3fac1554..4fcbae9d12b 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -17,6 +17,7 @@ limitations under the License. package apps import ( + "context" "fmt" "reflect" "strings" @@ -67,7 +68,7 @@ func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a daemonsets := c.AppsV1().DaemonSets(namespace) var updateErr error pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil { + if ds, err = daemonsets.Get(context.TODO(), name, metav1.GetOptions{}); err != nil { if testutils.IsRetryableAPIError(err) { return false, nil } @@ -75,7 +76,7 @@ func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a } // Apply the update, then attempt to push it to the apiserver. 
applyUpdate(ds) - if ds, err = daemonsets.Update(ds); err == nil { + if ds, err = daemonsets.Update(context.TODO(), ds); err == nil { framework.Logf("Updating DaemonSet %s", name) return true, nil } @@ -98,7 +99,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.AfterEach(func() { // Clean up - daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}) + daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "unable to dump DaemonSets") if daemonsets != nil && len(daemonsets.Items) > 0 { for _, ds := range daemonsets.Items { @@ -108,12 +109,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err, "error waiting for daemon pod to be reaped") } } - if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { + if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil { framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets)) } else { framework.Logf("unable to dump daemonsets: %v", err) } - if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { + if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil { framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods)) } else { framework.Logf("unable to dump pods: %v", err) @@ -153,7 +154,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) - ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label)) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") @@ -165,7 +166,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.") podList := listDaemonPods(c, ns, label) pod := podList.Items[0] - err = c.CoreV1().Pods(ns).Delete(pod.Name, nil) + err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, nil) framework.ExpectNoError(err) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to revive") @@ -182,7 +183,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Creating daemon %q with a node selector", dsName) ds := newDaemonSet(dsName, image, complexLabel) ds.Spec.Template.Spec.NodeSelector = nodeSelector - ds, err := c.AppsV1().DaemonSets(ns).Create(ds) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds) framework.ExpectNoError(err) ginkgo.By("Initially, daemon pods should not be running on any nodes.") @@ -211,7 +212,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate") patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`, daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel]) - ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, 
[]byte(patch)) + ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch)) framework.ExpectNoError(err, "error patching daemon set") daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels) framework.ExpectEqual(len(daemonSetLabels), 1) @@ -245,7 +246,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { }, }, } - ds, err := c.AppsV1().DaemonSets(ns).Create(ds) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds) framework.ExpectNoError(err) ginkgo.By("Initially, daemon pods should not be running on any nodes.") @@ -279,7 +280,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName)) - ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label)) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") @@ -293,7 +294,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { pod := podList.Items[0] pod.ResourceVersion = "" pod.Status.Phase = v1.PodFailed - _, err = c.CoreV1().Pods(ns).UpdateStatus(&pod) + _, err = c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), &pod) framework.ExpectNoError(err, "error failing a daemon pod") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to revive") @@ -311,7 +312,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Creating simple daemon set %s", dsName) ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType} - ds, err := c.AppsV1().DaemonSets(ns).Create(ds) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") @@ -319,7 +320,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels - ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) framework.ExpectNoError(err) waitForHistoryCreated(c, ns, label, 1) first := curHistory(listDaemonHistories(c, ns, label), ds) @@ -329,7 +330,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage) - ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) + ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch)) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods images aren't updated.") @@ -341,7 +342,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels - ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) framework.ExpectNoError(err) waitForHistoryCreated(c, ns, label, 2) cur := curHistory(listDaemonHistories(c, ns, label), ds) @@ -360,7 +361,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Creating 
simple daemon set %s", dsName) ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType} - ds, err := c.AppsV1().DaemonSets(ns).Create(ds) + ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") @@ -368,7 +369,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels - ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) framework.ExpectNoError(err) waitForHistoryCreated(c, ns, label, 1) cur := curHistory(listDaemonHistories(c, ns, label), ds) @@ -378,12 +379,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage) - ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) + ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch)) framework.ExpectNoError(err) // Time to complete the rolling upgrade is proportional to the number of nodes in the cluster. // Get the number of nodes, and set the timeout appropriately. - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) nodeCount := len(nodes.Items) retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second @@ -397,7 +398,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels - ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{}) framework.ExpectNoError(err) waitForHistoryCreated(c, ns, label, 2) cur = curHistory(listDaemonHistories(c, ns, label), ds) @@ -419,7 +420,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType} - ds, err = c.AppsV1().DaemonSets(ns).Create(ds) + ds, err = c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds) framework.ExpectNoError(err) framework.Logf("Check that daemon pods launch on every node of the cluster") @@ -518,7 +519,7 @@ func newDaemonSet(dsName, image string, label map[string]string) *appsv1.DaemonS func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.CoreV1().Pods(ns).List(options) + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), options) framework.ExpectNoError(err) gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0)) return podList @@ -555,7 +556,7 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error { func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) { nsClient := c.CoreV1().Namespaces() - ns, err := nsClient.Get(nsName, metav1.GetOptions{}) + ns, err := nsClient.Get(context.TODO(), nsName, metav1.GetOptions{}) if err != nil { 
return nil, err } @@ -568,7 +569,7 @@ func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Names ns.Annotations[n] = "" } - return nsClient.Update(ns) + return nsClient.Update(context.TODO(), ns) } func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) { @@ -576,7 +577,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s var newNode *v1.Node var newLabels map[string]string err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, func() (bool, error) { - node, err := nodeClient.Get(nodeName, metav1.GetOptions{}) + node, err := nodeClient.Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return false, err } @@ -591,7 +592,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s for k, v := range labels { node.Labels[k] = v } - newNode, err = nodeClient.Update(node) + newNode, err = nodeClient.Update(context.TODO(), node) if err == nil { newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels) return true, err @@ -613,7 +614,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s func checkDaemonPodOnNodes(f *framework.Framework, ds *appsv1.DaemonSet, nodeNames []string) func() (bool, error) { return func() (bool, error) { - podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Logf("could not get the pod list: %v", err) return false, nil @@ -658,7 +659,7 @@ func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() } func schedulableNodes(c clientset.Interface, ds *appsv1.DaemonSet) []string { - nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) nodeNames := make([]string, 0) for _, node := range nodeList.Items { @@ -702,7 +703,7 @@ func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() } func checkDaemonStatus(f *framework.Framework, dsName string) error { - ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{}) + ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(context.TODO(), dsName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("Could not get daemon set from v1") } @@ -715,7 +716,7 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error { func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func() (bool, error) { return func() (bool, error) { - podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(ds.Namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -766,7 +767,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st listHistoryFn := func() (bool, error) { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - historyList, err := c.AppsV1().ControllerRevisions(ns).List(options) + historyList, err := c.AppsV1().ControllerRevisions(ns).List(context.TODO(), options) if err != nil { return false, err } @@ -783,7 +784,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st func listDaemonHistories(c clientset.Interface, ns string, 
label map[string]string) *appsv1.ControllerRevisionList { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - historyList, err := c.AppsV1().ControllerRevisions(ns).List(options) + historyList, err := c.AppsV1().ControllerRevisions(ns).List(context.TODO(), options) framework.ExpectNoError(err) gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0)) return historyList @@ -810,7 +811,7 @@ func curHistory(historyList *appsv1.ControllerRevisionList, ds *appsv1.DaemonSet func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) { return func() (bool, error) { - if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil { + if _, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil { if apierrors.IsNotFound(err) { return true, nil } diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 2e77803c9f1..a6c5481c443 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -134,7 +134,7 @@ var _ = SIGDescribe("Deployment", func() { }) func failureTrap(c clientset.Interface, ns string) { - deployments, err := c.AppsV1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + deployments, err := c.AppsV1().Deployments(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("Could not list Deployments in namespace %q: %v", ns, err) return @@ -160,7 +160,7 @@ func failureTrap(c clientset.Interface, ns string) { return } framework.Logf("Log out all the ReplicaSets if there is no deployment created") - rss, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err) return @@ -172,7 +172,7 @@ func failureTrap(c clientset.Interface, ns string) { framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err) } options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.CoreV1().Pods(rs.Namespace).List(options) + podList, err := c.CoreV1().Pods(rs.Namespace).List(context.TODO(), options) if err != nil { framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err) continue @@ -189,7 +189,7 @@ func intOrStrP(num int) *intstr.IntOrString { } func stopDeployment(c clientset.Interface, ns, deploymentName string) { - deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.Logf("Deleting deployment %s", deploymentName) @@ -197,20 +197,20 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) { framework.ExpectNoError(err) framework.Logf("Ensuring deployment %s was deleted", deploymentName) - _, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) + _, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) framework.ExpectError(err) framework.ExpectEqual(apierrors.IsNotFound(err), true) framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) 
framework.ExpectNoError(err) options := metav1.ListOptions{LabelSelector: selector.String()} - rss, err := c.AppsV1().ReplicaSets(ns).List(options) + rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options) framework.ExpectNoError(err) gomega.Expect(rss.Items).Should(gomega.HaveLen(0)) framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName) var pods *v1.PodList if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { - pods, err = c.CoreV1().Pods(ns).List(options) + pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options) if err != nil { return false, err } @@ -234,7 +234,7 @@ func testDeleteDeployment(f *framework.Framework) { framework.Logf("Creating simple deployment %s", deploymentName) d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} - deploy, err := c.AppsV1().Deployments(ns).Create(d) + deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) framework.ExpectNoError(err) // Wait for it to be updated to revision 1 @@ -244,7 +244,7 @@ func testDeleteDeployment(f *framework.Framework) { err = e2edeploy.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) - deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) framework.ExpectNoError(err) @@ -270,7 +270,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) rs.Annotations = annotations framework.Logf("Creating replica set %q (going to be adopted)", rs.Name) - _, err := c.AppsV1().ReplicaSets(ns).Create(rs) + _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs) framework.ExpectNoError(err) // Verify that the required pods have come up. err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) @@ -280,7 +280,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { deploymentName := "test-rolling-update-deployment" framework.Logf("Creating deployment %q", deploymentName) d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err := c.AppsV1().Deployments(ns).Create(d) + deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) framework.ExpectNoError(err) // Wait for it to be updated to revision 3546343826724305833. 
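stopDeployment above converts the deployment's label selector into ListOptions before listing the owned ReplicaSets and pods. A small sketch of just that selector-to-list chain, assuming the intermediate List signature used in this patch; the function name is illustrative:

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// replicaSetsForSelector turns a deployment's label selector into ListOptions
// and lists the matching ReplicaSets, the same chain stopDeployment uses.
func replicaSetsForSelector(c clientset.Interface, ns string, sel *metav1.LabelSelector) (*appsv1.ReplicaSetList, error) {
	selector, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		return nil, err
	}
	options := metav1.ListOptions{LabelSelector: selector.String()}
	return c.AppsV1().ReplicaSets(ns).List(context.TODO(), options)
}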
@@ -294,7 +294,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { // There should be 1 old RS (webserver-controller, which is adopted) framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name) - deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1()) framework.ExpectNoError(err) @@ -309,7 +309,7 @@ func testRecreateDeployment(f *framework.Framework) { deploymentName := "test-recreate-deployment" framework.Logf("Creating deployment %q", deploymentName) d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType) - deployment, err := c.AppsV1().Deployments(ns).Create(d) + deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) framework.ExpectNoError(err) // Wait for it to be updated to revision 1 @@ -347,7 +347,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { rsName := "test-cleanup-controller" replicas := int32(1) revisionHistoryLimit := utilpointer.Int32Ptr(0) - _, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)) + _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)) framework.ExpectNoError(err) // Verify that the required pods have come up. @@ -358,7 +358,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { deploymentName := "test-cleanup-deployment" framework.Logf("Creating deployment %s", deploymentName) - pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) framework.ExpectNoError(err, "Failed to query for pods: %v", err) options := metav1.ListOptions{ @@ -366,7 +366,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { } stopCh := make(chan struct{}) defer close(stopCh) - w, err := c.CoreV1().Pods(ns).Watch(options) + w, err := c.CoreV1().Pods(ns).Watch(context.TODO(), options) framework.ExpectNoError(err) go func() { // There should be only one pod being created, which is the pod with the agnhost image. @@ -396,7 +396,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { }() d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) d.Spec.RevisionHistoryLimit = revisionHistoryLimit - _, err = c.AppsV1().Deployments(ns).Create(d) + _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName)) @@ -418,7 +418,7 @@ func testRolloverDeployment(f *framework.Framework) { rsName := "test-rollover-controller" rsReplicas := int32(1) - _, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil)) + _, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil)) framework.ExpectNoError(err) // Verify that the required pods have come up. 
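testDeploymentCleanUpPolicy above opens a watch on pods and consumes the event channel in a goroutine. A reduced sketch of that consumption loop under the same intermediate Watch signature; the channel-returning shape and buffer size are assumptions, not the test's actual bookkeeping:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
)

// watchAddedPods streams the names of newly added pods in ns until stopCh closes.
func watchAddedPods(c clientset.Interface, ns string, opts metav1.ListOptions, stopCh <-chan struct{}) (<-chan string, error) {
	w, err := c.CoreV1().Pods(ns).Watch(context.TODO(), opts)
	if err != nil {
		return nil, err
	}
	// Small buffer so a slow consumer does not immediately block the watch loop.
	names := make(chan string, 16)
	go func() {
		defer w.Stop()
		defer close(names)
		for {
			select {
			case <-stopCh:
				return
			case event, ok := <-w.ResultChan():
				if !ok {
					return
				}
				if event.Type != watch.Added {
					continue
				}
				if pod, ok := event.Object.(*v1.Pod); ok {
					names <- pod.Name
				}
			}
		}
	}()
	return names, nil
}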
err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas) @@ -442,11 +442,11 @@ func testRolloverDeployment(f *framework.Framework) { MaxSurge: intOrStrP(1), } newDeployment.Spec.MinReadySeconds = int32(10) - _, err = c.AppsV1().Deployments(ns).Create(newDeployment) + _, err = c.AppsV1().Deployments(ns).Create(context.TODO(), newDeployment) framework.ExpectNoError(err) // Verify that the pods were scaled up and down as expected. - deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.Logf("Make sure deployment %q performs scaling operations", deploymentName) // Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1 @@ -457,7 +457,7 @@ func testRolloverDeployment(f *framework.Framework) { framework.ExpectNoError(err) framework.Logf("Ensure that both replica sets have 1 created replica") - oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) + oldRS, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{}) framework.ExpectNoError(err) ensureReplicas(oldRS, int32(1)) newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) @@ -488,11 +488,11 @@ func testRolloverDeployment(f *framework.Framework) { framework.ExpectNoError(err) framework.Logf("Ensure that both old replica sets have no replicas") - oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) + oldRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{}) framework.ExpectNoError(err) ensureReplicas(oldRS, int32(0)) // Not really the new replica set anymore but we GET by name so that's fine. 
- newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{}) + newRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), newRS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ensureReplicas(newRS, int32(0)) } @@ -532,7 +532,7 @@ func testIterativeDeployments(f *framework.Framework) { d.Spec.RevisionHistoryLimit = &two d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero framework.Logf("Creating deployment %q", deploymentName) - deployment, err := c.AppsV1().Deployments(ns).Create(d) + deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) framework.ExpectNoError(err) iterations := 20 @@ -595,7 +595,7 @@ func testIterativeDeployments(f *framework.Framework) { selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) framework.ExpectNoError(err) opts := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.CoreV1().Pods(ns).List(opts) + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), opts) framework.ExpectNoError(err) if len(podList.Items) == 0 { framework.Logf("%02d: no deployment pods to delete", i) @@ -607,7 +607,7 @@ func testIterativeDeployments(f *framework.Framework) { } name := podList.Items[p].Name framework.Logf("%02d: deleting deployment pod %q", i, name) - err := c.CoreV1().Pods(ns).Delete(name, nil) + err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, nil) if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } @@ -616,7 +616,7 @@ func testIterativeDeployments(f *framework.Framework) { } // unpause the deployment if we end up pausing it - deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) + deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) framework.ExpectNoError(err) if deployment.Spec.Paused { deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { @@ -646,7 +646,7 @@ func testDeploymentsControllerRef(f *framework.Framework) { podLabels := map[string]string{"name": WebserverImageName} replicas := int32(1) d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err := c.AppsV1().Deployments(ns).Create(d) + deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) @@ -673,7 +673,7 @@ func testDeploymentsControllerRef(f *framework.Framework) { deploymentName = "test-adopt-deployment" framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) - deploy, err = c.AppsV1().Deployments(ns).Create(d) + deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d) framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(c, deploy) framework.ExpectNoError(err) @@ -708,7 +708,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2) framework.Logf("Creating deployment %q", deploymentName) - deployment, err := c.AppsV1().Deployments(ns).Create(d) + deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) framework.ExpectNoError(err) framework.Logf("Waiting for observed generation %d", deployment.Generation) @@ -756,7 +756,7 @@ func 
testProportionalScalingDeployment(f *framework.Framework) { // The desired replicas wait makes sure that the RS controller has created expected number of pods. framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) - firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) + firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) err = waitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS) framework.ExpectNoError(err) @@ -780,7 +780,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { // The desired replicas wait makes sure that the RS controller has created expected number of pods. framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) - secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) + secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) err = waitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS) framework.ExpectNoError(err) @@ -801,9 +801,9 @@ func testProportionalScalingDeployment(f *framework.Framework) { framework.ExpectNoError(err) framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName) - firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) + firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) + secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas. 
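The comment on the first rollout's replica set spells out the proportional-scaling arithmetic: 8 existing replicas out of 13 current ones, with the deployment scaled from 10 to 30. A tiny worked version of just that arithmetic; the deployment controller's real bookkeeping also handles rounding and leftover distribution, which this sketch ignores:

package main

import "fmt"

// proportionalShare reproduces the comment's arithmetic: a replica set holding
// rsReplicas of currentTotal gets its proportional share of the scale difference.
func proportionalShare(rsReplicas, currentTotal, oldTotal, newTotal int32) int32 {
	return rsReplicas + (newTotal-oldTotal)*rsReplicas/currentTotal
}

func main() {
	// First rollout's replica set: 8 of 13 replicas, deployment scaled 10 -> 30.
	fmt.Println(proportionalShare(8, 13, 10, 30)) // 8 + 20*8/13 = 8 + 12 = 20
}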
@@ -846,7 +846,7 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - rsList, err := c.AppsV1().ReplicaSets(ns).List(options) + rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options) framework.ExpectNoError(err) gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0)) return rsList @@ -856,7 +856,7 @@ func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) er trueVar := true deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar} deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID)) - return c.AppsV1().Deployments(d.Namespace).Delete(d.Name, deleteOptions) + return c.AppsV1().Deployments(d.Namespace).Delete(context.TODO(), d.Name, deleteOptions) } func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framework) { @@ -890,7 +890,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew MaxSurge: intOrStrP(1), MaxUnavailable: intOrStrP(0), } - deployment, err := c.AppsV1().Deployments(ns).Create(d) + deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d) framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(c, deployment) framework.ExpectNoError(err) @@ -1023,7 +1023,7 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type) } - w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion})) + w, err := c.AppsV1().Deployments(d.Namespace).Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion})) if err != nil { return err } @@ -1065,7 +1065,7 @@ func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string var d *appsv1.Deployment pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) { - deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1088,7 +1088,7 @@ func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error { desiredGeneration := replicaSet.Generation err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) { - rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{}) + rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -1104,7 +1104,7 @@ func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, rep func waitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error { desiredGeneration := replicaSet.Generation err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) { - rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{}) + rs, err := 
c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index 4b4ad4ae983..f35ce6a12f5 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -17,6 +17,7 @@ limitations under the License. package apps import ( + "context" "fmt" "time" @@ -69,7 +70,7 @@ var _ = SIGDescribe("DisruptionController", func() { // Since disruptionAllowed starts out 0, if we see it ever become positive, // that means the controller is working. err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{}) + pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) if err != nil { return false, err } @@ -240,7 +241,7 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable MinAvailable: &minAvailable, }, } - _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb) + _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb) framework.ExpectNoError(err, "Waiting for the pdb to be created with minAvailable %d in namespace %s", minAvailable.IntVal, ns) waitForPdbToBeProcessed(cs, ns) } @@ -256,19 +257,19 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail MaxUnavailable: &maxUnavailable, }, } - _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb) + _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb) framework.ExpectNoError(err, "Waiting for the pdb to be created with maxUnavailable %d in namespace %s", maxUnavailable.IntVal, ns) waitForPdbToBeProcessed(cs, ns) } func updatePDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) { err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - old, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{}) + old, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) if err != nil { return err } old.Spec.MinAvailable = &minAvailable - if _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update(old); err != nil { + if _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update(context.TODO(), old); err != nil { return err } return nil @@ -297,7 +298,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { }, } - _, err := cs.CoreV1().Pods(ns).Create(pod) + _, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns) } } @@ -305,7 +306,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) { func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) { ginkgo.By("Waiting for all pods to be running") err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { - pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"}) + pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: "foo=bar"}) if err != nil { return false, err } @@ -364,14 +365,14 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu }, } - _, err := cs.AppsV1().ReplicaSets(ns).Create(rs) + _, err := cs.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs) framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, 
ns) } func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) { ginkgo.By("locating a running pod") err = wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { - podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{}) + podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -392,7 +393,7 @@ func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err erro func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string) { ginkgo.By("Waiting for the pdb to be processed") err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { - pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{}) + pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) if err != nil { return false, err } @@ -407,7 +408,7 @@ func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string) { func waitForPdbToObserveHealthyPods(cs kubernetes.Interface, ns string, healthyCount int32) { ginkgo.By("Waiting for the pdb to observed all healthy pods") err := wait.PollImmediate(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) { - pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{}) + pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index d13addc1882..c7120077f6c 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -17,6 +17,7 @@ limitations under the License. package apps import ( + "context" "fmt" "time" @@ -260,7 +261,7 @@ var _ = SIGDescribe("Job", func() { // waitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail. func waitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error { return wait.Poll(framework.Poll, timeout, func() (bool, error) { - curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) + curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index ef41acbb2ef..5440de4e8e9 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -17,6 +17,7 @@ limitations under the License. 
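updatePDBMinAvailableOrDie above wraps a read-modify-write of the PodDisruptionBudget in retry.RetryOnConflict. The same loop with the test assertions stripped out, assuming the intermediate Update signature used in this patch; the fixture name "foo" is the one these tests use:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// setPDBMinAvailable re-reads and updates the PDB until the write stops conflicting.
func setPDBMinAvailable(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
		if err != nil {
			return err
		}
		pdb.Spec.MinAvailable = &minAvailable
		_, err = cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update(context.TODO(), pdb)
		return err
	})
}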
package apps import ( + "context" "fmt" "strings" "time" @@ -100,7 +101,7 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod { } func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error { - pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage)) + pod, err := c.CoreV1().Pods(namespace).Create(context.TODO(), podOnNode(podName, nodeName, framework.ServeHostnameImage)) if err == nil { framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName) } else { @@ -145,14 +146,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ginkgo.By("choose a node - we will block all network traffic on this node") var podOpts metav1.ListOptions nodeOpts := metav1.ListOptions{} - nodes, err := c.CoreV1().Nodes().List(nodeOpts) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), nodeOpts) framework.ExpectNoError(err) e2enode.Filter(nodes, func(node v1.Node) bool { if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) { return false } podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} - pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts) if err != nil || len(pods.Items) <= 0 { return false } @@ -176,12 +177,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector.String() - obj, err := f.ClientSet.CoreV1().Nodes().List(options) + obj, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = nodeSelector.String() - return f.ClientSet.CoreV1().Nodes().Watch(options) + return f.ClientSet.CoreV1().Nodes().Watch(context.TODO(), options) }, }, &v1.Node{}, @@ -256,11 +257,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) // list pods after all have been scheduled framework.ExpectNoError(err) nodeName := pods.Items[0].Spec.NodeName - node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) // This creates a temporary network partition, verifies that 'podNameToDisappear', @@ -298,7 +299,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // verify that it is really on the requested node { - pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), additionalPod, metav1.GetOptions{}) framework.ExpectNoError(err) if pod.Spec.NodeName != node.Name { framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name) @@ -325,11 +326,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ginkgo.By("choose a node with at least one pod - we will block some network traffic on this 
node") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) // list pods after all have been scheduled framework.ExpectNoError(err) nodeName := pods.Items[0].Spec.NodeName - node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) // This creates a temporary network partition, verifies that 'podNameToDisappear', @@ -367,7 +368,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { e2eskipper.SkipUnlessProviderIs("gke") ginkgo.By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name) headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService) framework.ExpectNoError(err) c = f.ClientSet ns = f.Namespace.Name @@ -385,7 +386,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}} podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}} ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels) - _, err := c.AppsV1().StatefulSets(ns).Create(ps) + _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps) framework.ExpectNoError(err) nn, err := e2enode.TotalRegistered(f.ClientSet) @@ -402,13 +403,13 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { e2eskipper.SkipUnlessSSHKeyPresent() ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels) - _, err := c.AppsV1().StatefulSets(ns).Create(ps) + _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps) pod := e2esset.GetPodList(c, ps).Items[0] - node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) // Blocks outgoing network traffic on 'node'. 
Then verifies that 'podNameToDisappear', @@ -450,11 +451,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node") options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) // list pods after all have been scheduled framework.ExpectNoError(err) nodeName := pods.Items[0].Spec.NodeName - node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) // This creates a temporary network partition, verifies that the job has 'parallelism' number of @@ -501,7 +502,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { return false } podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} - pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts) if err != nil || len(pods.Items) <= 0 { return false } @@ -515,7 +516,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { if err := e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil { framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) } - pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts) framework.ExpectNoError(err) podTolerationTimes := map[string]time.Duration{} // This test doesn't add tolerations by itself, but because they may be present in the cluster @@ -564,12 +565,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector.String() - obj, err := f.ClientSet.CoreV1().Nodes().List(options) + obj, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = nodeSelector.String() - return f.ClientSet.CoreV1().Nodes().Watch(options) + return f.ClientSet.CoreV1().Nodes().Watch(context.TODO(), options) }, }, &v1.Node{}, @@ -625,7 +626,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { sleepTime := maxTolerationTime + 20*time.Second ginkgo.By(fmt.Sprintf("Sleeping for %v and checking if all Pods were evicted", sleepTime)) time.Sleep(sleepTime) - pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(podOpts) + pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), podOpts) framework.ExpectNoError(err) seenRunning := []string{} for _, pod := range pods.Items { diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index fa57bbc6dda..44d3f8cbcbd 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -17,6 +17,7 @@ limitations under the License. 
package apps import ( + "context" "fmt" "time" @@ -126,7 +127,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri ginkgo.By(fmt.Sprintf("Creating replication controller %s", name)) newRC := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} - _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC) + _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), newRC) framework.ExpectNoError(err) // Check that pods for the new RC were created. @@ -144,7 +145,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri } err = f.WaitForPodRunning(pod.Name) if err != nil { - updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if getErr == nil { err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) } else { @@ -183,11 +184,11 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name) quota := newPodQuota(name, "2") - _, err := c.CoreV1().ResourceQuotas(namespace).Create(quota) + _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota) framework.ExpectNoError(err) err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{}) + quota, err = c.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -202,14 +203,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name)) rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil) - rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc) + rc, err = c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), rc) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name)) generation := rc.Generation conditions := rc.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) + rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -238,7 +239,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { generation = rc.Generation conditions = rc.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) + rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -281,12 +282,12 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { replicas := int32(1) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rcSt.Spec.Selector = map[string]string{"name": name} - rc, err := 
f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt) framework.ExpectNoError(err) ginkgo.By("Then the orphan pod is adopted") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the RC if apierrors.IsNotFound(err) { return true, nil @@ -310,7 +311,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { replicas := int32(1) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rcSt.Spec.Selector = map[string]string{"name": name} - rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt) framework.ExpectNoError(err) ginkgo.By("When the matched label of one of its pods change") @@ -319,11 +320,11 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { p := pods.Items[0] err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pod.Labels = map[string]string{"name": "not-matching-name"} - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod) if err != nil && apierrors.IsConflict(err) { return false, nil } @@ -336,7 +337,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { ginkgo.By("Then the pod is released") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) for _, owner := range p2.OwnerReferences { if *owner.Controller && owner.UID == rc.UID { @@ -361,12 +362,12 @@ func updateReplicationControllerWithRetries(c clientset.Interface, namespace, na var updateErr error pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { var err error - if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}); err != nil { + if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(rc) - if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(rc); err == nil { + if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc); err == nil { framework.Logf("Updating replication controller %q", name) return true, nil } diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 9267b48eca7..5170e00d79f 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -17,6 +17,7 @@ limitations under the License. 
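// Illustrative sketch (not a hunk from this patch): the hunks keep passing
// context.TODO(), which never expires. A caller that already has a deadline can hand
// a real context to the same List call shown above (label selector name=<name>).
// Assumes client-go v0.18+; the package name, helper name, and 30s timeout are
// invented for illustration.
package sketches

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
)

// listPodsByName lists pods labelled name=<name>, bounding the API call itself with
// a 30-second deadline instead of relying on client-side polling alone.
func listPodsByName(cs kubernetes.Interface, ns, name string) (*v1.PodList, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	return cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
}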
package apps import ( + "context" "fmt" "time" @@ -127,7 +128,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s framework.Logf("Creating ReplicaSet %s", name) newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} - _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS) + _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), newRS) framework.ExpectNoError(err) // Check that pods for the new RS were created. @@ -145,7 +146,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s } err = f.WaitForPodRunning(pod.Name) if err != nil { - updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if getErr == nil { err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) } else { @@ -184,11 +185,11 @@ func testReplicaSetConditionCheck(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name)) quota := newPodQuota(name, "2") - _, err := c.CoreV1().ResourceQuotas(namespace).Create(quota) + _, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota) framework.ExpectNoError(err) err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{}) + quota, err = c.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -203,14 +204,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name)) rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil) - rs, err = c.AppsV1().ReplicaSets(namespace).Create(rs) + rs, err = c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), rs) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name)) generation := rs.Generation conditions := rs.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}) + rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -240,7 +241,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) { generation = rs.Generation conditions = rs.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}) + rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -283,12 +284,12 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { replicas := int32(1) rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}} - rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt) + rs, 
err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), rsSt) framework.ExpectNoError(err) ginkgo.By("Then the orphan pod is adopted") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the ReplicaSet if apierrors.IsNotFound(err) { return true, nil @@ -311,11 +312,11 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { p = &pods.Items[0] err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pod.Labels = map[string]string{"name": "not-matching-name"} - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod) if err != nil && apierrors.IsConflict(err) { return false, nil } @@ -328,7 +329,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { ginkgo.By("Then the pod is released") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) for _, owner := range p2.OwnerReferences { if *owner.Controller && owner.UID == rs.UID { diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index 09173dbc202..f10bfdbde8b 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -103,7 +103,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels) - _, err := c.CoreV1().Services(ns).Create(headlessService) + _, err := c.CoreV1().Services(ns).Create(context.TODO(), headlessService) framework.ExpectNoError(err) }) @@ -123,7 +123,7 @@ var _ = SIGDescribe("StatefulSet", func() { *(ss.Spec.Replicas) = 3 e2esset.PauseNewPods(ss) - _, err := c.AppsV1().StatefulSets(ns).Create(ss) + _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) framework.ExpectNoError(err) ginkgo.By("Saturating stateful set " + ss.Name) @@ -165,7 +165,7 @@ var _ = SIGDescribe("StatefulSet", func() { // Replace ss with the one returned from Create() so it has the UID. // Save Kind since it won't be populated in the returned ss. 
kind := ss.Kind - ss, err := c.AppsV1().StatefulSets(ns).Create(ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) framework.ExpectNoError(err) ss.Kind = kind @@ -247,7 +247,7 @@ var _ = SIGDescribe("StatefulSet", func() { *(ss.Spec.Replicas) = 2 e2esset.PauseNewPods(ss) - _, err := c.AppsV1().StatefulSets(ns).Create(ss) + _, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) framework.ExpectNoError(err) e2esset.WaitForRunning(c, 1, 0, ss) @@ -314,7 +314,7 @@ var _ = SIGDescribe("StatefulSet", func() { }()} }(), } - ss, err := c.AppsV1().StatefulSets(ns).Create(ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) @@ -499,7 +499,7 @@ var _ = SIGDescribe("StatefulSet", func() { ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{ Type: appsv1.OnDeleteStatefulSetStrategyType, } - ss, err := c.AppsV1().StatefulSets(ns).Create(ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) @@ -573,7 +573,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow]", func() { psLabels := klabels.Set(labels) ginkgo.By("Initializing watcher for selector " + psLabels.String()) - watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{ + watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), metav1.ListOptions{ LabelSelector: psLabels.AsSelector().String(), }) framework.ExpectNoError(err) @@ -581,7 +581,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns) ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) setHTTPProbe(ss) - ss, err = c.AppsV1().StatefulSets(ns).Create(ss) + ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) framework.ExpectNoError(err) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) @@ -616,7 +616,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectNoError(err) ginkgo.By("Scale down will halt with unhealthy stateful pod") - watcher, err = f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{ + watcher, err = f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), metav1.ListOptions{ LabelSelector: psLabels.AsSelector().String(), }) framework.ExpectNoError(err) @@ -661,7 +661,7 @@ var _ = SIGDescribe("StatefulSet", func() { ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) framework.ExpectNoError(err) ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) @@ -720,7 +720,7 @@ var _ = SIGDescribe("StatefulSet", func() { NodeName: node.Name, }, } - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name) @@ -728,7 +728,7 @@ var _ = SIGDescribe("StatefulSet", func() { statefulPodContainer := 
&ss.Spec.Template.Spec.Containers[0] statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort) ss.Spec.Template.Spec.NodeName = node.Name - _, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(ss) + _, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(context.TODO(), ss) framework.ExpectNoError(err) ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) @@ -738,7 +738,7 @@ var _ = SIGDescribe("StatefulSet", func() { var initialStatefulPodUID types.UID ginkgo.By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name) - w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName})) + w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName})) framework.ExpectNoError(err) ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), statefulPodTimeout) defer cancel() @@ -763,13 +763,13 @@ var _ = SIGDescribe("StatefulSet", func() { } ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name) - err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state") // we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry gomega.Eventually(func() error { - statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{}) + statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), statefulPodName, metav1.GetOptions{}) if err != nil { return err } @@ -793,13 +793,13 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels) setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) ginkgo.By("getting scale subresource") - scale, err := c.AppsV1().StatefulSets(ns).GetScale(ssName, metav1.GetOptions{}) + scale, err := c.AppsV1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get scale subresource: %v", err) } @@ -809,14 +809,14 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("updating a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = 2 - scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(ssName, scale) + scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(context.TODO(), ssName, scale) if err != nil { framework.Failf("Failed to put scale subresource: %v", err) } framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) ginkgo.By("verifying the statefulset Spec.Replicas was modified") - ss, err = c.AppsV1().StatefulSets(ns).Get(ssName, metav1.GetOptions{}) + ss, err = c.AppsV1().StatefulSets(ns).Get(context.TODO(), ssName, 
metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get statefulset resource: %v", err) } @@ -1086,7 +1086,7 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k // PVCs and one using no storage. func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) { setHTTPProbe(ss) - ss, err := c.AppsV1().StatefulSets(ns).Create(ss) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) ss = waitForStatus(c, ss) @@ -1268,7 +1268,7 @@ func restorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error { func deleteStatefulPodAtIndex(c clientset.Interface, index int, ss *appsv1.StatefulSet) { name := getStatefulSetPodNameAtIndex(index, ss) noGrace := int64(0) - if err := c.CoreV1().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { + if err := c.CoreV1().Pods(ss.Namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { framework.Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err) } } @@ -1287,12 +1287,12 @@ func updateStatefulSetWithRetries(c clientset.Interface, namespace, name string, statefulSets := c.AppsV1().StatefulSets(namespace) var updateErr error pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil { + if statefulSet, err = statefulSets.Get(context.TODO(), name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(statefulSet) - if statefulSet, err = statefulSets.Update(statefulSet); err == nil { + if statefulSet, err = statefulSets.Update(context.TODO(), statefulSet); err == nil { framework.Logf("Updating stateful set %s", name) return true, nil } @@ -1307,7 +1307,7 @@ func updateStatefulSetWithRetries(c clientset.Interface, namespace, name string, // getStatefulSet gets the StatefulSet named name in namespace. 
func getStatefulSet(c clientset.Interface, namespace, name string) *appsv1.StatefulSet { - ss, err := c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{}) + ss, err := c.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err) } diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index 7a7d450aa15..796c8b906f1 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -84,19 +84,19 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { f.PodClient().CreateSync(pod) - _, err := f.PodClient().Get(pod.Name, metav1.GetOptions{}) + _, err := f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get audit-pod") - podChan, err := f.PodClient().Watch(watchOptions) + podChan, err := f.PodClient().Watch(context.TODO(), watchOptions) framework.ExpectNoError(err, "failed to create watch for pods") podChan.Stop() f.PodClient().Update(pod.Name, updatePod) - _, err = f.PodClient().List(metav1.ListOptions{}) + _, err = f.PodClient().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list pods") - _, err = f.PodClient().Patch(pod.Name, types.JSONPatchType, patch) + _, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch) framework.ExpectNoError(err, "failed to patch pod") f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) @@ -206,26 +206,26 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { podLabels := map[string]string{"name": "audit-deployment-pod"} d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "agnhost", imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RecreateDeploymentStrategyType) - _, err := f.ClientSet.AppsV1().Deployments(namespace).Create(d) + _, err := f.ClientSet.AppsV1().Deployments(namespace).Create(context.TODO(), d) framework.ExpectNoError(err, "failed to create audit-deployment") - _, err = f.ClientSet.AppsV1().Deployments(namespace).Get(d.Name, metav1.GetOptions{}) + _, err = f.ClientSet.AppsV1().Deployments(namespace).Get(context.TODO(), d.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get audit-deployment") - deploymentChan, err := f.ClientSet.AppsV1().Deployments(namespace).Watch(watchOptions) + deploymentChan, err := f.ClientSet.AppsV1().Deployments(namespace).Watch(context.TODO(), watchOptions) framework.ExpectNoError(err, "failed to create watch for deployments") deploymentChan.Stop() - _, err = f.ClientSet.AppsV1().Deployments(namespace).Update(d) + _, err = f.ClientSet.AppsV1().Deployments(namespace).Update(context.TODO(), d) framework.ExpectNoError(err, "failed to update audit-deployment") - _, err = f.ClientSet.AppsV1().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch) + _, err = f.ClientSet.AppsV1().Deployments(namespace).Patch(context.TODO(), d.Name, types.JSONPatchType, patch) framework.ExpectNoError(err, "failed to patch deployment") - _, err = f.ClientSet.AppsV1().Deployments(namespace).List(metav1.ListOptions{}) + _, err = f.ClientSet.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to create list deployments") - err = f.ClientSet.AppsV1().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{}) + err = f.ClientSet.AppsV1().Deployments(namespace).Delete(context.TODO(), 
"audit-deployment", &metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete deployments") expectEvents(f, []utils.AuditEvent{ @@ -339,26 +339,26 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { }, } - _, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Create(configMap) + _, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap) framework.ExpectNoError(err, "failed to create audit-configmap") - _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Get(configMap.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMap.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get audit-configmap") - configMapChan, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Watch(watchOptions) + configMapChan, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), watchOptions) framework.ExpectNoError(err, "failed to create watch for config maps") configMapChan.Stop() - _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Update(configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap) framework.ExpectNoError(err, "failed to update audit-configmap") - _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Patch(configMap.Name, types.JSONPatchType, patch) + _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Patch(context.TODO(), configMap.Name, types.JSONPatchType, patch) framework.ExpectNoError(err, "failed to patch configmap") - _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list config maps") - err = f.ClientSet.CoreV1().ConfigMaps(namespace).Delete(configMap.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configMap.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete audit-configmap") expectEvents(f, []utils.AuditEvent{ @@ -471,26 +471,26 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { "top-secret": []byte("foo-bar"), }, } - _, err := f.ClientSet.CoreV1().Secrets(namespace).Create(secret) + _, err := f.ClientSet.CoreV1().Secrets(namespace).Create(context.TODO(), secret) framework.ExpectNoError(err, "failed to create audit-secret") - _, err = f.ClientSet.CoreV1().Secrets(namespace).Get(secret.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(namespace).Get(context.TODO(), secret.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get audit-secret") - secretChan, err := f.ClientSet.CoreV1().Secrets(namespace).Watch(watchOptions) + secretChan, err := f.ClientSet.CoreV1().Secrets(namespace).Watch(context.TODO(), watchOptions) framework.ExpectNoError(err, "failed to create watch for secrets") secretChan.Stop() - _, err = f.ClientSet.CoreV1().Secrets(namespace).Update(secret) + _, err = f.ClientSet.CoreV1().Secrets(namespace).Update(context.TODO(), secret) framework.ExpectNoError(err, "failed to update audit-secret") - _, err = f.ClientSet.CoreV1().Secrets(namespace).Patch(secret.Name, types.JSONPatchType, patch) + _, err = f.ClientSet.CoreV1().Secrets(namespace).Patch(context.TODO(), secret.Name, types.JSONPatchType, patch) framework.ExpectNoError(err, "failed to patch secret") - _, err = f.ClientSet.CoreV1().Secrets(namespace).List(metav1.ListOptions{}) + _, err = 
f.ClientSet.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list secrets") - err = f.ClientSet.CoreV1().Secrets(namespace).Delete(secret.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(namespace).Delete(context.TODO(), secret.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete audit-secret") expectEvents(f, []utils.AuditEvent{ @@ -670,7 +670,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { anonymousClient, err := clientset.NewForConfig(config) framework.ExpectNoError(err) - _, err = anonymousClient.CoreV1().Pods(namespace).Get("another-audit-pod", metav1.GetOptions{}) + _, err = anonymousClient.CoreV1().Pods(namespace).Get(context.TODO(), "another-audit-pod", metav1.GetOptions{}) expectForbidden(err) expectEvents(f, []utils.AuditEvent{ @@ -703,7 +703,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() { impersonatedClient, err := clientset.NewForConfig(config) framework.ExpectNoError(err) - _, err = impersonatedClient.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + _, err = impersonatedClient.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list pods") expectEvents(f, []utils.AuditEvent{ diff --git a/test/e2e/auth/audit_dynamic.go b/test/e2e/auth/audit_dynamic.go index 1984d070c92..1a069064079 100644 --- a/test/e2e/auth/audit_dynamic.go +++ b/test/e2e/auth/audit_dynamic.go @@ -17,6 +17,7 @@ limitations under the License. package auth import ( + "context" "fmt" "strings" "time" @@ -58,14 +59,14 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { anonymousClient, err := clientset.NewForConfig(config) framework.ExpectNoError(err, "failed to create the anonymous client") - _, err = f.ClientSet.CoreV1().Namespaces().Create(&v1.Namespace{ + _, err = f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: "audit", }, }) framework.ExpectNoError(err, "failed to create namespace") - _, err = f.ClientSet.CoreV1().Pods(namespace).Create(&v1.Pod{ + _, err = f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "audit-proxy", Labels: map[string]string{ @@ -89,7 +90,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { }) framework.ExpectNoError(err, "failed to create proxy pod") - _, err = f.ClientSet.CoreV1().Services(namespace).Create(&v1.Service{ + _, err = f.ClientSet.CoreV1().Services(namespace).Create(context.TODO(), &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "audit", }, @@ -110,7 +111,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { var podIP string // get pod ip err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) { - p, err := f.ClientSet.CoreV1().Pods(namespace).Get("audit-proxy", metav1.GetOptions{}) + p, err := f.ClientSet.CoreV1().Pods(namespace).Get(context.TODO(), "audit-proxy", metav1.GetOptions{}) if apierrors.IsNotFound(err) { framework.Logf("waiting for audit-proxy pod to be present") return false, nil @@ -150,7 +151,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { }, } - _, err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Create(&sink) + _, err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Create(context.TODO(), &sink) framework.ExpectNoError(err, "failed to create audit sink") framework.Logf("created audit sink") @@ -194,20 +195,20 @@ var _ = 
SIGDescribe("[Feature:DynamicAudit]", func() { f.PodClient().CreateSync(pod) - _, err := f.PodClient().Get(pod.Name, metav1.GetOptions{}) + _, err := f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get audit-pod") - podChan, err := f.PodClient().Watch(watchOptions) + podChan, err := f.PodClient().Watch(context.TODO(), watchOptions) framework.ExpectNoError(err, "failed to create watch for pods") for range podChan.ResultChan() { } f.PodClient().Update(pod.Name, updatePod) - _, err = f.PodClient().List(metav1.ListOptions{}) + _, err = f.PodClient().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list pods") - _, err = f.PodClient().Patch(pod.Name, types.JSONPatchType, patch) + _, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch) framework.ExpectNoError(err, "failed to patch pod") f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) @@ -323,7 +324,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { // get a pod with unauthorized user { func() { - _, err := anonymousClient.CoreV1().Pods(namespace).Get("another-audit-pod", metav1.GetOptions{}) + _, err := anonymousClient.CoreV1().Pods(namespace).Get(context.TODO(), "another-audit-pod", metav1.GetOptions{}) expectForbidden(err) }, []utils.AuditEvent{ @@ -375,7 +376,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { return len(missingReport.MissingEvents) == 0, nil }) framework.ExpectNoError(err, "after %v failed to observe audit events", pollingTimeout) - err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Delete("test", &metav1.DeleteOptions{}) + err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Delete(context.TODO(), "test", &metav1.DeleteOptions{}) framework.ExpectNoError(err, "could not delete audit configuration") }) }) diff --git a/test/e2e/auth/certificates.go b/test/e2e/auth/certificates.go index 6f7cd211b60..94c03d75eee 100644 --- a/test/e2e/auth/certificates.go +++ b/test/e2e/auth/certificates.go @@ -17,6 +17,7 @@ limitations under the License. 
package auth import ( + "context" "crypto/x509" "crypto/x509/pkix" "encoding/pem" @@ -67,7 +68,7 @@ var _ = SIGDescribe("Certificates API", func() { csrs := f.ClientSet.CertificatesV1beta1().CertificateSigningRequests() framework.Logf("creating CSR") - csr, err = csrs.Create(csr) + csr, err = csrs.Create(context.TODO(), csr) framework.ExpectNoError(err) csrName := csr.Name @@ -83,7 +84,7 @@ var _ = SIGDescribe("Certificates API", func() { } csr, err = csrs.UpdateApproval(csr) if err != nil { - csr, _ = csrs.Get(csrName, metav1.GetOptions{}) + csr, _ = csrs.Get(context.TODO(), csrName, metav1.GetOptions{}) framework.Logf("err updating approval: %v", err) return false, nil } @@ -92,7 +93,7 @@ var _ = SIGDescribe("Certificates API", func() { framework.Logf("waiting for CSR to be signed") framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) { - csr, err = csrs.Get(csrName, metav1.GetOptions{}) + csr, err = csrs.Get(context.TODO(), csrName, metav1.GetOptions{}) if err != nil { framework.Logf("error getting csr: %v", err) return false, nil @@ -118,6 +119,6 @@ var _ = SIGDescribe("Certificates API", func() { newClient, err := v1beta1client.NewForConfig(rcfg) framework.ExpectNoError(err) - framework.ExpectNoError(newClient.CertificateSigningRequests().Delete(csrName, nil)) + framework.ExpectNoError(newClient.CertificateSigningRequests().Delete(context.TODO(), csrName, nil)) }) }) diff --git a/test/e2e/auth/node_authn.go b/test/e2e/auth/node_authn.go index 0cbad0d7366..47b88153bc9 100644 --- a/test/e2e/auth/node_authn.go +++ b/test/e2e/auth/node_authn.go @@ -17,6 +17,7 @@ limitations under the License. package auth import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -38,7 +39,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { ginkgo.BeforeEach(func() { ns = f.Namespace.Name - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns) framework.ExpectNotEqual(len(nodeList.Items), 0) @@ -49,7 +50,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { // make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works saName := "default" - sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(context.TODO(), saName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to retrieve service account (%s:%s)", ns, saName) framework.ExpectNotEqual(len(sa.Secrets), 0) @@ -74,7 +75,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { }, AutomountServiceAccountToken: &trueValue, } - _, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(newSA) + _, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(context.TODO(), newSA) framework.ExpectNoError(err, "failed to create service account (%s:%s)", ns, newSA.Name) pod := createNodeAuthTestPod(f) diff --git a/test/e2e/auth/node_authz.go b/test/e2e/auth/node_authz.go index c3351e65d82..a20e4eb2940 100644 --- a/test/e2e/auth/node_authz.go +++ b/test/e2e/auth/node_authz.go @@ -17,6 +17,7 @@ limitations under the License. 
package auth import ( + "context" "fmt" "time" @@ -49,13 +50,13 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { ginkgo.BeforeEach(func() { ns = f.Namespace.Name - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns) framework.ExpectNotEqual(len(nodeList.Items), 0) nodeName = nodeList.Items[0].Name asUser = nodeNamePrefix + nodeName saName := "default" - sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(context.TODO(), saName, metav1.GetOptions{}) framework.ExpectNotEqual(len(sa.Secrets), 0) framework.ExpectNoError(err, "failed to retrieve service account (%s:%s)", ns, saName) defaultSaSecret = sa.Secrets[0].Name @@ -71,17 +72,17 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { }) ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() { - _, err := c.CoreV1().Secrets(ns).Get("foo", metav1.GetOptions{}) + _, err := c.CoreV1().Secrets(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) framework.ExpectEqual(apierrors.IsForbidden(err), true) }) ginkgo.It("Getting an existing secret should exit with the Forbidden error", func() { - _, err := c.CoreV1().Secrets(ns).Get(defaultSaSecret, metav1.GetOptions{}) + _, err := c.CoreV1().Secrets(ns).Get(context.TODO(), defaultSaSecret, metav1.GetOptions{}) framework.ExpectEqual(apierrors.IsForbidden(err), true) }) ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() { - _, err := c.CoreV1().ConfigMaps(ns).Get("foo", metav1.GetOptions{}) + _, err := c.CoreV1().ConfigMaps(ns).Get(context.TODO(), "foo", metav1.GetOptions{}) framework.ExpectEqual(apierrors.IsForbidden(err), true) }) @@ -96,9 +97,9 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { "data": "content", }, } - _, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(configmap) + _, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(context.TODO(), configmap) framework.ExpectNoError(err, "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap) - _, err = c.CoreV1().ConfigMaps(ns).Get(configmap.Name, metav1.GetOptions{}) + _, err = c.CoreV1().ConfigMaps(ns).Get(context.TODO(), configmap.Name, metav1.GetOptions{}) framework.ExpectEqual(apierrors.IsForbidden(err), true) }) @@ -113,11 +114,11 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { "data": []byte("keep it secret"), }, } - _, err := f.ClientSet.CoreV1().Secrets(ns).Create(secret) + _, err := f.ClientSet.CoreV1().Secrets(ns).Create(context.TODO(), secret) framework.ExpectNoError(err, "failed to create secret (%s:%s)", ns, secret.Name) ginkgo.By("Node should not get the secret") - _, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{}) + _, err = c.CoreV1().Secrets(ns).Get(context.TODO(), secret.Name, metav1.GetOptions{}) framework.ExpectEqual(apierrors.IsForbidden(err), true) ginkgo.By("Create a pod that use the secret") @@ -146,14 +147,14 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { }, } - _, err = f.ClientSet.CoreV1().Pods(ns).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err, "failed to create pod (%s:%s)", ns, pod.Name) ginkgo.By("The node should able to access the secret") itv := framework.Poll dur := 1 * 
time.Minute err = wait.Poll(itv, dur, func() (bool, error) { - _, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{}) + _, err = c.CoreV1().Secrets(ns).Get(context.TODO(), secret.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Failed to get secret %v, err: %v", secret.Name, err) return false, nil @@ -172,13 +173,13 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { }, } ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) - _, err := c.CoreV1().Nodes().Create(node) + _, err := c.CoreV1().Nodes().Create(context.TODO(), node) framework.ExpectEqual(apierrors.IsForbidden(err), true) }) ginkgo.It("A node shouldn't be able to delete another node", func() { ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser)) - err := c.CoreV1().Nodes().Delete("foo", &metav1.DeleteOptions{}) + err := c.CoreV1().Nodes().Delete(context.TODO(), "foo", &metav1.DeleteOptions{}) framework.ExpectEqual(apierrors.IsForbidden(err), true) }) }) diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go index 21a28fe7a98..0033b0ac8e5 100644 --- a/test/e2e/auth/pod_security_policy.go +++ b/test/e2e/auth/pod_security_policy.go @@ -17,6 +17,7 @@ limitations under the License. package auth import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -78,7 +79,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { ginkgo.It("should forbid pod creation when no PSP is available", func() { ginkgo.By("Running a restricted pod") - _, err := c.CoreV1().Pods(ns).Create(restrictedPod("restricted")) + _, err := c.CoreV1().Pods(ns).Create(context.TODO(), restrictedPod("restricted")) expectForbidden(err) }) @@ -88,12 +89,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { defer cleanup() ginkgo.By("Running a restricted pod") - pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed")) + pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), restrictedPod("allowed")) framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace)) testPrivilegedPods(func(pod *v1.Pod) { - _, err := c.CoreV1().Pods(ns).Create(pod) + _, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) expectForbidden(err) }) }) @@ -107,12 +108,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { defer cleanup() testPrivilegedPods(func(pod *v1.Pod) { - p, err := c.CoreV1().Pods(ns).Create(pod) + p, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace)) // Verify expected PSP was used. - p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{}) + p, err = c.CoreV1().Pods(ns).Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) validated, found := p.Annotations[psputil.ValidatedPSPAnnotation] framework.ExpectEqual(found, true, "PSP annotation not found") @@ -214,11 +215,11 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecu ns := f.Namespace.Name name := fmt.Sprintf("%s-%s", ns, psp.Name) psp.Name = name - psp, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(psp) + psp, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp) framework.ExpectNoError(err, "Failed to create PSP") // Create the Role to bind it to the namespace. 
- _, err = f.ClientSet.RbacV1().Roles(ns).Create(&rbacv1.Role{ + _, err = f.ClientSet.RbacV1().Roles(ns).Create(context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -245,7 +246,7 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecu return psp, func() { // Cleanup non-namespaced PSP object. - f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Delete(name, &metav1.DeleteOptions{}) + f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Delete(context.TODO(), name, &metav1.DeleteOptions{}) } } diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 5cd6de4514c..4c4170be1f6 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -17,6 +17,7 @@ limitations under the License. package auth import ( + "context" "fmt" "path" "regexp" @@ -49,7 +50,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { var secrets []v1.ObjectReference framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) { ginkgo.By("waiting for a single token reference") - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{}) if apierrors.IsNotFound(err) { framework.Logf("default service account was not found") return false, nil @@ -75,19 +76,19 @@ var _ = SIGDescribe("ServiceAccounts", func() { { ginkgo.By("ensuring the single token reference persists") time.Sleep(2 * time.Second) - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(sa.Secrets, secrets) } // delete the referenced secret ginkgo.By("deleting the service account token") - framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil)) + framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secrets[0].Name, nil)) // wait for the referenced secret to be removed, and another one autocreated framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { ginkgo.By("waiting for a new token reference") - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{}) if err != nil { framework.Logf("error getting default service account: %v", err) return false, err @@ -113,7 +114,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { { ginkgo.By("ensuring the single token reference persists") time.Sleep(2 * time.Second) - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(sa.Secrets, secrets) } @@ -121,17 +122,17 @@ var _ = SIGDescribe("ServiceAccounts", func() { // delete the reference from the service account ginkgo.By("deleting the reference to the service account token") { - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := 
f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{}) framework.ExpectNoError(err) sa.Secrets = nil - _, updateErr := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Update(sa) + _, updateErr := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Update(context.TODO(), sa) framework.ExpectNoError(updateErr) } // wait for another one to be autocreated framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { ginkgo.By("waiting for a new token to be created and added") - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{}) if err != nil { framework.Logf("error getting default service account: %v", err) return false, err @@ -153,7 +154,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { { ginkgo.By("ensuring the single token reference persists") time.Sleep(2 * time.Second) - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(sa.Secrets, secrets) } @@ -171,13 +172,13 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.ConformanceIt("should mount an API token into pods ", func() { var rootCAContent string - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}) framework.ExpectNoError(err) // Standard get, update retry loop framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { ginkgo.By("getting the auto-created API token") - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("mount-test", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "mount-test", metav1.GetOptions{}) if apierrors.IsNotFound(err) { framework.Logf("mount-test service account was not found") return false, nil @@ -191,7 +192,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { return false, nil } for _, secretRef := range sa.Secrets { - secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{}) + secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretRef.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Error getting secret %s: %v", secretRef.Name, err) continue @@ -207,7 +208,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { })) zero := int64(0) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{ + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-service-account-" + string(uuid.NewUUID()), }, @@ -216,10 +217,10 @@ var _ = SIGDescribe("ServiceAccounts", func() { Containers: []v1.Container{{ Name: "test", Image: imageutils.GetE2EImage(imageutils.BusyBox), - Command: []string{"sleep", "100000"}, // run and pause + Command: []string{"sleep", "100000"}, }}, - TerminationGracePeriodSeconds: &zero, // terminate quickly when deleted - 
RestartPolicy: v1.RestartPolicyNever, // never restart + TerminationGracePeriodSeconds: &zero, + RestartPolicy: v1.RestartPolicyNever, }, }) framework.ExpectNoError(err) @@ -238,7 +239,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.ExpectEqual(mountedNamespace, f.Namespace.Name) // Token should be a valid credential that identifies the pod's service account tokenReview := &authenticationv1.TokenReview{Spec: authenticationv1.TokenReviewSpec{Token: mountedToken}} - tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(tokenReview) + tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(context.TODO(), tokenReview) framework.ExpectNoError(err) framework.ExpectEqual(tokenReview.Status.Authenticated, true) framework.ExpectEqual(tokenReview.Status.Error, "") @@ -281,15 +282,15 @@ var _ = SIGDescribe("ServiceAccounts", func() { falseValue := false mountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount"}, AutomountServiceAccountToken: &trueValue} nomountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nomount"}, AutomountServiceAccountToken: &falseValue} - mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(mountSA) + mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), mountSA) framework.ExpectNoError(err) - nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(nomountSA) + nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), nomountSA) framework.ExpectNoError(err) // Standard get, update retry loop framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { ginkgo.By("getting the auto-created API token") - sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), mountSA.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { framework.Logf("mount service account was not found") return false, nil @@ -303,7 +304,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { return false, nil } for _, secretRef := range sa.Secrets { - secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{}) + secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretRef.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Error getting secret %s: %v", secretRef.Name, err) continue @@ -393,7 +394,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { AutomountServiceAccountToken: tc.AutomountPodSpec, }, } - createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) framework.Logf("created pod %s", tc.PodName) @@ -418,7 +419,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { cfg, err := framework.LoadConfig() framework.ExpectNoError(err) - if _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{ + if _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "kube-root-ca.crt", }, @@ -488,7 +489,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { }}, }, } - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), 
pod) framework.ExpectNoError(err) framework.Logf("created pod") diff --git a/test/e2e/autoscaling/autoscaling_timer.go b/test/e2e/autoscaling/autoscaling_timer.go index 2c665824c08..2bb9a879823 100644 --- a/test/e2e/autoscaling/autoscaling_timer.go +++ b/test/e2e/autoscaling/autoscaling_timer.go @@ -17,6 +17,7 @@ limitations under the License. package autoscaling import ( + "context" "strings" "time" @@ -36,7 +37,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling" SIGDescribe("Autoscaling a service", func() { ginkgo.BeforeEach(func() { // Check if Cloud Autoscaler is enabled by trying to get its ConfigMap. - _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{}) if err != nil { e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled") } diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index c94eba7624f..2e6428a0a63 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -17,6 +17,7 @@ limitations under the License. package autoscaling import ( + "context" "encoding/json" "fmt" "math" @@ -71,7 +72,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun e2eskipper.SkipUnlessProviderIs("gce", "gke", "kubemark") // Check if Cloud Autoscaler is enabled by trying to get its ConfigMap. - _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{}) + _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{}) if err != nil { e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled") } @@ -115,7 +116,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster")) setMigSizes(originalSizes) framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount, scaleDownTimeout)) - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) s := time.Now() makeSchedulableLoop: @@ -255,7 +256,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun // annotate all nodes with no-scale-down ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled" - nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String(), @@ -456,7 +457,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e return err } - _, err = f.ClientSet.CoreV1().Nodes().Patch(string(node.Name), types.StrategicMergePatchType, patchBytes) + _, err = f.ClientSet.CoreV1().Nodes().Patch(context.TODO(), string(node.Name), types.StrategicMergePatchType, patchBytes) if err != nil { return err } diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 038270d085b..2f0294a2470 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -17,6 +17,7 @@ limitations under the 
License. package autoscaling import ( + "context" "fmt" "io/ioutil" "math" @@ -147,7 +148,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { expectedNodes += size } framework.ExpectNoError(e2enode.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout)) - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) s := time.Now() @@ -178,7 +179,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { EventsLoop: for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) { ginkgo.By("Waiting for NotTriggerScaleUp event") - events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{}) + events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, e := range events.Items { @@ -621,7 +622,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.By(fmt.Sprintf("New nodes: %v\n", newNodesSet)) registeredNodes := sets.NewString() for nodeName := range newNodesSet { - node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err == nil && node != nil { registeredNodes.Insert(nodeName) } else { @@ -778,7 +779,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } ginkgo.By("Make remaining nodes unschedulable") - nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) framework.ExpectNoError(err) @@ -857,7 +858,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ngNodes, err := framework.GetGroupNodes(minMig) framework.ExpectNoError(err) framework.ExpectEqual(len(ngNodes) == 1, true) - node, err := f.ClientSet.CoreV1().Nodes().Get(ngNodes[0], metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), ngNodes[0], metav1.GetOptions{}) ginkgo.By(fmt.Sprintf("Target node for scale-down: %s", node.Name)) framework.ExpectNoError(err) @@ -905,7 +906,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.By("Block network connectivity to some nodes to simulate unhealthy cluster") nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize)))) - nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) framework.ExpectNoError(err) @@ -1012,7 +1013,7 @@ func execCmd(args ...string) *exec.Cmd { func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace string, podsPerNode, pdbSize int, verifyFunction func(int)) { increasedSize := manuallyIncreaseClusterSize(f, migSizes) - nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) framework.ExpectNoError(err) @@ -1035,10 +1036,10 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str 
MinAvailable: &minAvailable, }, } - _, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb) + _, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb) defer func() { - f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{}) + f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(context.TODO(), pdb.Name, &metav1.DeleteOptions{}) }() framework.ExpectNoError(err) @@ -1346,7 +1347,7 @@ func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, time // WaitForClusterSizeFuncWithUnready waits until the cluster size matches the given function and assumes some unready nodes. func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration, expectedUnready int) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -1373,7 +1374,7 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface, tolerateUnreadyCount int) error { var notready []string for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) { - pods, err := c.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { return fmt.Errorf("failed to get pods: %v", err) } @@ -1413,7 +1414,7 @@ func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interf } func getAnyNode(c clientset.Interface) *v1.Node { - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -1448,10 +1449,10 @@ func drainNode(f *framework.Framework, node *v1.Node) { ginkgo.By("Manually drain the single node") podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} - pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) + pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts) framework.ExpectNoError(err) for _, pod := range pods.Items { - err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) } } @@ -1459,7 +1460,7 @@ func drainNode(f *framework.Framework, node *v1.Node) { func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error { ginkgo.By(fmt.Sprintf("Taint node %s", node.Name)) for j := 0; j < 3; j++ { - freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + freshNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) if err != nil { return err } @@ -1473,7 +1474,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error { Value: "DisabledForTest", Effect: v1.TaintEffectNoSchedule, }) - _, err = c.CoreV1().Nodes().Update(freshNode) + _, err = 
c.CoreV1().Nodes().Update(context.TODO(), freshNode) if err == nil { return nil } @@ -1496,7 +1497,7 @@ func (CriticalAddonsOnlyError) Error() string { func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error { ginkgo.By(fmt.Sprintf("Remove taint from node %s", node.Name)) for j := 0; j < 3; j++ { - freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + freshNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) if err != nil { return err } @@ -1514,7 +1515,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd return nil } freshNode.Spec.Taints = newTaints - _, err = c.CoreV1().Nodes().Update(freshNode) + _, err = c.CoreV1().Nodes().Update(context.TODO(), freshNode) if err == nil { return nil } @@ -1571,7 +1572,7 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id if err != nil { return err } - _, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{}) if err != nil { return err } @@ -1594,7 +1595,7 @@ func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods in if err != nil { return err } - _, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{}) if err != nil { return err } @@ -1675,7 +1676,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa if err != nil { return err } - rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{}) if err != nil { return err } @@ -1689,7 +1690,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa // (we retry 409 errors in case rc reference got out of sync) for j := 0; j < 3; j++ { *rc.Spec.Replicas = int32((i + 1) * podsPerNode) - rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(rc) + rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc) if err == nil { break } @@ -1697,14 +1698,14 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa return err } klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j) - rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) + rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{}) if err != nil { return err } } err = wait.PollImmediate(5*time.Second, podTimeout, func() (bool, error) { - rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) + rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{}) if err != nil || rc.Status.ReadyReplicas < int32((i+1)*podsPerNode) { return false, nil } @@ -1751,7 +1752,7 @@ func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[strin // Try to get clusterwide health from CA status configmap. // Status configmap is not parsing-friendly, so evil regexpery follows. 
func getClusterwideStatus(c clientset.Interface) (string, error) { - configMap, err := c.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{}) + configMap, err := c.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{}) if err != nil { return "", err } @@ -1800,7 +1801,7 @@ func getStatusTimestamp(status string) (time.Time, error) { // Try to get scaleup statuses of all node groups. // Status configmap is not parsing-friendly, so evil regexpery follows. func getScaleUpStatus(c clientset.Interface) (*scaleUpStatus, error) { - configMap, err := c.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{}) + configMap, err := c.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{}) if err != nil { return nil, err } @@ -1879,7 +1880,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) { var finalErr error for _, newPdbName := range newPdbs { ginkgo.By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName)) - err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{}) + err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(context.TODO(), newPdbName, &metav1.DeleteOptions{}) if err != nil { // log error, but attempt to remove other pdbs klog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err) @@ -1917,7 +1918,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) { MinAvailable: &minAvailable, }, } - _, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(pdb) + _, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(context.TODO(), pdb) newPdbs = append(newPdbs, pdbName) if err != nil { @@ -1933,7 +1934,7 @@ func createPriorityClasses(f *framework.Framework) func() { highPriorityClassName: 1000, } for className, priority := range priorityClasses { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority}) if err != nil { klog.Errorf("Error creating priority class: %v", err) } @@ -1942,7 +1943,7 @@ func createPriorityClasses(f *framework.Framework) func() { return func() { for className := range priorityClasses { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(className, nil) + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), className, nil) if err != nil { klog.Errorf("Error deleting priority class: %v", err) } diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go index 1c8faea60b4..b5f72849fd7 100644 --- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go @@ -276,24 +276,24 @@ func (tc *CustomMetricTestCase) Run() { waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas) // Autoscale the deployment - _, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(tc.hpa) + _, err = 
tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(context.TODO(), tc.hpa) if err != nil { framework.Failf("Failed to create HPA: %v", err) } - defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{}) + defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(context.TODO(), tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{}) waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas) } func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) error { if deployment != nil { - _, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment) + _, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(context.TODO(), deployment) if err != nil { return err } } if pod != nil { - _, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(pod) + _, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(context.TODO(), pod) if err != nil { return err } @@ -303,10 +303,10 @@ func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, dep func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) { if deployment != nil { - _ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{}) + _ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), deployment.ObjectMeta.Name, &metav1.DeleteOptions{}) } if pod != nil { - _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(pod.ObjectMeta.Name, &metav1.DeleteOptions{}) + _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), pod.ObjectMeta.Name, &metav1.DeleteOptions{}) } } @@ -440,7 +440,7 @@ func externalHPA(namespace string, metricTargets map[string]externalMetricTarget func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) { interval := 20 * time.Second err := wait.PollImmediate(interval, timeout, func() (bool, error) { - deployment, err := cs.AppsV1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{}) + deployment, err := cs.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get replication controller %s: %v", deployment, err) } diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index 3c50383b46a..7d4d51a41ba 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -17,6 +17,7 @@ limitations under the License. 
package autoscaling import ( + "context" "fmt" "math" "strings" @@ -265,7 +266,7 @@ func getScheduableCores(nodes []v1.Node) int64 { } func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) { - cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{}) + cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), DNSAutoscalerLabelName, metav1.GetOptions{}) if err != nil { return nil, err } @@ -273,7 +274,7 @@ func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) { } func deleteDNSScalingConfigMap(c clientset.Interface) error { - if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil { + if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), DNSAutoscalerLabelName, nil); err != nil { return err } framework.Logf("DNS autoscaling ConfigMap deleted.") @@ -299,7 +300,7 @@ func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap { } func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error { - _, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(configMap) + _, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(context.TODO(), configMap) if err != nil { return err } @@ -310,7 +311,7 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e func getDNSReplicas(c clientset.Interface) (int, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - deployments, err := c.AppsV1().Deployments(metav1.NamespaceSystem).List(listOpts) + deployments, err := c.AppsV1().Deployments(metav1.NamespaceSystem).List(context.TODO(), listOpts) if err != nil { return 0, err } @@ -325,7 +326,7 @@ func getDNSReplicas(c clientset.Interface) (int, error) { func deleteDNSAutoscalerPod(c clientset.Interface) error { label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts) + pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts) if err != nil { return err } @@ -334,7 +335,7 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error { } podName := pods.Items[0].Name - if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil { + if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), podName, nil); err != nil { return err } framework.Logf("DNS autoscaling pod %v deleted.", podName) diff --git a/test/e2e/cloud/gcp/addon_update.go b/test/e2e/cloud/gcp/addon_update.go index 31edd3290e7..62b4554a3b7 100644 --- a/test/e2e/cloud/gcp/addon_update.go +++ b/test/e2e/cloud/gcp/addon_update.go @@ -18,6 +18,7 @@ package gcp import ( "bytes" + "context" "fmt" "io" "os" @@ -301,7 +302,7 @@ var _ = SIGDescribe("Addon update", func() { // Delete the "ensure exist class" addon at the end. 
defer func() { framework.Logf("Cleaning up ensure exist class addon.") - err := f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil) + err := f.ClientSet.CoreV1().Services(addonNsName).Delete(context.TODO(), "addon-ensure-exists-test", nil) framework.ExpectNoError(err) }() @@ -335,7 +336,7 @@ var _ = SIGDescribe("Addon update", func() { waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true) ginkgo.By("verify invalid addons weren't created") - _, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get("invalid-addon-test", metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get(context.TODO(), "invalid-addon-test", metav1.GetOptions{}) framework.ExpectError(err) // Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function. @@ -362,7 +363,7 @@ func waitForReplicationControllerwithSelectorInAddonTest(c clientset.Interface, // waitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false) func waitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) + _, err := c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { framework.Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err) return !exist, nil @@ -381,7 +382,7 @@ func waitForReplicationController(c clientset.Interface, namespace, name string, func waitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - services, err := c.CoreV1().Services(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) + services, err := c.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) switch { case len(services.Items) != 0: framework.Logf("Service with %s in namespace %s found.", selector.String(), namespace) @@ -408,7 +409,7 @@ func waitForServiceWithSelector(c clientset.Interface, namespace string, selecto func waitForReplicationControllerWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - rcs, err := c.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) + rcs, err := c.CoreV1().ReplicationControllers(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) switch { case len(rcs.Items) != 0: framework.Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace) diff --git a/test/e2e/cloud/gcp/ha_master.go b/test/e2e/cloud/gcp/ha_master.go index 41f3884a75e..416063c019f 100644 --- a/test/e2e/cloud/gcp/ha_master.go +++ b/test/e2e/cloud/gcp/ha_master.go @@ -17,6 +17,7 @@ limitations under the License. package gcp import ( + "context" "fmt" "os/exec" "path" @@ -124,7 +125,7 @@ func generateMasterRegexp(prefix string) string { // waitForMasters waits until the cluster has the desired number of ready masters in it. 
func waitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Logf("Failed to list nodes: %v", err) continue diff --git a/test/e2e/cloud/gcp/node_lease.go b/test/e2e/cloud/gcp/node_lease.go index 7e8bd8e4927..6d7479e85c0 100644 --- a/test/e2e/cloud/gcp/node_lease.go +++ b/test/e2e/cloud/gcp/node_lease.go @@ -17,6 +17,7 @@ limitations under the License. package gcp import ( + "context" "fmt" "strings" "time" @@ -111,7 +112,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { gomega.Eventually(func() error { pass := true for _, node := range originalNodes.Items { - if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil { + if _, err := leaseClient.Get(context.TODO(), node.ObjectMeta.Name, metav1.GetOptions{}); err != nil { framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err) pass = false } @@ -148,7 +149,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { } framework.ExpectNotEqual(deletedNodeName, "") gomega.Eventually(func() error { - if _, err := leaseClient.Get(deletedNodeName, metav1.GetOptions{}); err == nil { + if _, err := leaseClient.Get(context.TODO(), deletedNodeName, metav1.GetOptions{}); err == nil { return fmt.Errorf("node lease is not deleted yet for node %q", deletedNodeName) } return nil @@ -157,7 +158,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { ginkgo.By("verify node leases still exist for remaining nodes") gomega.Eventually(func() error { for _, node := range targetNodes.Items { - if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil { + if _, err := leaseClient.Get(context.TODO(), node.ObjectMeta.Name, metav1.GetOptions{}); err != nil { return err } } diff --git a/test/e2e/cloud/gcp/reboot.go b/test/e2e/cloud/gcp/reboot.go index 731b496dc83..931292655c3 100644 --- a/test/e2e/cloud/gcp/reboot.go +++ b/test/e2e/cloud/gcp/reboot.go @@ -17,6 +17,7 @@ limitations under the License. package gcp import ( + "context" "fmt" "strings" "sync" @@ -70,7 +71,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { // events for the kube-system namespace on failures namespaceName := metav1.NamespaceSystem ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName)) - events, err := f.ClientSet.CoreV1().Events(namespaceName).List(metav1.ListOptions{}) + events, err := f.ClientSet.CoreV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, e := range events.Items { @@ -232,7 +233,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool { // Get the node initially. framework.Logf("Getting %s", name) - node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { framework.Logf("Couldn't get node %s", name) return false diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go index 05c702b1375..a190a4b3ca2 100644 --- a/test/e2e/cloud/gcp/resize_nodes.go +++ b/test/e2e/cloud/gcp/resize_nodes.go @@ -17,6 +17,7 @@ limitations under the License. 
package gcp import ( + "context" "fmt" "strings" "time" @@ -33,12 +34,12 @@ import ( ) func resizeRC(c clientset.Interface, ns, name string, replicas int32) error { - rc, err := c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) + rc, err := c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return err } *(rc.Spec.Replicas) = replicas - _, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(rc) + _, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(context.TODO(), rc) return err } diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go index 7e57e6670b7..09cfc0e9dc1 100644 --- a/test/e2e/cloud/nodes.go +++ b/test/e2e/cloud/nodes.go @@ -17,6 +17,7 @@ limitations under the License. package cloud import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -65,7 +66,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() { framework.ExpectNoError(err) framework.ExpectEqual(len(newNodes), len(origNodes.Items)-1) - _, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{}) + _, err = c.CoreV1().Nodes().Get(context.TODO(), nodeToDelete.Name, metav1.GetOptions{}) if err == nil { framework.Failf("node %q still exists when it should be deleted", nodeToDelete.Name) } else if !apierrors.IsNotFound(err) { diff --git a/test/e2e/common/configmap.go b/test/e2e/common/configmap.go index 1b91c869f39..378f7b58f75 100644 --- a/test/e2e/common/configmap.go +++ b/test/e2e/common/configmap.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "encoding/json" "fmt" @@ -43,7 +44,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -91,7 +92,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { configMap := newEnvFromConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -140,17 +141,17 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { name := "configmap-test-" + string(uuid.NewUUID()) configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating ConfigMap %v/%v", f.Namespace.Name, configMap.Name)) - _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) framework.ExpectNoError(err, "failed to create ConfigMap") configMap.Data = map[string]string{ "data": "value", } ginkgo.By(fmt.Sprintf("Updating configMap %v/%v", f.Namespace.Name, configMap.Name)) - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap) framework.ExpectNoError(err, "failed to update ConfigMap") - configMapFromUpdate, err := 
f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(name, metav1.GetOptions{}) + configMapFromUpdate, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get ConfigMap") ginkgo.By(fmt.Sprintf("Verifying update of ConfigMap %v/%v", f.Namespace.Name, configMap.Name)) framework.ExpectEqual(configMapFromUpdate.Data, configMap.Data) @@ -160,7 +161,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { testNamespaceName := f.Namespace.Name testConfigMapName := "test-configmap" + string(uuid.NewUUID()) - _, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Create(&v1.ConfigMap{ + _, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Create(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: testConfigMapName, Labels: map[string]string{ @@ -185,16 +186,16 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { }) framework.ExpectNoError(err, "failed to marshal patch data") - _, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload)) + _, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(context.TODO(), testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload)) framework.ExpectNoError(err, "failed to patch ConfigMap") - configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(testConfigMapName, metav1.GetOptions{}) + configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(context.TODO(), testConfigMapName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get ConfigMap") framework.ExpectEqual(configMap.Data["valueName"], "value1", "failed to patch ConfigMap") framework.ExpectEqual(configMap.Labels["test-configmap"], "patched", "failed to patch ConfigMap") // listing in all namespaces to hit the endpoint - configMapList, err := f.ClientSet.CoreV1().ConfigMaps("").List(metav1.ListOptions{ + configMapList, err := f.ClientSet.CoreV1().ConfigMaps("").List(context.TODO(), metav1.ListOptions{ LabelSelector: "test-configmap-static=true", }) framework.ExpectNoError(err, "failed to list ConfigMaps with LabelSelector") @@ -211,7 +212,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { } framework.ExpectEqual(testConfigMapFound, true, "failed to find ConfigMap in list") - err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{ + err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: "test-configmap-static=true", }) framework.ExpectNoError(err, "failed to delete ConfigMap collection with LabelSelector") @@ -245,5 +246,5 @@ func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) { } ginkgo.By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name)) - return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap) + return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) } diff --git a/test/e2e/common/configmap_volume.go b/test/e2e/common/configmap_volume.go index 9709462b941..f2da51145e9 100644 --- a/test/e2e/common/configmap_volume.go +++ b/test/e2e/common/configmap_volume.go @@ -17,6 +17,7 @@ limitations under the License. 
package common import ( + "context" "fmt" "path" @@ -138,7 +139,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -188,7 +189,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name)) configMap.ResourceVersion = "" // to force update configMap.Data["data-1"] = "value-2" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) ginkgo.By("waiting to observe update in volume") @@ -225,7 +226,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -343,12 +344,12 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) var err error - if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil { + if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) - if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil { + if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) } @@ -452,18 +453,18 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) - err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) updateConfigMap.ResourceVersion = "" // to force update delete(updateConfigMap.Data, "data-1") updateConfigMap.Data["data-3"] = "value-3" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap) framework.ExpectNoError(err, "Failed to update 
configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) - if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil { + if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) } @@ -491,7 +492,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -557,43 +558,43 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { name := "immutable" configMap := newConfigMap(f, name) - currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap) + currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) framework.ExpectNoError(err, "Failed to create config map %q in namespace %q", configMap.Name, configMap.Namespace) currentConfigMap.Data["data-4"] = "value-4" - currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(currentConfigMap) + currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap) framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace) // Mark config map as immutable. trueVal := true currentConfigMap.Immutable = &trueVal - currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(currentConfigMap) + currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap) framework.ExpectNoError(err, "Failed to mark config map %q in namespace %q as immutable", configMap.Name, configMap.Namespace) // Ensure data can't be changed now. currentConfigMap.Data["data-5"] = "value-5" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(currentConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap) framework.ExpectEqual(apierrors.IsInvalid(err), true) // Ensure config map can't be switched from immutable to mutable. - currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(name, metav1.GetOptions{}) + currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace) framework.ExpectEqual(*currentConfigMap.Immutable, true) falseVal := false currentConfigMap.Immutable = &falseVal - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(currentConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap) framework.ExpectEqual(apierrors.IsInvalid(err), true) // Ensure that metadata can be changed. 
- currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(name, metav1.GetOptions{}) + currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace) currentConfigMap.Labels = map[string]string{"label1": "value1"} - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(currentConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap) framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace) // Ensure that immutable config map can be deleted. - err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete config map %q in namespace %q", configMap.Name, configMap.Namespace) }) @@ -644,7 +645,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -720,7 +721,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -855,7 +856,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } // creating a pod with configMap object, but with different key which is not present in configMap object. diff --git a/test/e2e/common/container.go b/test/e2e/common/container.go index f1a10410770..a1ad000f5ee 100644 --- a/test/e2e/common/container.go +++ b/test/e2e/common/container.go @@ -17,6 +17,7 @@ limitations under the License. 
package common import ( + "context" "fmt" "time" @@ -69,11 +70,11 @@ func (cc *ConformanceContainer) Create() { } func (cc *ConformanceContainer) Delete() error { - return cc.PodClient.Delete(cc.podName, metav1.NewDeleteOptions(0)) + return cc.PodClient.Delete(context.TODO(), cc.podName, metav1.NewDeleteOptions(0)) } func (cc *ConformanceContainer) IsReady() (bool, error) { - pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{}) + pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{}) if err != nil { return false, err } @@ -81,7 +82,7 @@ func (cc *ConformanceContainer) IsReady() (bool, error) { } func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) { - pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{}) + pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{}) if err != nil { return v1.PodUnknown, err } @@ -89,7 +90,7 @@ func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) { } func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) { - pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{}) + pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{}) if err != nil { return v1.ContainerStatus{}, err } @@ -101,7 +102,7 @@ func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) { } func (cc *ConformanceContainer) Present() (bool, error) { - _, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{}) + _, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{}) if err == nil { return true, nil } diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go index 41a2cce3dca..b8ca22350ec 100644 --- a/test/e2e/common/container_probe.go +++ b/test/e2e/common/container_probe.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "fmt" "net/url" "time" @@ -64,7 +65,7 @@ var _ = framework.KubeDescribe("Probing container", func() { p := podClient.Create(testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80)) f.WaitForPodReady(p.Name) - p, err := podClient.Get(p.Name, metav1.GetOptions{}) + p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) isReady, err := testutils.PodRunningReady(p) framework.ExpectNoError(err) @@ -96,14 +97,14 @@ var _ = framework.KubeDescribe("Probing container", func() { framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() { p := podClient.Create(testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80)) gomega.Consistently(func() (bool, error) { - p, err := podClient.Get(p.Name, metav1.GetOptions{}) + p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) if err != nil { return false, err } return podutil.IsPodReady(p), nil }, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready") - p, err := podClient.Get(p.Name, metav1.GetOptions{}) + p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) isReady, _ := testutils.PodRunningReady(p) @@ -413,7 +414,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, // At the end of the test, clean up by removing the pod. 
defer func() { ginkgo.By("deleting the pod") - podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) + podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) }() ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns)) podClient.Create(pod) @@ -427,7 +428,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, // Check the pod's current state and verify that restartCount is present. ginkgo.By("checking the pod's current state and verifying that restartCount is present") - pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns)) initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount) @@ -437,7 +438,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, lastRestartCount := initialRestartCount observedRestarts := int32(0) for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) { - pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name)) restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount if restartCount != lastRestartCount { diff --git a/test/e2e/common/downwardapi_volume.go b/test/e2e/common/downwardapi_volume.go index cb9985de505..65c5d26a96b 100644 --- a/test/e2e/common/downwardapi_volume.go +++ b/test/e2e/common/downwardapi_volume.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "fmt" "time" @@ -166,7 +167,7 @@ var _ = ginkgo.Describe("[sig-storage] Downward API volume", func() { ginkgo.By("Creating the pod") podClient.CreateSync(pod) - pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get pod %q", pod.Name) gomega.Eventually(func() (string, error) { diff --git a/test/e2e/common/empty_dir.go b/test/e2e/common/empty_dir.go index d008acf1c51..9782e2ecea5 100644 --- a/test/e2e/common/empty_dir.go +++ b/test/e2e/common/empty_dir.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "fmt" "path" @@ -286,7 +287,7 @@ var _ = ginkgo.Describe("[sig-storage] EmptyDir volumes", func() { framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name) ginkgo.By("Geting the pod") - pod, err = f.PodClient().Get(pod.Name, metav1.GetOptions{}) + pod, err = f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get pod %s", pod.Name) ginkgo.By("Reading file content from the nginx-container") diff --git a/test/e2e/common/expansion.go b/test/e2e/common/expansion.go index 4a042fa3568..d8e767249f2 100644 --- a/test/e2e/common/expansion.go +++ b/test/e2e/common/expansion.go @@ -17,6 +17,7 @@ limitations under the License. 
package common import ( + "context" "fmt" "time" @@ -637,7 +638,7 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount ginkgo.By("Waiting for container to restart") restarts := int32(0) err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -666,7 +667,7 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount stableCount := int(0) stableThreshold := int(time.Minute / framework.Poll) err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/common/init_container.go b/test/e2e/common/init_container.go index 54b317aff4d..0680c412123 100644 --- a/test/e2e/common/init_container.go +++ b/test/e2e/common/init_container.go @@ -199,7 +199,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() { } framework.Logf("PodSpec: initContainers in spec.initContainers") startedPod := podClient.Create(pod) - w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta)) + w, err := podClient.Watch(context.TODO(), metav1.SingleObject(startedPod.ObjectMeta)) framework.ExpectNoError(err, "error watching a pod") wr := watch.NewRecorder(w) ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout) @@ -269,7 +269,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() { } framework.Logf("PodSpec: initContainers in spec.initContainers") startedPod := podClient.Create(pod) - w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta)) + w, err := podClient.Watch(context.TODO(), metav1.SingleObject(startedPod.ObjectMeta)) framework.ExpectNoError(err, "error watching a pod") wr := watch.NewRecorder(w) ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout) @@ -340,7 +340,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() { } framework.Logf("PodSpec: initContainers in spec.initContainers") startedPod := podClient.Create(pod) - w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta)) + w, err := podClient.Watch(context.TODO(), metav1.SingleObject(startedPod.ObjectMeta)) framework.ExpectNoError(err, "error watching a pod") wr := watch.NewRecorder(w) @@ -457,7 +457,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() { framework.Logf("PodSpec: initContainers in spec.initContainers") startedPod := podClient.Create(pod) - w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta)) + w, err := podClient.Watch(context.TODO(), metav1.SingleObject(startedPod.ObjectMeta)) framework.ExpectNoError(err, "error watching a pod") wr := watch.NewRecorder(w) diff --git a/test/e2e/common/kubelet.go b/test/e2e/common/kubelet.go index fc5e5761879..579ca7c4358 100644 --- a/test/e2e/common/kubelet.go +++ b/test/e2e/common/kubelet.go @@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { */ framework.ConformanceIt("should have an terminated reason [NodeConformance]", func() { gomega.Eventually(func() error { - 
podData, err := podClient.Get(podName, metav1.GetOptions{}) + podData, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return err } @@ -130,7 +130,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted. */ framework.ConformanceIt("should be possible to delete [NodeConformance]", func() { - err := podClient.Delete(podName, &metav1.DeleteOptions{}) + err := podClient.Delete(context.TODO(), podName, &metav1.DeleteOptions{}) gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Error deleting Pod %v", err)) }) }) diff --git a/test/e2e/common/lease.go b/test/e2e/common/lease.go index 74cab28cb86..251dfaf9caa 100644 --- a/test/e2e/common/lease.go +++ b/test/e2e/common/lease.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "encoding/json" "fmt" "time" @@ -81,10 +82,10 @@ var _ = framework.KubeDescribe("Lease", func() { }, } - createdLease, err := leaseClient.Create(lease) + createdLease, err := leaseClient.Create(context.TODO(), lease) framework.ExpectNoError(err, "creating Lease failed") - readLease, err := leaseClient.Get(name, metav1.GetOptions{}) + readLease, err := leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "couldn't read Lease") framework.ExpectEqual(apiequality.Semantic.DeepEqual(lease.Spec, readLease.Spec), true) @@ -96,10 +97,10 @@ var _ = framework.KubeDescribe("Lease", func() { LeaseTransitions: pointer.Int32Ptr(1), } - _, err = leaseClient.Update(createdLease) + _, err = leaseClient.Update(context.TODO(), createdLease) framework.ExpectNoError(err, "updating Lease failed") - readLease, err = leaseClient.Get(name, metav1.GetOptions{}) + readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "couldn't read Lease") framework.ExpectEqual(apiequality.Semantic.DeepEqual(createdLease.Spec, readLease.Spec), true) @@ -114,10 +115,10 @@ var _ = framework.KubeDescribe("Lease", func() { patchBytes, err := getPatchBytes(readLease, patchedLease) framework.ExpectNoError(err, "creating patch failed") - _, err = leaseClient.Patch(name, types.StrategicMergePatchType, patchBytes) + _, err = leaseClient.Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes) framework.ExpectNoError(err, "patching Lease failed") - readLease, err = leaseClient.Get(name, metav1.GetOptions{}) + readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "couldn't read Lease") framework.ExpectEqual(apiequality.Semantic.DeepEqual(patchedLease.Spec, readLease.Spec), true) @@ -135,25 +136,25 @@ var _ = framework.KubeDescribe("Lease", func() { LeaseTransitions: pointer.Int32Ptr(0), }, } - _, err = leaseClient.Create(lease2) + _, err = leaseClient.Create(context.TODO(), lease2) framework.ExpectNoError(err, "creating Lease failed") - leases, err := leaseClient.List(metav1.ListOptions{}) + leases, err := leaseClient.List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "couldn't list Leases") framework.ExpectEqual(len(leases.Items), 2) selector := labels.Set(map[string]string{"deletecollection": "true"}).AsSelector() - err = leaseClient.DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()}) + err = leaseClient.DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()}) framework.ExpectNoError(err, "couldn't 
delete collection") - leases, err = leaseClient.List(metav1.ListOptions{}) + leases, err = leaseClient.List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "couldn't list Leases") framework.ExpectEqual(len(leases.Items), 1) - err = leaseClient.Delete(name, &metav1.DeleteOptions{}) + err = leaseClient.Delete(context.TODO(), name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting Lease failed") - _, err = leaseClient.Get(name, metav1.GetOptions{}) + _, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectEqual(apierrors.IsNotFound(err), true) }) }) diff --git a/test/e2e/common/node_lease.go b/test/e2e/common/node_lease.go index cbb2408d589..282985605d1 100644 --- a/test/e2e/common/node_lease.go +++ b/test/e2e/common/node_lease.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "fmt" "time" @@ -54,7 +55,7 @@ var _ = framework.KubeDescribe("NodeLease", func() { ) ginkgo.By("check that lease for this Kubelet exists in the kube-node-lease namespace") gomega.Eventually(func() error { - lease, err = leaseClient.Get(nodeName, metav1.GetOptions{}) + lease, err = leaseClient.Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return err } @@ -65,7 +66,7 @@ var _ = framework.KubeDescribe("NodeLease", func() { ginkgo.By("check that node lease is updated at least once within the lease duration") gomega.Eventually(func() error { - newLease, err := leaseClient.Get(nodeName, metav1.GetOptions{}) + newLease, err := leaseClient.Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return err } @@ -91,7 +92,7 @@ var _ = framework.KubeDescribe("NodeLease", func() { leaseList *coordinationv1.LeaseList ) gomega.Eventually(func() error { - leaseList, err = leaseClient.List(metav1.ListOptions{}) + leaseList, err = leaseClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -116,7 +117,7 @@ var _ = framework.KubeDescribe("NodeLease", func() { var err error var lease *coordinationv1.Lease gomega.Eventually(func() error { - lease, err = f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{}) + lease, err = f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease).Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return err } @@ -175,7 +176,7 @@ var _ = framework.KubeDescribe("NodeLease", func() { // This check on node status is only meaningful when this e2e test is // running as cluster e2e test, because node e2e test does not create and // run controller manager, i.e., no node lifecycle controller. 
- node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) gomega.Expect(err).To(gomega.BeNil()) _, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady) framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue) @@ -184,7 +185,7 @@ var _ = framework.KubeDescribe("NodeLease", func() { }) func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) { - node, err := clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := clientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) gomega.Expect(err).To(gomega.BeNil()) _, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady) framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue) diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index d906e98c22d..d4662244cc0 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -18,6 +18,7 @@ package common import ( "bytes" + "context" "fmt" "io" "runtime/debug" @@ -63,7 +64,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) { hostIPTimeout := 2 * time.Minute t := time.Now() for { - p, err := podClient.Get(pod.Name, metav1.GetOptions{}) + p, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get pod %q", pod.Name) if p.Status.HostIP != "" { framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP) @@ -111,7 +112,7 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa var previousFinishedAt time.Time for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay time.Sleep(time.Second) - pod, err := podClient.Get(podName, metav1.GetOptions{}) + pod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName) if !ok { @@ -230,7 +231,7 @@ var _ = framework.KubeDescribe("Pods", func() { ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 0) options = metav1.ListOptions{ @@ -242,7 +243,7 @@ var _ = framework.KubeDescribe("Pods", func() { lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = selector.String() - podList, err := podClient.List(options) + podList, err := podClient.List(context.TODO(), options) if err == nil { select { case listCompleted <- true: @@ -256,7 +257,7 @@ var _ = framework.KubeDescribe("Pods", func() { }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = selector.String() - return podClient.Watch(options) + return podClient.Watch(context.TODO(), options) }, } _, _, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{}) @@ -268,7 +269,7 @@ var _ = framework.KubeDescribe("Pods", func() { ginkgo.By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = 
podClient.List(options) + pods, err = podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 1) @@ -291,11 +292,11 @@ var _ = framework.KubeDescribe("Pods", func() { // may be carried out immediately rather than gracefully. framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) // save the running pod - pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to GET scheduled pod") ginkgo.By("deleting the pod gracefully") - err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30)) + err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(30)) framework.ExpectNoError(err, "failed to delete pod") ginkgo.By("verifying the kubelet observed the termination notice") @@ -348,7 +349,7 @@ var _ = framework.KubeDescribe("Pods", func() { selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(options) + pods, err = podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 0) }) @@ -386,7 +387,7 @@ var _ = framework.KubeDescribe("Pods", func() { ginkgo.By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 1) @@ -401,7 +402,7 @@ var _ = framework.KubeDescribe("Pods", func() { ginkgo.By("verifying the updated pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(options) + pods, err = podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 1) framework.Logf("Pod update OK") @@ -440,7 +441,7 @@ var _ = framework.KubeDescribe("Pods", func() { ginkgo.By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 1) @@ -504,7 +505,7 @@ var _ = framework.KubeDescribe("Pods", func() { }, }, } - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc) framework.ExpectNoError(err, "failed to create service") // Make a client pod that verifies that it has the service environment variables. 
@@ -828,7 +829,7 @@ var _ = framework.KubeDescribe("Pods", func() { framework.ExpectEqual(podClient.PodIsReady(podName), false, "Expect pod's Ready condition to be false initially.") ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1)) - _, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status") + _, err := podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status") framework.ExpectNoError(err) // Sleep for 10 seconds. time.Sleep(syncLoopFrequency) @@ -836,12 +837,12 @@ var _ = framework.KubeDescribe("Pods", func() { framework.ExpectEqual(podClient.PodIsReady(podName), false, "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True") ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2)) - _, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status") + _, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status") framework.ExpectNoError(err) validatePodReadiness(true) ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1)) - _, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status") + _, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status") framework.ExpectNoError(err) validatePodReadiness(false) diff --git a/test/e2e/common/podtemplates.go b/test/e2e/common/podtemplates.go index 7a347cc5d4f..5c85a27a93b 100644 --- a/test/e2e/common/podtemplates.go +++ b/test/e2e/common/podtemplates.go @@ -17,6 +17,7 @@ limitations under the License. 
package common import ( + "context" "encoding/json" v1 "k8s.io/api/core/v1" @@ -36,14 +37,14 @@ var _ = ginkgo.Describe("[sig-architecture] PodTemplates", func() { podTemplateName := "nginx-pod-template-" + string(uuid.NewUUID()) // get a list of PodTemplates (in all namespaces to hit endpoint) - podTemplateList, err := f.ClientSet.CoreV1().PodTemplates("").List(metav1.ListOptions{ + podTemplateList, err := f.ClientSet.CoreV1().PodTemplates("").List(context.TODO(), metav1.ListOptions{ LabelSelector: "podtemplate-static=true", }) framework.ExpectNoError(err, "failed to list all PodTemplates") framework.ExpectEqual(len(podTemplateList.Items), 0, "unable to find templates") // create a PodTemplate - _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(&v1.PodTemplate{ + _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(context.TODO(), &v1.PodTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: podTemplateName, Labels: map[string]string{ @@ -61,7 +62,7 @@ var _ = ginkgo.Describe("[sig-architecture] PodTemplates", func() { framework.ExpectNoError(err, "failed to create PodTemplate") // get template - podTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(podTemplateName, metav1.GetOptions{}) + podTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get created PodTemplate") framework.ExpectEqual(podTemplateRead.ObjectMeta.Name, podTemplateName) @@ -74,20 +75,20 @@ var _ = ginkgo.Describe("[sig-architecture] PodTemplates", func() { }, }) framework.ExpectNoError(err, "failed to marshal patch data") - _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch)) + _, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(context.TODO(), podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch)) framework.ExpectNoError(err, "failed to patch PodTemplate") // get template (ensure label is there) - podTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(podTemplateName, metav1.GetOptions{}) + podTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get PodTemplate") framework.ExpectEqual(podTemplateRead.ObjectMeta.Labels["podtemplate"], "patched", "failed to patch template, new label not found") // delete the PodTemplate - err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(podTemplateName, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(context.TODO(), podTemplateName, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete PodTemplate") // list the PodTemplates - podTemplateList, err = f.ClientSet.CoreV1().PodTemplates("").List(metav1.ListOptions{ + podTemplateList, err = f.ClientSet.CoreV1().PodTemplates("").List(context.TODO(), metav1.ListOptions{ LabelSelector: "podtemplate-static=true", }) framework.ExpectNoError(err, "failed to list PodTemplate") diff --git a/test/e2e/common/projected_combined.go b/test/e2e/common/projected_combined.go index 7660d5d280a..b623cee07b5 100644 --- a/test/e2e/common/projected_combined.go +++ b/test/e2e/common/projected_combined.go @@ -17,6 +17,7 @@ limitations under the License. 
package common import ( + "context" "fmt" "k8s.io/api/core/v1" @@ -62,11 +63,11 @@ var _ = ginkgo.Describe("[sig-storage] Projected combined", func() { } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } diff --git a/test/e2e/common/projected_configmap.go b/test/e2e/common/projected_configmap.go index 41ab0ac6058..6b561123b7a 100644 --- a/test/e2e/common/projected_configmap.go +++ b/test/e2e/common/projected_configmap.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "fmt" "path" @@ -137,7 +138,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -193,7 +194,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name)) configMap.ResourceVersion = "" // to force update configMap.Data["data-1"] = "value-2" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) ginkgo.By("waiting to observe update in volume") @@ -252,12 +253,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) var err error - if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil { + if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) - if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil { + if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) } @@ -379,18 +380,18 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) - err = 
f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) updateConfigMap.ResourceVersion = "" // to force update delete(updateConfigMap.Data, "data-1") updateConfigMap.Data["data-3"] = "value-3" - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap) framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) - if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil { + if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) } @@ -418,7 +419,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -524,7 +525,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -605,7 +606,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } diff --git a/test/e2e/common/projected_downwardapi.go b/test/e2e/common/projected_downwardapi.go index 3545d074ce5..b193d9c37f5 100644 --- a/test/e2e/common/projected_downwardapi.go +++ b/test/e2e/common/projected_downwardapi.go @@ -17,6 +17,7 @@ limitations under the License. 
package common import ( + "context" "fmt" "time" @@ -166,7 +167,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() { ginkgo.By("Creating the pod") podClient.CreateSync(pod) - pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get pod %q", pod.Name) gomega.Eventually(func() (string, error) { diff --git a/test/e2e/common/projected_secret.go b/test/e2e/common/projected_secret.go index bceb70e7806..64bd5aa90b6 100644 --- a/test/e2e/common/projected_secret.go +++ b/test/e2e/common/projected_secret.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "fmt" "path" @@ -101,7 +102,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { secret2.Data = map[string][]byte{ "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), } - if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil { + if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2); err != nil { framework.Failf("unable to create test secret %s: %v", secret2.Name, err) } doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) @@ -127,7 +128,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -254,12 +255,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) var err error - if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil { + if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret); err != nil { framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) - if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil { + if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret); err != nil { framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) } @@ -381,18 +382,18 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) updateSecret.ResourceVersion = "" // to force update delete(updateSecret.Data, "data-1") updateSecret.Data["data-3"] = []byte("value-3") - _, err = 
f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) - if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil { + if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret); err != nil { framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) } @@ -434,7 +435,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -512,7 +513,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } diff --git a/test/e2e/common/runtime.go b/test/e2e/common/runtime.go index 04b5f9e8e36..5da4b1e67ce 100644 --- a/test/e2e/common/runtime.go +++ b/test/e2e/common/runtime.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "fmt" "path" "time" @@ -299,9 +300,9 @@ while true; do sleep 1; done } secret.Name = "image-pull-secret-" + string(uuid.NewUUID()) ginkgo.By("create image pull secret") - _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret) + _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret) framework.ExpectNoError(err) - defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil) + defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, nil) container.ImagePullSecrets = []string{secret.Name} } // checkContainerStatus checks whether the container status matches expectation. diff --git a/test/e2e/common/runtimeclass.go b/test/e2e/common/runtimeclass.go index d78913eee20..cf8e14ddbe2 100644 --- a/test/e2e/common/runtimeclass.go +++ b/test/e2e/common/runtimeclass.go @@ -17,6 +17,7 @@ limitations under the License. 
package common import ( + "context" "fmt" "time" @@ -66,12 +67,12 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { rcClient := f.ClientSet.NodeV1beta1().RuntimeClasses() ginkgo.By("Deleting RuntimeClass "+rcName, func() { - err := rcClient.Delete(rcName, nil) + err := rcClient.Delete(context.TODO(), rcName, nil) framework.ExpectNoError(err, "failed to delete RuntimeClass %s", rcName) ginkgo.By("Waiting for the RuntimeClass to disappear") framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) { - _, err := rcClient.Get(rcName, metav1.GetOptions{}) + _, err := rcClient.Get(context.TODO(), rcName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil // done } @@ -91,7 +92,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { func createRuntimeClass(f *framework.Framework, name, handler string) string { uniqueName := fmt.Sprintf("%s-%s", f.Namespace.Name, name) rc := runtimeclasstest.NewRuntimeClass(uniqueName, handler) - rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(rc) + rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(context.TODO(), rc) framework.ExpectNoError(err, "failed to create RuntimeClass resource") return rc.GetName() } @@ -122,7 +123,7 @@ func expectPodRejection(f *framework.Framework, pod *v1.Pod) { pod = f.PodClient().Create(pod) expectSandboxFailureEvent(f, pod, fmt.Sprintf("\"%s\" not found", *pod.Spec.RuntimeClassName)) } else { - _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectError(err, "should be forbidden") framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error") } diff --git a/test/e2e/common/secrets.go b/test/e2e/common/secrets.go index e232bed9cf1..6953a3efb84 100644 --- a/test/e2e/common/secrets.go +++ b/test/e2e/common/secrets.go @@ -17,6 +17,7 @@ limitations under the License. 
package common import ( + "context" "encoding/json" "fmt" @@ -45,7 +46,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -93,7 +94,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { secret := newEnvFromSecret(f.Namespace.Name, name) ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -153,7 +154,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { secretTestName := "test-secret-" + string(uuid.NewUUID()) // create a secret in the test namespace - _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(&v1.Secret{ + _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretTestName, Labels: map[string]string{ @@ -169,7 +170,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { ginkgo.By("listing secrets in all namespaces to ensure that there are more than zero") // list all secrets in all namespaces to ensure endpoint coverage - secretsList, err := f.ClientSet.CoreV1().Secrets("").List(metav1.ListOptions{ + secretsList, err := f.ClientSet.CoreV1().Secrets("").List(context.TODO(), metav1.ListOptions{ LabelSelector: "testsecret-constant=true", }) framework.ExpectNoError(err, "failed to list secrets") @@ -196,10 +197,10 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { "data": map[string][]byte{"key": []byte(secretPatchNewData)}, }) framework.ExpectNoError(err, "failed to marshal JSON") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch)) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(context.TODO(), secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch)) framework.ExpectNoError(err, "failed to patch secret") - secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretCreatedName, metav1.GetOptions{}) + secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretCreatedName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get secret") secretDecodedstring, err := base64.StdEncoding.DecodeString(string(secret.Data["key"])) @@ -208,14 +209,14 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { framework.ExpectEqual(string(secretDecodedstring), "value1", "found secret, but the data wasn't updated from the patch") ginkgo.By("deleting the secret using a LabelSelector") - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{ + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: "testsecret=true", }) framework.ExpectNoError(err, "failed to delete patched secret") ginkgo.By("listing secrets in all namespaces, searching for 
label name and value in patch") // list all secrets in all namespaces - secretsList, err = f.ClientSet.CoreV1().Secrets("").List(metav1.ListOptions{ + secretsList, err = f.ClientSet.CoreV1().Secrets("").List(context.TODO(), metav1.ListOptions{ LabelSelector: "testsecret-constant=true", }) framework.ExpectNoError(err, "failed to list secrets") @@ -257,5 +258,5 @@ func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) { }, } ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) - return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret) + return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret) } diff --git a/test/e2e/common/secrets_volume.go b/test/e2e/common/secrets_volume.go index f2bf26c148e..207802657a7 100644 --- a/test/e2e/common/secrets_volume.go +++ b/test/e2e/common/secrets_volume.go @@ -17,6 +17,7 @@ limitations under the License. package common import ( + "context" "fmt" "path" @@ -107,7 +108,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { secret2.Data = map[string][]byte{ "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), } - if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil { + if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2); err != nil { framework.Failf("unable to create test secret %s: %v", secret2.Name, err) } doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) @@ -133,7 +134,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -244,12 +245,12 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) var err error - if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil { + if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret); err != nil { framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) - if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil { + if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret); err != nil { framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) } @@ -347,18 +348,18 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) 
updateSecret.ResourceVersion = "" // to force update delete(updateSecret.Data, "data-1") updateSecret.Data["data-3"] = []byte("value-3") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) - if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil { + if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret); err != nil { framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) } @@ -375,43 +376,43 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { name := "immutable" secret := secretForTest(f.Namespace.Name, name) - currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret) + currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret) framework.ExpectNoError(err, "Failed to create secret %q in namespace %q", secret.Name, secret.Namespace) currentSecret.Data["data-4"] = []byte("value-4\n") - currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(currentSecret) + currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace) // Mark secret as immutable. trueVal := true currentSecret.Immutable = &trueVal - currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(currentSecret) + currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret) framework.ExpectNoError(err, "Failed to mark secret %q in namespace %q as immutable", secret.Name, secret.Namespace) // Ensure data can't be changed now. currentSecret.Data["data-5"] = []byte("value-5\n") - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(currentSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret) framework.ExpectEqual(apierrors.IsInvalid(err), true) // Ensure secret can't be switched from immutable to mutable. - currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(name, metav1.GetOptions{}) + currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace) framework.ExpectEqual(*currentSecret.Immutable, true) falseVal := false currentSecret.Immutable = &falseVal - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(currentSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret) framework.ExpectEqual(apierrors.IsInvalid(err), true) // Ensure that metadata can be changed. 
- currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(name, metav1.GetOptions{}) + currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace) currentSecret.Labels = map[string]string{"label1": "value1"} - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(currentSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret) framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace) // Ensure that immutable secret can be deleted. - err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", secret.Name, secret.Namespace) }) @@ -460,7 +461,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -529,7 +530,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -647,7 +648,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } // creating a pod with secret object, with the key which is not present in secret object. diff --git a/test/e2e/common/sysctl.go b/test/e2e/common/sysctl.go index e51c0e380d0..501b26e6e07 100644 --- a/test/e2e/common/sysctl.go +++ b/test/e2e/common/sysctl.go @@ -17,6 +17,7 @@ limitations under the License. 
package common import ( + "context" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" @@ -90,7 +91,7 @@ var _ = framework.KubeDescribe("Sysctls [LinuxOnly] [NodeFeature:Sysctls]", func ginkgo.By("Waiting for pod completion") err = f.WaitForPodNoLongerRunning(pod.Name) framework.ExpectNoError(err) - pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Checking that the pod succeeded") @@ -130,7 +131,7 @@ var _ = framework.KubeDescribe("Sysctls [LinuxOnly] [NodeFeature:Sysctls]", func ginkgo.By("Waiting for pod completion") err = f.WaitForPodNoLongerRunning(pod.Name) framework.ExpectNoError(err) - pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Checking that the pod succeeded") @@ -170,7 +171,7 @@ var _ = framework.KubeDescribe("Sysctls [LinuxOnly] [NodeFeature:Sysctls]", func ginkgo.By("Creating a pod with one valid and two invalid sysctls") client := f.ClientSet.CoreV1().Pods(f.Namespace.Name) - _, err := client.Create(pod) + _, err := client.Create(context.TODO(), pod) gomega.Expect(err).NotTo(gomega.BeNil()) gomega.Expect(err.Error()).To(gomega.ContainSubstring(`Invalid value: "foo-"`)) diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index 3d14f41057a..49883a59944 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -18,6 +18,7 @@ package common import ( "bytes" + "context" "fmt" "text/template" "time" @@ -140,7 +141,7 @@ func svcByName(name string, port int) *v1.Service { // NewSVCByName creates a service by name. func NewSVCByName(c clientset.Interface, ns, name string) error { const testPort = 9376 - _, err := c.CoreV1().Services(ns).Create(svcByName(name, testPort)) + _, err := c.CoreV1().Services(ns).Create(context.TODO(), svcByName(name, testPort)) return err } @@ -152,7 +153,7 @@ func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePe containerArgs = []string{"serve-hostname"} } - return c.CoreV1().ReplicationControllers(ns).Create(rcByNamePort( + return c.CoreV1().ReplicationControllers(ns).Create(context.TODO(), rcByNamePort( name, replicas, framework.ServeHostnameImage, containerArgs, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod)) } @@ -189,7 +190,7 @@ func RestartNodes(c clientset.Interface, nodes []v1.Node) error { for i := range nodes { node := &nodes[i] if err := wait.Poll(30*time.Second, framework.RestartNodeReadyAgainTimeout, func() (bool, error) { - newNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + newNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error getting node info after reboot: %s", err) } diff --git a/test/e2e/common/volumes.go b/test/e2e/common/volumes.go index e1890176751..159fcb303aa 100644 --- a/test/e2e/common/volumes.go +++ b/test/e2e/common/volumes.go @@ -43,6 +43,7 @@ limitations under the License. 
package common import ( + "context" "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -129,7 +130,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() { name := config.Prefix + "-server" defer func() { volume.TestCleanup(f, config) - err := c.CoreV1().Endpoints(namespace.Name).Delete(name, nil) + err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, nil) framework.ExpectNoError(err, "defer: Gluster delete endpoints failed") }() diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index d4622bb283d..db73e093d1e 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -17,6 +17,7 @@ limitations under the License. package e2e import ( + "context" "fmt" "os" "path" @@ -121,12 +122,12 @@ func runKubernetesServiceTestContainer(c clientset.Interface, ns string) { return } p.Namespace = ns - if _, err := c.CoreV1().Pods(ns).Create(p); err != nil { + if _, err := c.CoreV1().Pods(ns).Create(context.TODO(), p); err != nil { framework.Logf("Failed to create %v: %v", p.Name, err) return } defer func() { - if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil { + if err := c.CoreV1().Pods(ns).Delete(context.TODO(), p.Name, nil); err != nil { framework.Logf("Failed to delete pod %v: %v", p.Name, err) } }() @@ -150,7 +151,7 @@ func runKubernetesServiceTestContainer(c clientset.Interface, ns string) { // but we can detect if a cluster is dual stack because pods have two addresses (one per family) func getDefaultClusterIPFamily(c clientset.Interface) string { // Get the ClusterIP of the kubernetes service created in the default namespace - svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) + svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get kubernetes service ClusterIP: %v", err) } @@ -170,7 +171,7 @@ func waitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes in timeout, ns) return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - dsList, err := c.AppsV1().DaemonSets(ns).List(metav1.ListOptions{}) + dsList, err := c.AppsV1().DaemonSets(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) if testutils.IsRetryableAPIError(err) { diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 1dfee3b7223..169048ff519 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -17,6 +17,7 @@ limitations under the License. package e2e import ( + "context" "fmt" "path/filepath" "sync" @@ -79,7 +80,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { err := e2epod.WaitForPodNameRunningInNamespace(c, podName, ns) framework.ExpectNoError(err) for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { - pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName) framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) diff --git a/test/e2e/framework/auth/helpers.go b/test/e2e/framework/auth/helpers.go index 15034e00d1b..ae4a7edd07b 100644 --- a/test/e2e/framework/auth/helpers.go +++ b/test/e2e/framework/auth/helpers.go @@ -17,6 +17,7 @@ limitations under the License. 
package auth import ( + "context" "sync" "time" @@ -65,7 +66,7 @@ func WaitForNamedAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGette } err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) { - response, err := c.SubjectAccessReviews().Create(review) + response, err := c.SubjectAccessReviews().Create(context.TODO(), review) if err != nil { return false, err } @@ -85,7 +86,7 @@ func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv } // Since the namespace names are unique, we can leave this lying around so we don't have to race any caches - _, err := c.ClusterRoleBindings().Create(&rbacv1.ClusterRoleBinding{ + _, err := c.ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: ns + "--" + clusterRole, }, @@ -122,7 +123,7 @@ func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rb } // Since the namespace names are unique, we can leave this lying around so we don't have to race any caches - _, err := c.RoleBindings(ns).Create(&rbacv1.RoleBinding{ + _, err := c.RoleBindings(ns).Create(context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: ns + "--" + role, }, @@ -149,7 +150,7 @@ var ( // IsRBACEnabled returns true if RBAC is enabled. Otherwise false. func IsRBACEnabled(crGetter v1rbac.ClusterRolesGetter) bool { isRBACEnabledOnce.Do(func() { - crs, err := crGetter.ClusterRoles().List(metav1.ListOptions{}) + crs, err := crGetter.ClusterRoles().List(context.TODO(), metav1.ListOptions{}) if err != nil { e2elog.Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err) isRBACEnabled = false diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go index da152b88242..eaa4f02bd39 100644 --- a/test/e2e/framework/autoscaling/autoscaling_utils.go +++ b/test/e2e/framework/autoscaling/autoscaling_utils.go @@ -320,21 +320,21 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { func (rc *ResourceConsumer) GetReplicas() int { switch rc.kind { case KindRC: - replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{}) + replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(context.TODO(), rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if replicationController == nil { framework.Failf(rcIsNil) } return int(replicationController.Status.ReadyReplicas) case KindDeployment: - deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{}) + deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(context.TODO(), rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if deployment == nil { framework.Failf(deploymentIsNil) } return int(deployment.Status.ReadyReplicas) case KindReplicaSet: - rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{}) + rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(context.TODO(), rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if rs == nil { framework.Failf(rsIsNil) @@ -348,7 +348,7 @@ func (rc *ResourceConsumer) GetReplicas() int { // GetHpa get the corresponding horizontalPodAutoscaler object func (rc *ResourceConsumer) GetHpa(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) { - return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(name, metav1.GetOptions{}) + return 
rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(context.TODO(), name, metav1.GetOptions{}) } // WaitForReplicas wait for the desired replicas @@ -418,14 +418,14 @@ func (rc *ResourceConsumer) CleanUp() { time.Sleep(10 * time.Second) kind := rc.kind.GroupKind() framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, kind, rc.nsName, rc.name)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name, nil)) framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, api.Kind("ReplicationController"), rc.nsName, rc.controllerName)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.controllerName, nil)) } func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string) { ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas)) - _, err := c.CoreV1().Services(ns).Create(&v1.Service{ + _, err := c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, Annotations: serviceAnnotations, @@ -480,7 +480,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st ginkgo.By(fmt.Sprintf("Running controller")) controllerName := name + "-ctrl" - _, err = c.CoreV1().Services(ns).Create(&v1.Service{ + _, err = c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: controllerName, }, @@ -534,14 +534,14 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma TargetCPUUtilizationPercentage: &cpu, }, } - hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(hpa) + hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa) framework.ExpectNoError(errHPA) return hpa } // DeleteHorizontalPodAutoscaler delete the horizontalPodAutoscaler for consuming resources. func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) { - rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil) + rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, nil) } // runReplicaSet launches (and verifies correctness) of a replicaset. diff --git a/test/e2e/framework/deployment/fixtures.go b/test/e2e/framework/deployment/fixtures.go index 6d72b8a512c..24e4b20ea73 100644 --- a/test/e2e/framework/deployment/fixtures.go +++ b/test/e2e/framework/deployment/fixtures.go @@ -17,6 +17,7 @@ limitations under the License. package deployment import ( + "context" "fmt" appsv1 "k8s.io/api/apps/v1" @@ -71,7 +72,7 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s // CreateDeployment creates a deployment. 
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*appsv1.Deployment, error) { deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command) - deployment, err := client.AppsV1().Deployments(namespace).Create(deploymentSpec) + deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), deploymentSpec) if err != nil { return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err) } @@ -93,7 +94,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *appsv1.Deploym return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name) } podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { - return client.CoreV1().Pods(namespace).List(options) + return client.CoreV1().Pods(namespace).List(context.TODO(), options) } rsList := []*appsv1.ReplicaSet{replicaSet} podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc) diff --git a/test/e2e/framework/events/events.go b/test/e2e/framework/events/events.go index 098c0688774..49e2dfdfda4 100644 --- a/test/e2e/framework/events/events.go +++ b/test/e2e/framework/events/events.go @@ -17,6 +17,7 @@ limitations under the License. package events import ( + "context" "fmt" "strings" "sync" @@ -50,14 +51,14 @@ func ObserveNodeUpdateAfterAction(c clientset.Interface, nodeName string, nodePr &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector.String() - ls, err := c.CoreV1().Nodes().List(options) + ls, err := c.CoreV1().Nodes().List(context.TODO(), options) return ls, err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { // Signal parent goroutine that watching has begun. defer informerStartedGuard.Do(func() { close(informerStartedChan) }) options.FieldSelector = nodeSelector.String() - w, err := c.CoreV1().Nodes().Watch(options) + w, err := c.CoreV1().Nodes().Watch(context.TODO(), options) return w, err }, }, @@ -107,13 +108,13 @@ func ObserveEventAfterAction(c clientset.Interface, ns string, eventPredicate fu _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - ls, err := c.CoreV1().Events(ns).List(options) + ls, err := c.CoreV1().Events(ns).List(context.TODO(), options) return ls, err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { // Signal parent goroutine that watching has begun. 
defer informerStartedGuard.Do(func() { close(informerStartedChan) }) - w, err := c.CoreV1().Events(ns).Watch(options) + w, err := c.CoreV1().Events(ns).Watch(context.TODO(), options) return w, err }, }, @@ -162,7 +163,7 @@ func WaitTimeoutForEvent(c clientset.Interface, namespace, eventSelector, msg st func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) wait.ConditionFunc { options := metav1.ListOptions{FieldSelector: eventSelector} return func() (bool, error) { - events, err := c.CoreV1().Events(namespace).List(options) + events, err := c.CoreV1().Events(namespace).List(context.TODO(), options) if err != nil { return false, fmt.Errorf("got error while getting events: %v", err) } diff --git a/test/e2e/framework/exec_util.go b/test/e2e/framework/exec_util.go index e10c8586d5e..bcb4ea7f77e 100644 --- a/test/e2e/framework/exec_util.go +++ b/test/e2e/framework/exec_util.go @@ -18,6 +18,7 @@ package framework import ( "bytes" + "context" "io" "net/url" "strings" @@ -110,14 +111,14 @@ func (f *Framework) ExecShellInContainer(podName, containerName string, cmd stri } func (f *Framework) execCommandInPod(podName string, cmd ...string) string { - pod, err := f.PodClient().Get(podName, metav1.GetOptions{}) + pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{}) ExpectNoError(err, "failed to get pod %v", podName) gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...) } func (f *Framework) execCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) { - pod, err := f.PodClient().Get(podName, metav1.GetOptions{}) + pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{}) ExpectNoError(err, "failed to get pod %v", podName) gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty()) return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...) diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index 45d443d04b8..4bbbc3c9126 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -22,6 +22,7 @@ limitations under the License. 
package framework import ( + "context" "fmt" "io/ioutil" "math/rand" @@ -385,7 +386,7 @@ func (f *Framework) AfterEach() { if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentGinkgoTestDescription().Failed) { for _, ns := range f.namespacesToDelete { ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name)) - if err := f.ClientSet.CoreV1().Namespaces().Delete(ns.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, nil); err != nil { if !apierrors.IsNotFound(err) { nsDeletionErrors[ns.Name] = err @@ -600,7 +601,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str }} } Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName) - service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{ + service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "service-for-" + appName, Labels: map[string]string{ @@ -625,7 +626,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n } for i, node := range nodes.Items { Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName) - _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{ + _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(appName+"-pod-%v", i), Labels: podLabels, @@ -776,9 +777,9 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin if len(selectors) > 0 { selector = labels.SelectorFromSet(labels.Set(selectors)) options := metav1.ListOptions{LabelSelector: selector.String()} - pl, err = cli.CoreV1().Pods(ns).List(options) + pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), options) } else { - pl, err = cli.CoreV1().Pods(ns).List(metav1.ListOptions{}) + pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) } return pl, err } diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index ec777548616..fb9f41bd59d 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -18,6 +18,7 @@ package ingress import ( "bytes" + "context" "crypto/rand" "crypto/rsa" "crypto/tls" @@ -403,14 +404,14 @@ func createTLSSecret(kubeClient clientset.Interface, namespace, secretName strin }, } var s *v1.Secret - if s, err = kubeClient.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}); err == nil { + if s, err = kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{}); err == nil { // TODO: Retry the update. We don't really expect anything to conflict though. 
framework.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host) s.Data = secret.Data - _, err = kubeClient.CoreV1().Secrets(namespace).Update(s) + _, err = kubeClient.CoreV1().Secrets(namespace).Update(context.TODO(), s) } else { framework.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host) - _, err = kubeClient.CoreV1().Secrets(namespace).Create(secret) + _, err = kubeClient.CoreV1().Secrets(namespace).Create(context.TODO(), secret) } return host, cert, key, err } @@ -462,11 +463,11 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri j.Logger.Infof("creating service") framework.RunKubectlOrDieInput(ns, read("svc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) if len(svcAnnotations) > 0 { - svcList, err := j.Client.CoreV1().Services(ns).List(metav1.ListOptions{}) + svcList, err := j.Client.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations = svcAnnotations - _, err = j.Client.CoreV1().Services(ns).Update(&svc) + _, err = j.Client.CoreV1().Services(ns).Update(context.TODO(), &svc) framework.ExpectNoError(err) } } @@ -536,7 +537,7 @@ func ingressToManifest(ing *networkingv1beta1.Ingress, path string) error { // runCreate runs the required command to create the given ingress. func (j *TestJig) runCreate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) { if j.Class != MulticlusterIngressClassValue { - return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Create(ing) + return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Create(context.TODO(), ing) } // Use kubemci to create a multicluster ingress. filePath := framework.TestContext.OutputDir + "/mci.yaml" @@ -550,7 +551,7 @@ func (j *TestJig) runCreate(ing *networkingv1beta1.Ingress) (*networkingv1beta1. // runUpdate runs the required command to update the given ingress. func (j *TestJig) runUpdate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) { if j.Class != MulticlusterIngressClassValue { - return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Update(ing) + return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Update(context.TODO(), ing) } // Use kubemci to update a multicluster ingress. // kubemci does not have an update command. We use "create --force" to update an existing ingress. @@ -567,7 +568,7 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) { var err error ns, name := j.Ingress.Namespace, j.Ingress.Name for i := 0; i < 3; i++ { - j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) + j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { framework.Failf("failed to get ingress %s/%s: %v", ns, name, err) } @@ -658,7 +659,7 @@ func (j *TestJig) tryDeleteGivenIngress(ing *networkingv1beta1.Ingress) { // runDelete runs the required command to delete the given ingress. func (j *TestJig) runDelete(ing *networkingv1beta1.Ingress) error { if j.Class != MulticlusterIngressClassValue { - return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil) + return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(context.TODO(), ing.Name, nil) } // Use kubemci to delete a multicluster ingress. 
filePath := framework.TestContext.OutputDir + "/mci.yaml" @@ -698,7 +699,7 @@ func getIngressAddress(client clientset.Interface, ns, name, class string) ([]st if class == MulticlusterIngressClassValue { return getIngressAddressFromKubemci(name) } - ing, err := client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) + ing, err := client.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -850,7 +851,7 @@ func (j *TestJig) pollServiceNodePort(ns, name string, port int) error { // getSvcNodePort returns the node port for the given service:port. func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) { - svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) + svc, err := client.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return 0, err } @@ -876,7 +877,7 @@ func getPortURL(client clientset.Interface, ns, name string, svcPort int) (strin // kube-proxy NodePorts won't work. var nodes *v1.NodeList if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) { - nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err = client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -923,7 +924,7 @@ func (j *TestJig) GetIngressNodePorts(includeDefaultBackend bool) []string { func (j *TestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.ServicePort { svcPorts := make(map[string]v1.ServicePort) if includeDefaultBackend { - defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{}) + defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(context.TODO(), defaultBackendName, metav1.GetOptions{}) framework.ExpectNoError(err) svcPorts[defaultBackendName] = defaultSvc.Spec.Ports[0] } @@ -938,7 +939,7 @@ func (j *TestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.Serv } } for _, svcName := range backendSvcs { - svc, err := j.Client.CoreV1().Services(j.Ingress.Namespace).Get(svcName, metav1.GetOptions{}) + svc, err := j.Client.CoreV1().Services(j.Ingress.Namespace).Get(context.TODO(), svcName, metav1.GetOptions{}) framework.ExpectNoError(err) svcPorts[svcName] = svc.Spec.Ports[0] } @@ -1018,14 +1019,14 @@ func (cont *NginxIngressController) Init() { framework.Logf("initializing nginx ingress controller") framework.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns)) - rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{}) + rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{}) framework.ExpectNoError(err) cont.rc = rc framework.Logf("waiting for pods with label %v", rc.Spec.Selector) sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector)) framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.Client, cont.Ns, sel)) - pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()}) + pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(context.TODO(), metav1.ListOptions{LabelSelector: sel.String()}) framework.ExpectNoError(err) if len(pods.Items) == 0 { framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel) 
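Every hunk in this patch makes the same mechanical change: client-go calls gain a leading context.Context argument, filled in with context.TODO() as a placeholder. Below is a minimal sketch of what a caller can do once that parameter exists; the helper listServicesWithDeadline, the clientset cs, the namespace ns, and the one-minute deadline are illustrative assumptions, not taken from this patch.

package example // illustrative helper, not part of this patch

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// listServicesWithDeadline bounds the API call with a real context instead of
// relying on the context.TODO() placeholder used throughout the diff.
func listServicesWithDeadline(cs clientset.Interface, ns string) (int, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	svcs, err := cs.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(svcs.Items), nil
}

A cancellable context lets callers abort in-flight API requests; context.TODO() merely satisfies the new signature without changing behavior.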
@@ -1119,11 +1120,11 @@ func generateBacksideHTTPSDeploymentSpec() *appsv1.Deployment { // SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured. func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*appsv1.Deployment, *v1.Service, *networkingv1beta1.Ingress, error) { - deployCreated, err := cs.AppsV1().Deployments(namespace).Create(generateBacksideHTTPSDeploymentSpec()) + deployCreated, err := cs.AppsV1().Deployments(namespace).Create(context.TODO(), generateBacksideHTTPSDeploymentSpec()) if err != nil { return nil, nil, nil, err } - svcCreated, err := cs.CoreV1().Services(namespace).Create(generateBacksideHTTPSServiceSpec()) + svcCreated, err := cs.CoreV1().Services(namespace).Create(context.TODO(), generateBacksideHTTPSServiceSpec()) if err != nil { return nil, nil, nil, err } @@ -1150,12 +1151,12 @@ func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *appsv1.Depl } } if svc != nil { - if err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil); err != nil { + if err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil); err != nil { errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err)) } } if deploy != nil { - if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(deploy.Name, nil); err != nil { + if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(context.TODO(), deploy.Name, nil); err != nil { errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", deploy.Namespace, deploy.Name, err)) } } diff --git a/test/e2e/framework/job/rest.go b/test/e2e/framework/job/rest.go index 5897363000f..80995ae1afd 100644 --- a/test/e2e/framework/job/rest.go +++ b/test/e2e/framework/job/rest.go @@ -17,6 +17,7 @@ limitations under the License. package job import ( + "context" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,18 +27,18 @@ import ( // GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid. func GetJob(c clientset.Interface, ns, name string) (*batchv1.Job, error) { - return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{}) + return c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{}) } // GetJobPods returns a list of Pods belonging to a Job. func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName})) options := metav1.ListOptions{LabelSelector: label.String()} - return c.CoreV1().Pods(ns).List(options) + return c.CoreV1().Pods(ns).List(context.TODO(), options) } // CreateJob uses c to create job in namespace ns. If the returned error is nil, the returned Job is valid and has // been created. func CreateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) { - return c.BatchV1().Jobs(ns).Create(job) + return c.BatchV1().Jobs(ns).Create(context.TODO(), job) } diff --git a/test/e2e/framework/job/wait.go b/test/e2e/framework/job/wait.go index 4ac529ccf3c..96d8544caf8 100644 --- a/test/e2e/framework/job/wait.go +++ b/test/e2e/framework/job/wait.go @@ -17,6 +17,7 @@ limitations under the License. 
package job import ( + "context" "time" "k8s.io/api/core/v1" @@ -49,7 +50,7 @@ func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, paralle // WaitForJobComplete uses c to wait for completions to complete for the Job jobName in namespace ns. func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions int32) error { return wait.Poll(framework.Poll, JobTimeout, func() (bool, error) { - curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) + curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) if err != nil { return false, err } @@ -60,7 +61,7 @@ func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions i // WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete). func WaitForJobFinish(c clientset.Interface, ns, jobName string) error { return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { - curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) + curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) if err != nil { return false, err } @@ -71,7 +72,7 @@ func WaitForJobFinish(c clientset.Interface, ns, jobName string) error { // WaitForJobGone uses c to wait for up to timeout for the Job named jobName in namespace ns to be removed. func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error { return wait.Poll(framework.Poll, timeout, func() (bool, error) { - _, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) + _, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil } diff --git a/test/e2e/framework/kubectl/kubectl_utils.go b/test/e2e/framework/kubectl/kubectl_utils.go index 2dbc692a829..0449fc64049 100644 --- a/test/e2e/framework/kubectl/kubectl_utils.go +++ b/test/e2e/framework/kubectl/kubectl_utils.go @@ -18,6 +18,7 @@ package kubectl import ( "bytes" + "context" "fmt" "os/exec" "path/filepath" @@ -97,7 +98,7 @@ func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd { // LogFailedContainers runs `kubectl logs` on a failed containers. func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) { - podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { logFunc("Error getting pods in namespace '%s': %v", ns, err) return diff --git a/test/e2e/framework/kubelet/stats.go b/test/e2e/framework/kubelet/stats.go index fb5a1879df2..eb406629707 100644 --- a/test/e2e/framework/kubelet/stats.go +++ b/test/e2e/framework/kubelet/stats.go @@ -116,7 +116,7 @@ func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor client: c, nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate), } - nodes, err := m.client.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := m.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err) } @@ -478,7 +478,7 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI // Start starts collectors. 
func (r *ResourceMonitor) Start() { // It should be OK to monitor unschedulable Nodes - nodes, err := r.client.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := r.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Failf("ResourceMonitor: unable to get list of nodes: %v", err) } diff --git a/test/e2e/framework/metrics/metrics_grabber.go b/test/e2e/framework/metrics/metrics_grabber.go index 303109e72a8..c1e46264002 100644 --- a/test/e2e/framework/metrics/metrics_grabber.go +++ b/test/e2e/framework/metrics/metrics_grabber.go @@ -56,7 +56,7 @@ type Grabber struct { func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*Grabber, error) { registeredMaster := false masterName := "" - nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, err } @@ -101,7 +101,7 @@ func (g *Grabber) HasRegisteredMaster() bool { // GrabFromKubelet returns metrics from kubelet func (g *Grabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) { - nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector().String()}) + nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector().String()}) if err != nil { return KubeletMetrics{}, err } @@ -215,7 +215,7 @@ func (g *Grabber) Grab() (Collection, error) { } if g.grabFromKubelets { result.KubeletMetrics = make(map[string]KubeletMetrics) - nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { errs = append(errs, err) } else { diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go index b09d78973fd..d849a12c2e9 100644 --- a/test/e2e/framework/network/utils.go +++ b/test/e2e/framework/network/utils.go @@ -17,6 +17,7 @@ limitations under the License. package network import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -552,7 +553,7 @@ func (config *NetworkingTestConfig) createSessionAffinityService(selector map[st // DeleteNodePortService deletes NodePort service. func (config *NetworkingTestConfig) DeleteNodePortService() { - err := config.getServiceClient().Delete(config.NodePortService.Name, nil) + err := config.getServiceClient().Delete(context.TODO(), config.NodePortService.Name, nil) framework.ExpectNoError(err, "error while deleting NodePortService. err:%v)", err) time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted. 
} @@ -569,14 +570,14 @@ func (config *NetworkingTestConfig) createTestPods() { framework.ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name)) var err error - config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name, metav1.GetOptions{}) + config.TestContainerPod, err = config.getPodClient().Get(context.TODO(), testContainerPod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err) } if config.HostNetwork { framework.ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name)) - config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name, metav1.GetOptions{}) + config.HostTestContainerPod, err = config.getPodClient().Get(context.TODO(), hostTestContainerPod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err) } @@ -584,13 +585,13 @@ func (config *NetworkingTestConfig) createTestPods() { } func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.Service { - _, err := config.getServiceClient().Create(serviceSpec) + _, err := config.getServiceClient().Create(context.TODO(), serviceSpec) framework.ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err)) err = framework.WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second) framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err)) - createdService, err := config.getServiceClient().Get(serviceSpec.Name, metav1.GetOptions{}) + createdService, err := config.getServiceClient().Get(context.TODO(), serviceSpec.Name, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err)) return createdService @@ -666,7 +667,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector runningPods := make([]*v1.Pod, 0, len(nodes)) for _, p := range createdPods { framework.ExpectNoError(config.f.WaitForPodReady(p.Name)) - rp, err := config.getPodClient().Get(p.Name, metav1.GetOptions{}) + rp, err := config.getPodClient().Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) runningPods = append(runningPods, rp) } @@ -677,7 +678,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector // DeleteNetProxyPod deletes the first endpoint pod and waits for it being removed. func (config *NetworkingTestConfig) DeleteNetProxyPod() { pod := config.EndpointPods[0] - config.getPodClient().Delete(pod.Name, metav1.NewDeleteOptions(0)) + config.getPodClient().Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) config.EndpointPods = config.EndpointPods[1:] // wait for pod being deleted. err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) diff --git a/test/e2e/framework/node/resource.go b/test/e2e/framework/node/resource.go index 5ad15c5d169..b42634f6820 100644 --- a/test/e2e/framework/node/resource.go +++ b/test/e2e/framework/node/resource.go @@ -17,6 +17,7 @@ limitations under the License. 
package node import ( + "context" "fmt" "net" "strings" @@ -336,7 +337,7 @@ func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, e func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) { nodes := &v1.NodeList{} masters := sets.NewString() - all, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + all, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, nil, fmt.Errorf("get nodes error: %s", err) } @@ -460,7 +461,7 @@ func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool { func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) { var result []PodNode - podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return result, err } diff --git a/test/e2e/framework/node/wait.go b/test/e2e/framework/node/wait.go index 0d4c9c8d74e..39354428f03 100644 --- a/test/e2e/framework/node/wait.go +++ b/test/e2e/framework/node/wait.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "fmt" "regexp" "time" @@ -56,7 +57,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error { err := wait.PollImmediate(poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil @@ -68,7 +69,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error { notReady = append(notReady, node) } } - pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"}) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } @@ -122,7 +123,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error { func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool { e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue) for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { e2elog.Logf("Couldn't get node %s", name) continue @@ -182,7 +183,7 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) { var nodes *v1.NodeList var err error if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) { - nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -219,7 +220,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed ResourceVersion: "0", FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(), } - nodes, err := c.CoreV1().Nodes().List(opts) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), opts) if err != nil { e2elog.Logf("Unexpected error listing nodes: %v", err) if 
testutils.IsRetryableAPIError(err) { diff --git a/test/e2e/framework/pod/create.go b/test/e2e/framework/pod/create.go index f3c5dda1d7b..6765b8dc6c0 100644 --- a/test/e2e/framework/pod/create.go +++ b/test/e2e/framework/pod/create.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "context" "fmt" "time" @@ -35,7 +36,7 @@ var ( // CreateUnschedulablePod with given claims based on node selector func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) { pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command) - pod, err := client.CoreV1().Pods(namespace).Create(pod) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } @@ -45,7 +46,7 @@ func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSe return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err) } // get fresh pod info - pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return pod, fmt.Errorf("pod Get API error: %v", err) } @@ -60,7 +61,7 @@ func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeC // CreatePod with given claims based on node selector func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) { pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command) - pod, err := client.CoreV1().Pods(namespace).Create(pod) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } @@ -70,7 +71,7 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) } // get fresh pod info - pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return pod, fmt.Errorf("pod Get API error: %v", err) } @@ -90,7 +91,7 @@ func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pod.Spec.NodeSelector = node.Selector pod.Spec.Affinity = node.Affinity - pod, err := client.CoreV1().Pods(namespace).Create(pod) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } @@ -101,7 +102,7 @@ func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) } // get fresh pod info - pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return pod, fmt.Errorf("pod Get API error: %v", err) } diff --git a/test/e2e/framework/pod/delete.go b/test/e2e/framework/pod/delete.go index a4a2bfcdf26..6a292457916 100644 --- a/test/e2e/framework/pod/delete.go +++ b/test/e2e/framework/pod/delete.go @@ -17,6 +17,7 @@ limitations under the License. 
package pod import ( + "context" "fmt" "time" @@ -36,7 +37,7 @@ const ( // DeletePodOrFail deletes the pod of the specified namespace and name. func DeletePodOrFail(c clientset.Interface, ns, name string) { ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) - err := c.CoreV1().Pods(ns).Delete(name, nil) + err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, nil) expectNoError(err, "failed to delete pod %s in namespace %s", name, ns) } @@ -53,7 +54,7 @@ func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error { // not existing. func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error { e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace) - err := c.CoreV1().Pods(podNamespace).Delete(podName, nil) + err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, nil) if err != nil { if apierrors.IsNotFound(err) { return nil // assume pod was already deleted diff --git a/test/e2e/framework/pod/resource.go b/test/e2e/framework/pod/resource.go index bdf72aec5a7..a58b71af02e 100644 --- a/test/e2e/framework/pod/resource.go +++ b/test/e2e/framework/pod/resource.go @@ -86,7 +86,7 @@ func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Sele func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) { successes := 0 options := metav1.ListOptions{LabelSelector: r.label.String()} - currentPods, err := r.c.CoreV1().Pods(r.ns).List(options) + currentPods, err := r.c.CoreV1().Pods(r.ns).List(context.TODO(), options) expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns) for i, pod := range r.pods.Items { // Check that the replica list remains unchanged, otherwise we have problems. @@ -147,7 +147,7 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) { func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return false, err } @@ -163,7 +163,7 @@ func podRunning(c clientset.Interface, podName, namespace string) wait.Condition func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return false, err } @@ -177,7 +177,7 @@ func podCompleted(c clientset.Interface, podName, namespace string) wait.Conditi func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return false, err } @@ -196,7 +196,7 @@ func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.C func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return false, err } @@ -222,7 +222,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, options := 
metav1.ListOptions{LabelSelector: label.String()} // List the pods, making sure we observe all the replicas. - pods, err := c.CoreV1().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) if err != nil { return nil, err } @@ -350,7 +350,7 @@ func logPodTerminationMessages(pods []v1.Pod) { // DumpAllPodInfoForNamespace logs all pod information for a given namespace. func DumpAllPodInfoForNamespace(c clientset.Interface, namespace string) { - pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { e2elog.Logf("unable to fetch pod debug info: %v", err) } @@ -436,10 +436,10 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw if tweak != nil { tweak(pod) } - execPod, err := client.CoreV1().Pods(ns).Create(pod) + execPod, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod) expectNoError(err, "failed to create new exec pod in namespace: %s", ns) err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) { - retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(execPod.Name, metav1.GetOptions{}) + retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(context.TODO(), execPod.Name, metav1.GetOptions{}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil @@ -528,7 +528,7 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName // GetPodsInNamespace returns the pods in the given namespace. func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) { - pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return []*v1.Pod{}, err } diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go index cff32a8b734..1cc89d09d1d 100644 --- a/test/e2e/framework/pod/wait.go +++ b/test/e2e/framework/pod/wait.go @@ -18,6 +18,7 @@ package pod import ( "bytes" + "context" "errors" "fmt" "sync" @@ -125,7 +126,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN // checked. 
replicas, replicaOk := int32(0), int32(0) - rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{}) + rcList, err := c.CoreV1().ReplicationControllers(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err) if testutils.IsRetryableAPIError(err) { @@ -138,7 +139,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN replicaOk += rc.Status.ReadyReplicas } - rsList, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{}) + rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err) if testutils.IsRetryableAPIError(err) { @@ -151,7 +152,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN replicaOk += rs.Status.ReadyReplicas } - podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err) if testutils.IsRetryableAPIError(err) { @@ -211,7 +212,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error { e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err) @@ -297,7 +298,7 @@ func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, name func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error { e2elog.Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), opts) if err != nil { return err } @@ -386,7 +387,7 @@ func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, nam // than "not found" then that error is returned and the wait stops. 
func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error { return wait.PollImmediate(poll, timeout, func() (bool, error) { - _, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) + _, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil // done } @@ -402,7 +403,7 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe return wait.PollImmediate(interval, timeout, func() (bool, error) { e2elog.Logf("Waiting for pod %s to disappear", podName) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil @@ -489,7 +490,7 @@ func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label label func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) { options := metav1.ListOptions{LabelSelector: label.String()} - pods, err = c.CoreV1().Pods(ns).List(options) + pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options) if err != nil { if testutils.IsRetryableAPIError(err) { continue @@ -540,7 +541,7 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := metav1.ListOptions{LabelSelector: label.String()} return wait.Poll(poll, 5*time.Minute, func() (bool, error) { - pods, err := c.CoreV1().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) if err != nil { return false, nil } diff --git a/test/e2e/framework/podlogs/podlogs.go b/test/e2e/framework/podlogs/podlogs.go index 039ea50455d..1485fd26158 100644 --- a/test/e2e/framework/podlogs/podlogs.go +++ b/test/e2e/framework/podlogs/podlogs.go @@ -78,7 +78,7 @@ var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|w // running pods, but that then would have the disadvantage that // already deleted pods aren't covered. func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error { - watcher, err := cs.CoreV1().Pods(ns).Watch(meta.ListOptions{}) + watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{}) if err != nil { return errors.Wrap(err, "cannot create Pod event watcher") } @@ -90,7 +90,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO m.Lock() defer m.Unlock() - pods, err := cs.CoreV1().Pods(ns).List(meta.ListOptions{}) + pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), meta.ListOptions{}) if err != nil { if to.StatusWriter != nil { fmt.Fprintf(to.StatusWriter, "ERROR: get pod list in %s: %s\n", ns, err) @@ -213,7 +213,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO // WatchPods prints pod status events for a certain namespace or all namespaces // when namespace name is empty. 
func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer) error { - watcher, err := cs.CoreV1().Pods(ns).Watch(meta.ListOptions{}) + watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{}) if err != nil { return errors.Wrap(err, "cannot create Pod event watcher") } diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go index dc77ecdd123..56510b72b2d 100644 --- a/test/e2e/framework/pods.go +++ b/test/e2e/framework/pods.go @@ -17,6 +17,7 @@ limitations under the License. package framework import ( + "context" "fmt" "regexp" "sync" @@ -78,7 +79,7 @@ type PodClient struct { // Create creates a new pod according to the framework specifications (don't wait for it to start). func (c *PodClient) Create(pod *v1.Pod) *v1.Pod { c.mungeSpec(pod) - p, err := c.PodInterface.Create(pod) + p, err := c.PodInterface.Create(context.TODO(), pod) ExpectNoError(err, "Error creating Pod") return p } @@ -89,7 +90,7 @@ func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod { p := c.Create(pod) ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace)) // Get the newest pod after it becomes running, some status may change after pod created, such as pod ip. - p, err := c.Get(p.Name, metav1.GetOptions{}) + p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{}) ExpectNoError(err) return p } @@ -115,12 +116,12 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod { // pod object. func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) { ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) { - pod, err := c.PodInterface.Get(name, metav1.GetOptions{}) + pod, err := c.PodInterface.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get pod %q: %v", name, err) } updateFn(pod) - _, err = c.PodInterface.Update(pod) + _, err = c.PodInterface.Update(context.TODO(), pod) if err == nil { Logf("Successfully updated pod %q", name) return true, nil @@ -137,7 +138,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) { // disappear before the timeout, it will fail the test. func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeout time.Duration) { namespace := c.f.Namespace.Name - err := c.Delete(name, options) + err := c.Delete(context.TODO(), name, options) if err != nil && !apierrors.IsNotFound(err) { Failf("Failed to delete pod %q: %v", name, err) } @@ -259,7 +260,7 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe // PodIsReady returns true if the specified pod is ready. Otherwise false. func (c *PodClient) PodIsReady(name string) bool { - pod, err := c.Get(name, metav1.GetOptions{}) + pod, err := c.Get(context.TODO(), name, metav1.GetOptions{}) ExpectNoError(err) return podutil.IsPodReady(pod) } diff --git a/test/e2e/framework/providers/gce/gce.go b/test/e2e/framework/providers/gce/gce.go index 59c45a4f3d7..54e37fe7186 100644 --- a/test/e2e/framework/providers/gce/gce.go +++ b/test/e2e/framework/providers/gce/gce.go @@ -17,6 +17,7 @@ limitations under the License. 
package gce import ( + "context" "fmt" "net/http" "os/exec" @@ -371,7 +372,7 @@ func GetGCECloud() (*gcecloud.Cloud, error) { // GetClusterID returns cluster ID func GetClusterID(c clientset.Interface) (string, error) { - cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{}) + cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), gcecloud.UIDConfigMapName, metav1.GetOptions{}) if err != nil || cm == nil { return "", fmt.Errorf("error getting cluster ID: %v", err) } diff --git a/test/e2e/framework/providers/gce/ingress.go b/test/e2e/framework/providers/gce/ingress.go index 03471724feb..72e102c3509 100644 --- a/test/e2e/framework/providers/gce/ingress.go +++ b/test/e2e/framework/providers/gce/ingress.go @@ -17,6 +17,7 @@ limitations under the License. package gce import ( + "context" "crypto/sha256" "encoding/json" "fmt" @@ -126,7 +127,7 @@ func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time. func (cont *IngressController) getL7AddonUID() (string, error) { framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap) - cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{}) + cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), uidConfigMap, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/test/e2e/framework/providers/gce/recreate_node.go b/test/e2e/framework/providers/gce/recreate_node.go index 1a57b0a504e..b3ae13dda9e 100644 --- a/test/e2e/framework/providers/gce/recreate_node.go +++ b/test/e2e/framework/providers/gce/recreate_node.go @@ -17,6 +17,7 @@ limitations under the License. package gce import ( + "context" "fmt" "time" @@ -77,7 +78,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() { // Make sure that addon/system pods are running, so dump // events for the kube-system namespace on failures ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", systemNamespace)) - events, err := f.ClientSet.CoreV1().Events(systemNamespace).List(metav1.ListOptions{}) + events, err := f.ClientSet.CoreV1().Events(systemNamespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, e := range events.Items { diff --git a/test/e2e/framework/providers/gce/util.go b/test/e2e/framework/providers/gce/util.go index e44169ee723..70ba710512c 100644 --- a/test/e2e/framework/providers/gce/util.go +++ b/test/e2e/framework/providers/gce/util.go @@ -17,6 +17,7 @@ limitations under the License. package gce import ( + "context" "fmt" "strings" "time" @@ -84,7 +85,7 @@ func WaitForNodeBootIdsToChange(c clientset.Interface, nodes []v1.Node, timeout for i := range nodes { node := &nodes[i] if err := wait.Poll(30*time.Second, timeout, func() (bool, error) { - newNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + newNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Could not get node info: %s. Retrying in %v.", err, 30*time.Second) return false, nil diff --git a/test/e2e/framework/psp.go b/test/e2e/framework/psp.go index 3518b106c2b..b14874a20bf 100644 --- a/test/e2e/framework/psp.go +++ b/test/e2e/framework/psp.go @@ -17,6 +17,7 @@ limitations under the License. 
package framework import ( + "context" "fmt" "sync" @@ -83,7 +84,7 @@ func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy { // IsPodSecurityPolicyEnabled returns true if PodSecurityPolicy is enabled. Otherwise false. func IsPodSecurityPolicyEnabled(kubeClient clientset.Interface) bool { isPSPEnabledOnce.Do(func() { - psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(metav1.ListOptions{}) + psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), metav1.ListOptions{}) if err != nil { Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err) isPSPEnabled = false @@ -109,8 +110,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string } // Create the privileged PSP & role privilegedPSPOnce.Do(func() { - _, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get( - podSecurityPolicyPrivileged, metav1.GetOptions{}) + _, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), podSecurityPolicyPrivileged, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { // Privileged PSP was already created. ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged) @@ -118,14 +118,14 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string } psp := privilegedPSP(podSecurityPolicyPrivileged) - _, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp) + _, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp) if !apierrors.IsAlreadyExists(err) { ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged) } if auth.IsRBACEnabled(kubeClient.RbacV1()) { // Create the Role to bind it to the namespace. - _, err = kubeClient.RbacV1().ClusterRoles().Create(&rbacv1.ClusterRole{ + _, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged}, Rules: []rbacv1.PolicyRule{{ APIGroups: []string{"extensions"}, diff --git a/test/e2e/framework/pv/pv.go b/test/e2e/framework/pv/pv.go index cc8578f07ad..d946b9a7ba2 100644 --- a/test/e2e/framework/pv/pv.go +++ b/test/e2e/framework/pv/pv.go @@ -17,6 +17,7 @@ limitations under the License. 
package framework import ( + "context" "fmt" "time" @@ -185,7 +186,7 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa func DeletePersistentVolume(c clientset.Interface, pvName string) error { if c != nil && len(pvName) > 0 { framework.Logf("Deleting PersistentVolume %q", pvName) - err := c.CoreV1().PersistentVolumes().Delete(pvName, nil) + err := c.CoreV1().PersistentVolumes().Delete(context.TODO(), pvName, nil) if err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("PV Delete API error: %v", err) } @@ -197,7 +198,7 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error { func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error { if c != nil && len(pvcName) > 0 { framework.Logf("Deleting PersistentVolumeClaim %q", pvcName) - err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil) + err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, nil) if err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("PVC Delete API error: %v", err) } @@ -224,7 +225,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent } // examine the pv's ClaimRef and UID and compare to expected values - pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("PV Get API error: %v", err) } @@ -255,7 +256,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, var boundPVs, deletedPVCs int for pvName := range pvols { - pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("PV Get API error: %v", err) } @@ -270,7 +271,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey) } // get the pvc for the delete call below - pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(cr.Name, metav1.GetOptions{}) + pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), cr.Name, metav1.GetOptions{}) if err == nil { if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil { return err @@ -292,7 +293,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, // create the PV resource. Fails test on error. func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { - pv, err := c.CoreV1().PersistentVolumes().Create(pv) + pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv) if err != nil { return nil, fmt.Errorf("PV Create API error: %v", err) } @@ -306,7 +307,7 @@ func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVol // CreatePVC creates the PVC resource. Fails test on error. 
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { - pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) + pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc) if err != nil { return nil, fmt.Errorf("PVC Create API error: %v", err) } @@ -446,11 +447,11 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p } // Re-get the pv and pvc objects - pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("PV Get API error: %v", err) } - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("PVC Get API error: %v", err) } @@ -496,7 +497,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV return fmt.Errorf("PV %q did not become Bound: %v", pvName, err) } - pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("PV Get API error: %v", err) } @@ -688,12 +689,12 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist return persistentvolumes, err } // Get new copy of the claim - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) if err != nil { return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err) } // Get the bounded PV - persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) + persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) if err != nil { return persistentvolumes, fmt.Errorf("PV Get API error: %v", err) } @@ -705,7 +706,7 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if err != nil { framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) continue @@ -734,7 +735,7 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { phaseFoundInAllClaims := true for _, pvcName := range pvcNames { - pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) + pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{}) if err != nil { framework.Logf("Failed to get claim %q, retrying in %v. 
Error: %v", pvcName, Poll, err) continue @@ -772,7 +773,7 @@ func DeletePVSource(pvSource *v1.PersistentVolumeSource) error { // GetDefaultStorageClassName returns default storageClass or return error func GetDefaultStorageClassName(c clientset.Interface) (string, error) { - list, err := c.StorageV1().StorageClasses().List(metav1.ListOptions{}) + list, err := c.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{}) if err != nil { return "", fmt.Errorf("Error listing storage classes: %v", err) } diff --git a/test/e2e/framework/replicaset/wait.go b/test/e2e/framework/replicaset/wait.go index 2ae50709ffa..7bac05536c7 100644 --- a/test/e2e/framework/replicaset/wait.go +++ b/test/e2e/framework/replicaset/wait.go @@ -17,6 +17,7 @@ limitations under the License. package replicaset import ( + "context" "fmt" "time" @@ -30,7 +31,7 @@ import ( // WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready. func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error { err := wait.Poll(framework.Poll, framework.PollShortTimeout, func() (bool, error) { - rs, err := c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{}) + rs, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -52,7 +53,7 @@ func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet func WaitForReplicaSetTargetAvailableReplicasWithTimeout(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32, timeout time.Duration) error { desiredGeneration := replicaSet.Generation err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{}) + rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/framework/resource/runtimeobj.go b/test/e2e/framework/resource/runtimeobj.go index 3f7c322b14e..7bfacbccfc4 100644 --- a/test/e2e/framework/resource/runtimeobj.go +++ b/test/e2e/framework/resource/runtimeobj.go @@ -17,6 +17,7 @@ limitations under the License. 
package resource import ( + "context" "fmt" appsv1 "k8s.io/api/apps/v1" @@ -39,15 +40,15 @@ import ( func GetRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) { switch kind { case api.Kind("ReplicationController"): - return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) + return c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{}) case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"): - return c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{}) + return c.AppsV1().ReplicaSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"): - return c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) + return c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{}) case extensionsinternal.Kind("DaemonSet"): - return c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{}) + return c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) case batchinternal.Kind("Job"): - return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{}) + return c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{}) default: return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind) } diff --git a/test/e2e/framework/resource_usage_gatherer.go b/test/e2e/framework/resource_usage_gatherer.go index eabc38b5cde..3b43ca0e37e 100644 --- a/test/e2e/framework/resource_usage_gatherer.go +++ b/test/e2e/framework/resource_usage_gatherer.go @@ -397,7 +397,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt // Tracks kube-system pods if no valid PodList is passed in. var err error if pods == nil { - pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{}) + pods, err = c.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{}) if err != nil { Logf("Error while listing Pods: %v", err) return nil, err @@ -421,7 +421,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt dnsNodes[pod.Spec.NodeName] = true } } - nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { Logf("Error while listing Nodes: %v", err) return nil, err diff --git a/test/e2e/framework/security/apparmor.go b/test/e2e/framework/security/apparmor.go index 5f016baa904..806f0dffebb 100644 --- a/test/e2e/framework/security/apparmor.go +++ b/test/e2e/framework/security/apparmor.go @@ -17,6 +17,7 @@ limitations under the License. 
package security import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -119,7 +120,7 @@ done`, testCmd) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace( clientset, pod.Name, nsName)) var err error - pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) } else { pod = podClient.CreateSync(pod) @@ -155,7 +156,7 @@ profile %s flags=(attach_disconnected) { profileName: profile, }, } - _, err := clientset.CoreV1().ConfigMaps(nsName).Create(cm) + _, err := clientset.CoreV1().ConfigMaps(nsName).Create(context.TODO(), cm) framework.ExpectNoError(err, "Failed to create apparmor-profiles ConfigMap") } @@ -223,7 +224,7 @@ func createAppArmorProfileLoader(nsName string, clientset clientset.Interface) { }, }, } - _, err := clientset.CoreV1().ReplicationControllers(nsName).Create(loader) + _, err := clientset.CoreV1().ReplicationControllers(nsName).Create(context.TODO(), loader) framework.ExpectNoError(err, "Failed to create apparmor-loader ReplicationController") // Wait for loader to be ready. diff --git a/test/e2e/framework/service/jig.go b/test/e2e/framework/service/jig.go index 9d14880e698..0ea48ac8dc0 100644 --- a/test/e2e/framework/service/jig.go +++ b/test/e2e/framework/service/jig.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "fmt" "net" "regexp" @@ -105,7 +106,7 @@ func (j *TestJig) CreateTCPServiceWithPort(tweak func(svc *v1.Service), port int if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc) if err != nil { return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err) } @@ -120,7 +121,7 @@ func (j *TestJig) CreateTCPService(tweak func(svc *v1.Service)) (*v1.Service, er if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc) if err != nil { return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err) } @@ -135,7 +136,7 @@ func (j *TestJig) CreateUDPService(tweak func(svc *v1.Service)) (*v1.Service, er if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc) if err != nil { return nil, fmt.Errorf("failed to create UDP Service %q: %v", svc.Name, err) } @@ -160,7 +161,7 @@ func (j *TestJig) CreateExternalNameService(tweak func(svc *v1.Service)) (*v1.Se if tweak != nil { tweak(svc) } - result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc) + result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc) if err != nil { return nil, fmt.Errorf("failed to create ExternalName Service %q: %v", svc.Name, err) } @@ -252,7 +253,7 @@ func (j *TestJig) CreateLoadBalancerService(timeout time.Duration, tweak func(sv if tweak != nil { tweak(svc) } - _, err := j.Client.CoreV1().Services(j.Namespace).Create(svc) + _, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc) if err != nil { return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err) } @@ -284,7 +285,7 @@ func (j *TestJig) GetEndpointNodes() (map[string][]string, error) { // GetEndpointNodeNames returns a string set of node names on which the // endpoints of the given Service are running. 
func (j *TestJig) GetEndpointNodeNames() (sets.String, error) { - endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(j.Name, metav1.GetOptions{}) + endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err) } @@ -305,7 +306,7 @@ func (j *TestJig) GetEndpointNodeNames() (sets.String, error) { // WaitForEndpointOnNode waits for a service endpoint on the given node. func (j *TestJig) WaitForEndpointOnNode(nodeName string) error { return wait.PollImmediate(framework.Poll, LoadBalancerPropagationTimeoutDefault, func() (bool, error) { - endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(j.Name, metav1.GetOptions{}) + endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err) return false, nil @@ -340,12 +341,12 @@ func (j *TestJig) WaitForAvailableEndpoint(timeout time.Duration) error { &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = endpointSelector.String() - obj, err := j.Client.CoreV1().Endpoints(j.Namespace).List(options) + obj, err := j.Client.CoreV1().Endpoints(j.Namespace).List(context.TODO(), options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = endpointSelector.String() - return j.Client.CoreV1().Endpoints(j.Namespace).Watch(options) + return j.Client.CoreV1().Endpoints(j.Namespace).Watch(context.TODO(), options) }, }, &v1.Endpoints{}, @@ -437,12 +438,12 @@ func (j *TestJig) sanityCheckService(svc *v1.Service, svcType v1.ServiceType) (* // face of timeouts and conflicts. 
func (j *TestJig) UpdateService(update func(*v1.Service)) (*v1.Service, error) { for i := 0; i < 3; i++ { - service, err := j.Client.CoreV1().Services(j.Namespace).Get(j.Name, metav1.GetOptions{}) + service, err := j.Client.CoreV1().Services(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get Service %q: %v", j.Name, err) } update(service) - result, err := j.Client.CoreV1().Services(j.Namespace).Update(service) + result, err := j.Client.CoreV1().Services(j.Namespace).Update(context.TODO(), service) if err == nil { return j.sanityCheckService(result, service.Spec.Type) } @@ -534,7 +535,7 @@ func (j *TestJig) WaitForLoadBalancerDestroy(ip string, port int, timeout time.D func (j *TestJig) waitForCondition(timeout time.Duration, message string, conditionFn func(*v1.Service) bool) (*v1.Service, error) { var service *v1.Service pollFunc := func() (bool, error) { - svc, err := j.Client.CoreV1().Services(j.Namespace).Get(j.Name, metav1.GetOptions{}) + svc, err := j.Client.CoreV1().Services(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -618,7 +619,7 @@ func (j *TestJig) AddRCAntiAffinity(rc *v1.ReplicationController) { // CreatePDB returns a PodDisruptionBudget for the given ReplicationController, or returns an error if a PodDisruptionBudget isn't ready func (j *TestJig) CreatePDB(rc *v1.ReplicationController) (*policyv1beta1.PodDisruptionBudget, error) { pdb := j.newPDBTemplate(rc) - newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Create(pdb) + newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Create(context.TODO(), pdb) if err != nil { return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err) } @@ -658,7 +659,7 @@ func (j *TestJig) Run(tweak func(rc *v1.ReplicationController)) (*v1.Replication if tweak != nil { tweak(rc) } - result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(rc) + result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(context.TODO(), rc) if err != nil { return nil, fmt.Errorf("failed to create RC %q: %v", rc.Name, err) } @@ -675,14 +676,14 @@ func (j *TestJig) Run(tweak func(rc *v1.ReplicationController)) (*v1.Replication // Scale scales pods to the given replicas func (j *TestJig) Scale(replicas int) error { rc := j.Name - scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(rc, metav1.GetOptions{}) + scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(context.TODO(), rc, metav1.GetOptions{}) if err != nil { return fmt.Errorf("failed to get scale for RC %q: %v", rc, err) } scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = int32(replicas) - _, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(rc, scale) + _, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(context.TODO(), rc, scale) if err != nil { return fmt.Errorf("failed to scale RC %q: %v", rc, err) } @@ -699,7 +700,7 @@ func (j *TestJig) Scale(replicas int) error { func (j *TestJig) waitForPdbReady() error { timeout := 2 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { - pdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Get(j.Name, metav1.GetOptions{}) + pdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) if err != nil { 
return err } @@ -718,7 +719,7 @@ func (j *TestJig) waitForPodsCreated(replicas int) ([]string, error) { framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas) for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := j.Client.CoreV1().Pods(j.Namespace).List(options) + pods, err := j.Client.CoreV1().Pods(j.Namespace).List(context.TODO(), options) if err != nil { return nil, err } diff --git a/test/e2e/framework/service/resource.go b/test/e2e/framework/service/resource.go index 5ae5d5b0d6c..ada215aa739 100644 --- a/test/e2e/framework/service/resource.go +++ b/test/e2e/framework/service/resource.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -64,14 +65,14 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update var service *v1.Service var err error for i := 0; i < 3; i++ { - service, err = c.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{}) + service, err = c.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) if err != nil { return service, err } update(service) - service, err = c.CoreV1().Services(namespace).Update(service) + service, err = c.CoreV1().Services(namespace).Update(context.TODO(), service) if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) { return service, err diff --git a/test/e2e/framework/service/wait.go b/test/e2e/framework/service/wait.go index 67d1b1b6370..627a8d21273 100644 --- a/test/e2e/framework/service/wait.go +++ b/test/e2e/framework/service/wait.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "fmt" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -32,13 +33,13 @@ import ( // WaitForServiceDeletedWithFinalizer waits for the service with finalizer to be deleted. 
func WaitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name string) { ginkgo.By("Delete service with finalizer") - if err := cs.CoreV1().Services(namespace).Delete(name, nil); err != nil { + if err := cs.CoreV1().Services(namespace).Delete(context.TODO(), name, nil); err != nil { framework.Failf("Failed to delete service %s/%s", namespace, name) } ginkgo.By("Wait for service to disappear") if pollErr := wait.PollImmediate(LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(cs), func() (bool, error) { - svc, err := cs.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { framework.Logf("Service %s/%s is gone.", namespace, name) @@ -58,7 +59,7 @@ func WaitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name func WaitForServiceUpdatedWithFinalizer(cs clientset.Interface, namespace, name string, hasFinalizer bool) { ginkgo.By(fmt.Sprintf("Wait for service to hasFinalizer=%t", hasFinalizer)) if pollErr := wait.PollImmediate(LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(cs), func() (bool, error) { - svc, err := cs.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/framework/ssh/ssh.go b/test/e2e/framework/ssh/ssh.go index ef29c60938b..a7372afa0e4 100644 --- a/test/e2e/framework/ssh/ssh.go +++ b/test/e2e/framework/ssh/ssh.go @@ -18,6 +18,7 @@ package ssh import ( "bytes" + "context" "fmt" "net" "os" @@ -319,7 +320,7 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) { var nodes *v1.NodeList var err error if wait.PollImmediate(pollNodeInterval, singleCallTimeout, func() (bool, error) { - nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { diff --git a/test/e2e/framework/statefulset/rest.go b/test/e2e/framework/statefulset/rest.go index cbbcbe9c6a2..ca1056c10d5 100644 --- a/test/e2e/framework/statefulset/rest.go +++ b/test/e2e/framework/statefulset/rest.go @@ -17,6 +17,7 @@ limitations under the License. 
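The same substitution continues through the statefulset helpers below. Using context.TODO() keeps the change purely mechanical; once the framework plumbs contexts through its helpers, call sites can bound API calls explicitly. One possible future shape, shown only as an illustration and not part of this change, assuming a clientset c plus ns and name in scope:

    // a caller-supplied deadline instead of the TODO placeholder
    ctx, cancel := context.WithTimeout(context.Background(), framework.PollShortTimeout)
    defer cancel()
    ss, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})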
package statefulset import ( + "context" "fmt" "path/filepath" "strings" @@ -49,11 +50,11 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.S framework.ExpectNoError(err) framework.Logf(fmt.Sprintf("creating " + ss.Name + " service")) - _, err = c.CoreV1().Services(ns).Create(svc) + _, err = c.CoreV1().Services(ns).Create(context.TODO(), svc) framework.ExpectNoError(err) framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector)) - _, err = c.AppsV1().StatefulSets(ns).Create(ss) + _, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss) framework.ExpectNoError(err) WaitForRunningAndReady(c, *ss.Spec.Replicas, ss) return ss @@ -63,14 +64,14 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.S func GetPodList(c clientset.Interface, ss *appsv1.StatefulSet) *v1.PodList { selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector) framework.ExpectNoError(err) - podList, err := c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) + podList, err := c.CoreV1().Pods(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) framework.ExpectNoError(err) return podList } // DeleteAllStatefulSets deletes all StatefulSet API Objects in Namespace ns. func DeleteAllStatefulSets(c clientset.Interface, ns string) { - ssList, err := c.AppsV1().StatefulSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + ssList, err := c.AppsV1().StatefulSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) framework.ExpectNoError(err) // Scale down each statefulset, then delete it completely. @@ -86,7 +87,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { framework.Logf("Deleting statefulset %v", ss.Name) // Use OrphanDependents=false so it's deleted synchronously. // We already made sure the Pods are gone inside Scale(). 
- if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil { + if err := c.AppsV1().StatefulSets(ss.Namespace).Delete(context.TODO(), ss.Name, &metav1.DeleteOptions{OrphanDependents: new(bool)}); err != nil { errList = append(errList, fmt.Sprintf("%v", err)) } } @@ -95,7 +96,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { pvNames := sets.NewString() // TODO: Don't assume all pvcs in the ns belong to a statefulset pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { - pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("WARNING: Failed to list pvcs, retrying %v", err) return false, nil @@ -104,7 +105,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { pvNames.Insert(pvc.Spec.VolumeName) // TODO: Double check that there are no pods referencing the pvc framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName) - if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil { + if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvc.Name, nil); err != nil { return false, nil } } @@ -115,7 +116,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { } pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { - pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pvList, err := c.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("WARNING: Failed to list pvs, retrying %v", err) return false, nil @@ -247,12 +248,12 @@ func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd strin // udpate updates a statefulset, and it is only used within rest.go func update(c clientset.Interface, ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet { for i := 0; i < 3; i++ { - ss, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{}) + ss, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { framework.Failf("failed to get statefulset %q: %v", name, err) } update(ss) - ss, err = c.AppsV1().StatefulSets(ns).Update(ss) + ss, err = c.AppsV1().StatefulSets(ns).Update(context.TODO(), ss) if err == nil { return ss } diff --git a/test/e2e/framework/statefulset/wait.go b/test/e2e/framework/statefulset/wait.go index b348f200a9f..bdffeef5517 100644 --- a/test/e2e/framework/statefulset/wait.go +++ b/test/e2e/framework/statefulset/wait.go @@ -17,6 +17,7 @@ limitations under the License. 
package statefulset import ( + "context" "fmt" appsv1 "k8s.io/api/apps/v1" @@ -62,7 +63,7 @@ func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, s func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *v1.PodList) (bool, error)) { pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { - ssGet, err := c.AppsV1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{}) + ssGet, err := c.AppsV1().StatefulSets(ss.Namespace).Get(context.TODO(), ss.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -102,7 +103,7 @@ func WaitForStatusReadyReplicas(c clientset.Interface, ss *appsv1.StatefulSet, e ns, name := ss.Namespace, ss.Name pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { - ssGet, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{}) + ssGet, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } @@ -127,7 +128,7 @@ func WaitForStatusReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expect ns, name := ss.Namespace, ss.Name pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { - ssGet, err := c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{}) + ssGet, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index c1b48836da0..18900d3edb1 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -239,7 +239,7 @@ func NodeOSDistroIs(supportedNodeOsDistros ...string) bool { // Returns the list of deleted namespaces or an error. func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) { ginkgo.By("Deleting namespaces") - nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) + nsList, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) ExpectNoError(err, "Failed to get namespace list") var deleted []string var wg sync.WaitGroup @@ -269,7 +269,7 @@ OUTER: go func(nsName string) { defer wg.Done() defer ginkgo.GinkgoRecover() - gomega.Expect(c.CoreV1().Namespaces().Delete(nsName, nil)).To(gomega.Succeed()) + gomega.Expect(c.CoreV1().Namespaces().Delete(context.TODO(), nsName, nil)).To(gomega.Succeed()) Logf("namespace : %v api call to delete is complete ", nsName) }(item.Name) } @@ -287,7 +287,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou //Now POLL until all namespaces have been eradicated. 
return wait.Poll(2*time.Second, timeout, func() (bool, error) { - nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) + nsList, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -301,7 +301,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou } func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error { - w, err := c.CoreV1().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName})) + w, err := c.CoreV1().ServiceAccounts(ns).Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName})) if err != nil { return err } @@ -322,7 +322,7 @@ func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace st func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if err == nil { Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start)) continue @@ -341,7 +341,7 @@ func findAvailableNamespaceName(baseName string, c clientset.Interface) (string, var name string err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { name = fmt.Sprintf("%v-%v", baseName, RandomSuffix()) - _, err := c.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) + _, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) if err == nil { // Already taken return false, nil @@ -383,7 +383,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s var got *v1.Namespace if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { var err error - got, err = c.CoreV1().Namespaces().Create(namespaceObj) + got, err = c.CoreV1().Namespaces().Create(context.TODO(), namespaceObj) if err != nil { Logf("Unexpected error while creating namespace: %v", err) return false, nil @@ -422,7 +422,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { Logf("Waiting for terminating namespaces to be deleted...") for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) { - namespaces, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) + namespaces, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) if err != nil { Logf("Listing namespaces failed: %v", err) continue @@ -446,7 +446,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { // WaitForService waits until the service appears (exist == true), or disappears (exist == false) func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := c.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + _, err := c.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) switch { case err == nil: Logf("Service %s in namespace %s found.", name, namespace) @@ -473,7 +473,7 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i func WaitForServiceEndpointsNum(c clientset.Interface, 
namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { return wait.Poll(interval, timeout, func() (bool, error) { Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum) - list, err := c.CoreV1().Endpoints(namespace).List(metav1.ListOptions{}) + list, err := c.CoreV1().Endpoints(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -837,7 +837,7 @@ func (f *Framework) MatchContainerOutput( podErr := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns) // Grab its logs. Get host first. - podStatus, err := podClient.Get(createdPod.Name, metav1.GetOptions{}) + podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("failed to get pod status: %v", err) } @@ -907,7 +907,7 @@ func dumpEventsInNamespace(eventsLister EventsLister, namespace string) { // DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace. func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) { - return c.CoreV1().Events(ns).List(opts) + return c.CoreV1().Events(ns).List(context.TODO(), opts) }, namespace) e2epod.DumpAllPodInfoForNamespace(c, namespace) @@ -917,7 +917,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { // 2. there are so many of them that working with them are mostly impossible // So we dump them only if the cluster is relatively small. maxNodesForDump := TestContext.MaxNodesToGather - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { Logf("unable to fetch node list: %v", err) return @@ -954,7 +954,7 @@ func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) { func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) { for _, n := range nodeNames { logFunc("\nLogging node info for node %v", n) - node, err := c.CoreV1().Nodes().Get(n, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{}) if err != nil { logFunc("Error getting node info %v", err) } @@ -1025,7 +1025,7 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event { "source": "kubelet", }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(options) + events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options) if err != nil { Logf("Unexpected error retrieving node events %v", err) return []v1.Event{} @@ -1061,7 +1061,7 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration { } func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) { - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil || len(nodes.Items) == 0 { return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err) } @@ -1090,7 +1090,7 @@ func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, la // ExpectNodeHasLabel expects that the given node has the given label pair. 
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) { ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue) - node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) ExpectNoError(err) ExpectEqual(node.Labels[labelKey], labelValue) } @@ -1118,7 +1118,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) { ginkgo.By("verifying the node doesn't have the taint " + taint.ToString()) - nodeUpdated, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + nodeUpdated, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) ExpectNoError(err) if taintutils.TaintExists(nodeUpdated.Spec.Taints, taint) { Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName) @@ -1136,7 +1136,7 @@ func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) // NodeHasTaint returns true if the node has the given taint, else returns false. func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) { - node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1325,7 +1325,7 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error { err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil @@ -1471,7 +1471,7 @@ func waitForApiserverRestarted(c clientset.Interface, initialRestartCount int32) func getApiserverRestartCount(c clientset.Interface) (int32, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "kube-apiserver"})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts) + pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts) if err != nil { return -1, err } @@ -1764,7 +1764,7 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) { var externalIP, internalIP, hostname string // Populate the internal IP. - eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) + eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) if err != nil { Failf("Failed to get kubernetes endpoints: %v", err) } @@ -1868,7 +1868,7 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err // DumpDebugInfo dumps debug info of tests. 
func DumpDebugInfo(c clientset.Interface, ns string) { - sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()}) for _, s := range sl.Items { desc, _ := RunKubectl(ns, "describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns)) Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc) @@ -1921,7 +1921,7 @@ func DsFromManifest(url string) (*appsv1.DaemonSet, error) { // GetClusterZones returns the values of zone label collected from all nodes. func GetClusterZones(c clientset.Interface) (sets.String, error) { - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err) } diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go index 9ba32930ee2..7f6f0273dbb 100644 --- a/test/e2e/framework/volume/fixtures.go +++ b/test/e2e/framework/volume/fixtures.go @@ -40,6 +40,7 @@ limitations under the License. package volume import ( + "context" "fmt" "path/filepath" "strconv" @@ -198,7 +199,7 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestCo }, }, } - _, err := cs.CoreV1().Endpoints(namespace).Create(endpoints) + _, err := cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints) framework.ExpectNoError(err, "failed to create endpoints for Gluster server") return config, pod, ip @@ -302,13 +303,13 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { } var pod *v1.Pod - serverPod, err := podClient.Create(serverPod) + serverPod, err := podClient.Create(context.TODO(), serverPod) // ok if the server pod already exists. 
TODO: make this controllable by callers if err != nil { if apierrors.IsAlreadyExists(err) { framework.Logf("Ignore \"already-exists\" error, re-get pod...") ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName)) - serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{}) + serverPod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{}) framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err) pod = serverPod } else { @@ -317,12 +318,12 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { } if config.WaitForCompletion { framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace)) - framework.ExpectNoError(podClient.Delete(serverPod.Name, nil)) + framework.ExpectNoError(podClient.Delete(context.TODO(), serverPod.Name, nil)) } else { framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod)) if pod == nil { ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName)) - pod, err = podClient.Get(serverPodName, metav1.GetOptions{}) + pod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{}) framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err) } } @@ -423,7 +424,7 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix }) } podsNamespacer := client.CoreV1().Pods(config.Namespace) - clientPod, err := podsNamespacer.Create(clientPod) + clientPod, err := podsNamespacer.Create(context.TODO(), clientPod) if err != nil { return nil, err } diff --git a/test/e2e/instrumentation/logging/elasticsearch/kibana.go b/test/e2e/instrumentation/logging/elasticsearch/kibana.go index 4e598a58768..2645062a37f 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/kibana.go +++ b/test/e2e/instrumentation/logging/elasticsearch/kibana.go @@ -63,7 +63,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) { - if _, err := s.Get("kibana-logging", metav1.GetOptions{}); err != nil { + if _, err := s.Get(context.TODO(), "kibana-logging", metav1.GetOptions{}); err != nil { framework.Logf("Kibana is unreachable: %v", err) return false, nil } @@ -75,7 +75,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { ginkgo.By("Checking to make sure the Kibana pods are running") label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options) + pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), options) framework.ExpectNoError(err) for _, pod := range pods.Items { err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, &pod) diff --git a/test/e2e/instrumentation/logging/elasticsearch/utils.go b/test/e2e/instrumentation/logging/elasticsearch/utils.go index 856da06ac99..644492e4df1 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/utils.go +++ b/test/e2e/instrumentation/logging/elasticsearch/utils.go @@ -63,7 +63,7 @@ func (p *esLogProvider) Init() error { // being run as the first e2e test just after the e2e cluster has been created. 
var err error for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) { - if _, err = s.Get("elasticsearch-logging", meta_v1.GetOptions{}); err == nil { + if _, err = s.Get(context.TODO(), "elasticsearch-logging", meta_v1.GetOptions{}); err == nil { break } framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start)) @@ -76,7 +76,7 @@ func (p *esLogProvider) Init() error { framework.Logf("Checking to make sure the Elasticsearch pods are running") labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String() options := meta_v1.ListOptions{LabelSelector: labelSelector} - pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options) + pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(context.TODO(), options) if err != nil { return err } diff --git a/test/e2e/instrumentation/logging/utils/logging_agent.go b/test/e2e/instrumentation/logging/utils/logging_agent.go index 77655c401b8..e898c8c8ac9 100644 --- a/test/e2e/instrumentation/logging/utils/logging_agent.go +++ b/test/e2e/instrumentation/logging/utils/logging_agent.go @@ -17,6 +17,7 @@ limitations under the License. package utils import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -91,5 +92,5 @@ func EnsureLoggingAgentRestartsCount(f *framework.Framework, appName string, max func getLoggingAgentPods(f *framework.Framework, appName string) (*v1.PodList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": appName})) options := meta_v1.ListOptions{LabelSelector: label.String()} - return f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options) + return f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(context.TODO(), options) } diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index 762450e06fd..ac015b712f4 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -120,11 +120,11 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c } defer CleanupAdapter(f.Namespace.Name, adapterDeployment) - _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions) + _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions) if err != nil { framework.Failf("Failed to create ClusterRoleBindings: %v", err) } - defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{}) + defer kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), HPAPermissions.Name, &metav1.DeleteOptions{}) // Run application that exports the metric _, err = createSDExporterPods(f, kubeClient) @@ -168,11 +168,11 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, } defer CleanupAdapter(f.Namespace.Name, AdapterForOldResourceModel) - _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions) + _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), HPAPermissions) if err != nil { framework.Failf("Failed to create ClusterRoleBindings: %v", err) } - defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{}) + defer kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), HPAPermissions.Name, &metav1.DeleteOptions{}) // Run application that exports the metric pod, err 
:= createSDExporterPods(f, kubeClient) @@ -258,21 +258,21 @@ func verifyResponseFromExternalMetricsAPI(f *framework.Framework, externalMetric } func cleanupSDExporterPod(f *framework.Framework, cs clientset.Interface) { - err := cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod1, &metav1.DeleteOptions{}) + err := cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), stackdriverExporterPod1, &metav1.DeleteOptions{}) if err != nil { framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err) } - err = cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod2, &metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), stackdriverExporterPod2, &metav1.DeleteOptions{}) if err != nil { framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err) } } func createSDExporterPods(f *framework.Framework, cs clientset.Interface) (*v1.Pod, error) { - pod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(StackdriverExporterPod(stackdriverExporterPod1, f.Namespace.Name, stackdriverExporterLabel, CustomMetricName, CustomMetricValue)) + pod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), StackdriverExporterPod(stackdriverExporterPod1, f.Namespace.Name, stackdriverExporterLabel, CustomMetricName, CustomMetricValue)) if err != nil { return nil, err } - _, err = cs.CoreV1().Pods(f.Namespace.Name).Create(StackdriverExporterPod(stackdriverExporterPod2, f.Namespace.Name, stackdriverExporterLabel, UnusedMetricName, UnusedMetricValue)) + _, err = cs.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), StackdriverExporterPod(stackdriverExporterPod2, f.Namespace.Name, stackdriverExporterLabel, UnusedMetricName, UnusedMetricValue)) return pod, err } diff --git a/test/e2e/instrumentation/monitoring/metrics_grabber.go b/test/e2e/instrumentation/monitoring/metrics_grabber.go index 66ac7a6c574..89b0bd6299d 100644 --- a/test/e2e/instrumentation/monitoring/metrics_grabber.go +++ b/test/e2e/instrumentation/monitoring/metrics_grabber.go @@ -17,6 +17,7 @@ limitations under the License. 
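The monitoring and kubectl test changes below follow suit. Only the context is prepended; the existing arguments, including a nil or explicit *metav1.DeleteOptions for Delete and the metav1.ListOptions for List, are left unchanged at this point in the migration. A small sketch, assuming a clientset c plus ns and a pod name in scope:

    // list with an unchanged ListOptions, context prepended
    pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
    // delete with the options argument kept as before
    err = c.CoreV1().Pods(ns).Delete(context.TODO(), name, &metav1.DeleteOptions{})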
package monitoring import ( + "context" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -62,7 +63,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { gin.It("should grab all metrics from a Scheduler.", func() { gin.By("Proxying to Pod through the API server") // Check if master Node is registered - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) var masterRegistered = false @@ -83,7 +84,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { gin.It("should grab all metrics from a ControllerManager.", func() { gin.By("Proxying to Pod through the API server") // Check if master Node is registered - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) var masterRegistered = false diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go index 15df04e37f7..9a9a22cbd47 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go +++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go @@ -77,7 +77,7 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) { _ = e2epod.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) { pod.Spec.Containers[0].Name = uniqueContainerName }) - defer kubeClient.CoreV1().Pods(f.Namespace.Name).Delete(uniqueContainerName, &metav1.DeleteOptions{}) + defer kubeClient.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), uniqueContainerName, &metav1.DeleteOptions{}) // Wait a short amount of time for Metadata Agent to be created and metadata to be exported time.Sleep(metadataWaitTime) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index a716cab05ab..c28a3d07f57 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -646,7 +646,7 @@ var _ = SIGDescribe("Kubectl client", func() { gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) - gomega.Expect(c.CoreV1().Pods(ns).Delete("run-test", nil)).To(gomega.BeNil()) + gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test", nil)).To(gomega.BeNil()) ginkgo.By("executing a command with run and attach without stdin") runOutput = framework.NewKubectlCommand(ns, fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). @@ -655,7 +655,7 @@ var _ = SIGDescribe("Kubectl client", func() { gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234")) gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) - gomega.Expect(c.CoreV1().Pods(ns).Delete("run-test-2", nil)).To(gomega.BeNil()) + gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", nil)).To(gomega.BeNil()) ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running") runOutput = framework.NewKubectlCommand(ns, nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). 
@@ -681,7 +681,7 @@ var _ = SIGDescribe("Kubectl client", func() { }) gomega.Expect(err).To(gomega.BeNil()) - gomega.Expect(c.CoreV1().Pods(ns).Delete("run-test-3", nil)).To(gomega.BeNil()) + gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-3", nil)).To(gomega.BeNil()) }) ginkgo.It("should contain last line of the log", func() { @@ -1173,7 +1173,7 @@ metadata: // Node // It should be OK to list unschedulable Nodes here. - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) node := nodes.Items[0] output = framework.RunKubectlOrDie(ns, "describe", "node", node.Name) @@ -1215,7 +1215,7 @@ metadata: ginkgo.By("waiting for cronjob to start.") err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - cj, err := c.BatchV1beta1().CronJobs(ns).List(metav1.ListOptions{}) + cj, err := c.BatchV1beta1().CronJobs(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed getting CronJob %s: %v", ns, err) } @@ -1270,7 +1270,7 @@ metadata: }) validateService := func(name string, servicePort int, timeout time.Duration) { err := wait.Poll(framework.Poll, timeout, func() (bool, error) { - ep, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{}) + ep, err := c.CoreV1().Endpoints(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { // log the real error framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err) @@ -1301,7 +1301,7 @@ metadata: }) framework.ExpectNoError(err) - e2eservice, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) + e2eservice, err := c.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) if len(e2eservice.Spec.Ports) != 1 { @@ -1597,7 +1597,7 @@ metadata: ginkgo.By("running the image " + httpdImage) framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", "--image="+httpdImage, nsFlag) ginkgo.By("verifying the pod " + podName + " was created") - pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting pod %s: %v", podName, err) } @@ -1651,7 +1651,7 @@ metadata: framework.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-", nsFlag) ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage) - pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting deployment %s: %v", podName, err) } @@ -1835,7 +1835,7 @@ metadata: framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag) ginkgo.By("verifying that the quota was created") - quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) } @@ -1864,7 +1864,7 @@ metadata: framework.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag) ginkgo.By("verifying that the quota was created") - quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, 
metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) } @@ -2099,7 +2099,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) { label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) options := metav1.ListOptions{LabelSelector: label.String()} - rcs, err = c.CoreV1().ReplicationControllers(ns).List(options) + rcs, err = c.CoreV1().ReplicationControllers(ns).List(context.TODO(), options) framework.ExpectNoError(err) if len(rcs.Items) > 0 { break @@ -2339,7 +2339,7 @@ func waitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D "metadata.name": name, "metadata.namespace": ns, }.AsSelector().String()} - w, err := c.CoreV1().ReplicationControllers(ns).Watch(options) + w, err := c.CoreV1().ReplicationControllers(ns).Watch(context.TODO(), options) if err != nil { return err } diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go index 2834f39c229..85cc658441d 100644 --- a/test/e2e/kubectl/portforward.go +++ b/test/e2e/kubectl/portforward.go @@ -20,6 +20,7 @@ package kubectl import ( "bytes" + "context" "encoding/binary" "fmt" "io/ioutil" @@ -207,7 +208,7 @@ func runPortForward(ns, podName string, port int) *portForwardCommand { func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { @@ -255,7 +256,7 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { @@ -292,7 +293,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { @@ -356,7 +357,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the pod") pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go 
b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go index 3917e0f99af..853e824fbb3 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go @@ -17,6 +17,7 @@ limitations under the License. package bootstrap import ( + "context" "github.com/onsi/ginkgo" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,7 +43,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { ginkgo.AfterEach(func() { if len(secretNeedClean) > 0 { ginkgo.By("delete the bootstrap token secret") - err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(secretNeedClean, &metav1.DeleteOptions{}) + err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), secretNeedClean, &metav1.DeleteOptions{}) framework.ExpectNoError(err) secretNeedClean = "" } @@ -56,7 +57,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { tokenID, err := GenerateTokenID() framework.ExpectNoError(err) secret := newTokenSecret(tokenID, "tokenSecret") - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenID framework.ExpectNoError(err) @@ -71,14 +72,14 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { tokenID, err := GenerateTokenID() framework.ExpectNoError(err) secret := newTokenSecret(tokenID, "tokenSecret") - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret) framework.ExpectNoError(err) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenID ginkgo.By("wait for the bootstrap token secret be signed") err = WaitforSignedClusterInfoByBootStrapToken(c, tokenID) - cfgMap, err := f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + cfgMap, err := f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) framework.ExpectNoError(err) signedToken, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID] framework.ExpectEqual(ok, true) @@ -88,14 +89,14 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { updatedKubeConfig, err := randBytes(20) framework.ExpectNoError(err) cfgMap.Data[bootstrapapi.KubeConfigKey] = updatedKubeConfig - _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(cfgMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(context.TODO(), cfgMap) framework.ExpectNoError(err) defer func() { ginkgo.By("update back the cluster-info ConfigMap") - cfgMap, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + cfgMap, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) framework.ExpectNoError(err) cfgMap.Data[bootstrapapi.KubeConfigKey] = originalData - _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(cfgMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(context.TODO(), cfgMap) framework.ExpectNoError(err) }() @@ -109,7 +110,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { tokenID, err := GenerateTokenID() framework.ExpectNoError(err) secret := newTokenSecret(tokenID, "tokenSecret") - _, err = 
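// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of this patch): the bootstrap
// token tests above create token Secrets and delete them again in AfterEach.
// At this stage of the migration Create still takes only the object and
// Delete still takes a *metav1.DeleteOptions; only the context is new.
// "createTokenSecret" below is a hypothetical helper with that call shape.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createTokenSecret creates a bootstrap token Secret in kube-system and
// returns a cleanup func that deletes it, mirroring the AfterEach above.
func createTokenSecret(c clientset.Interface, name string, data map[string]string) (func(), error) {
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: metav1.NamespaceSystem},
		Type:       "bootstrap.kubernetes.io/token",
		StringData: data,
	}
	if _, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret); err != nil {
		return nil, err
	}
	cleanup := func() {
		_ = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), name, &metav1.DeleteOptions{})
	}
	return cleanup, nil
}
// ---------------------------------------------------------------------------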
c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret) framework.ExpectNoError(err) ginkgo.By("wait for the bootstrap secret be signed") @@ -117,7 +118,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) ginkgo.By("delete the bootstrap token secret") - err = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, &metav1.DeleteOptions{}) + err = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), bootstrapapi.BootstrapTokenSecretPrefix+tokenID, &metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("wait for the bootstrap token removed from cluster-info ConfigMap") diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go index 7757e73a163..dd021872862 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go @@ -17,6 +17,7 @@ limitations under the License. package bootstrap import ( + "context" "time" "github.com/onsi/ginkgo" @@ -42,7 +43,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { ginkgo.AfterEach(func() { if len(secretNeedClean) > 0 { ginkgo.By("delete the bootstrap token secret") - err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(secretNeedClean, &metav1.DeleteOptions{}) + err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), secretNeedClean, &metav1.DeleteOptions{}) secretNeedClean = "" framework.ExpectNoError(err) } @@ -56,7 +57,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { secret := newTokenSecret(tokenID, tokenSecret) addSecretExpiration(secret, TimeStringFromNow(-time.Hour)) - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret) framework.ExpectNoError(err) @@ -73,7 +74,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) secret := newTokenSecret(tokenID, tokenSecret) addSecretExpiration(secret, TimeStringFromNow(time.Hour)) - _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret) + _, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), secret) secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenID framework.ExpectNoError(err) diff --git a/test/e2e/lifecycle/bootstrap/util.go b/test/e2e/lifecycle/bootstrap/util.go index f397b87bd49..d7e57b76299 100644 --- a/test/e2e/lifecycle/bootstrap/util.go +++ b/test/e2e/lifecycle/bootstrap/util.go @@ -17,6 +17,7 @@ limitations under the License. 
package bootstrap import ( + "context" "crypto/rand" "encoding/hex" "errors" @@ -87,7 +88,7 @@ func TimeStringFromNow(delta time.Duration) string { func WaitforSignedClusterInfoByBootStrapToken(c clientset.Interface, tokenID string) error { return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { - cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get cluster-info configMap: %v", err) return false, err @@ -104,7 +105,7 @@ func WaitforSignedClusterInfoByBootStrapToken(c clientset.Interface, tokenID str func WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c clientset.Interface, tokenID string, signedToken string) error { return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { - cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get cluster-info configMap: %v", err) return false, err @@ -121,7 +122,7 @@ func WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c clientset.Interface, t func WaitForSignedClusterInfoByBootstrapTokenToDisappear(c clientset.Interface, tokenID string) error { return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { - cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get cluster-info configMap: %v", err) return false, err @@ -138,7 +139,7 @@ func WaitForSignedClusterInfoByBootstrapTokenToDisappear(c clientset.Interface, func WaitForBootstrapTokenSecretToDisappear(c clientset.Interface, tokenID string) error { return wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) { - _, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{}) + _, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil } @@ -149,7 +150,7 @@ func WaitForBootstrapTokenSecretToDisappear(c clientset.Interface, tokenID strin // WaitForBootstrapTokenSecretNotDisappear waits for bootstrap token secret not to be disappeared and takes time for the specified timeout as success path. func WaitForBootstrapTokenSecretNotDisappear(c clientset.Interface, tokenID string, t time.Duration) error { err := wait.Poll(framework.Poll, t, func() (bool, error) { - secret, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{}) + secret, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, errors.New("secret not exists") } diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index bd9836d45b8..6bdb65a0f2c 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -17,6 +17,7 @@ limitations under the License. 
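// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of this patch): the util.go
// helpers above poll the cluster-info ConfigMap and the token Secret until
// they appear, update, or disappear. A hypothetical helper
// ("waitForSecretGone") with the same "wait until the object is gone" shape:
package example

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForSecretGone polls until a Get on the Secret returns NotFound.
func waitForSecretGone(c clientset.Interface, ns, name string, timeout time.Duration) error {
	return wait.Poll(time.Second, timeout, func() (bool, error) {
		_, err := c.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // deleted; done
		}
		// Any other error (or success) means it is still there; keep polling.
		return false, nil
	})
}
// ---------------------------------------------------------------------------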
package network import ( + "context" "fmt" "strings" "time" @@ -137,23 +138,23 @@ var _ = SIGDescribe("DNS", func() { "dns-test": "true", } headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) defer func() { ginkgo.By("deleting the test headless service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, nil) }() regularServiceName := "test-service-2" regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) - regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) + regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) defer func() { ginkgo.By("deleting the test service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), regularService.Name, nil) }() // All the names we need to be able to resolve. @@ -192,22 +193,22 @@ var _ = SIGDescribe("DNS", func() { "dns-test": "true", } headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) defer func() { ginkgo.By("deleting the test headless service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, nil) }() regularServiceName := "test-service-2" regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) - regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) + regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), regularService) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) defer func() { ginkgo.By("deleting the test service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), regularService.Name, nil) }() // All the names we need to be able to resolve. 
@@ -249,13 +250,13 @@ var _ = SIGDescribe("DNS", func() { serviceName := "dns-test-service-2" podHostname := "dns-querier-2" headlessService := e2eservice.CreateServiceSpec(serviceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService) framework.ExpectNoError(err, "failed to create headless service: %s", serviceName) defer func() { ginkgo.By("deleting the test headless service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, nil) }() hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) @@ -291,13 +292,13 @@ var _ = SIGDescribe("DNS", func() { serviceName := "dns-test-service-2" podHostname := "dns-querier-2" headlessService := e2eservice.CreateServiceSpec(serviceName, "", true, testServiceSelector) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService) framework.ExpectNoError(err, "failed to create headless service: %s", serviceName) defer func() { ginkgo.By("deleting the test headless service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), headlessService.Name, nil) }() hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) @@ -330,13 +331,13 @@ var _ = SIGDescribe("DNS", func() { ginkgo.By("Creating a test externalName service") serviceName := "dns-test-service-3" externalNameService := e2eservice.CreateServiceSpec(serviceName, "foo.example.com", false, nil) - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService) framework.ExpectNoError(err, "failed to create ExternalName service: %s", serviceName) defer func() { ginkgo.By("deleting the test externalName service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameService.Name, nil) }() hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy") @@ -391,7 +392,7 @@ var _ = SIGDescribe("DNS", func() { ginkgo.By("creating a third pod to probe DNS") pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) - svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(context.TODO(), externalNameService.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get service: %s", externalNameService.Name) validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP) @@ -413,12 +414,12 @@ var _ = SIGDescribe("DNS", func() { Nameservers: []string{testServerIP}, Searches: 
[]string{testSearchPath}, } - testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testAgnhostPod) + testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testAgnhostPod) framework.ExpectNoError(err, "failed to create pod: %s", testAgnhostPod.Name) framework.Logf("Created pod %v", testAgnhostPod) defer func() { framework.Logf("Deleting pod %s...", testAgnhostPod.Name) - if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err) } }() @@ -462,12 +463,12 @@ var _ = SIGDescribe("DNS", func() { testServerPod := generateDNSServerPod(map[string]string{ testDNSNameFull: testInjectedIP, }) - testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod) + testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testServerPod) framework.ExpectNoError(err, "failed to create pod: %s", testServerPod.Name) framework.Logf("Created pod %v", testServerPod) defer func() { framework.Logf("Deleting pod %s...", testServerPod.Name) - if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err) } }() @@ -475,7 +476,7 @@ var _ = SIGDescribe("DNS", func() { framework.ExpectNoError(err, "failed to wait for pod %s to be running", testServerPod.Name) // Retrieve server pod IP. 
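// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of this patch): the DNS hunks
// above and below create a server pod, wait for it to run, re-read it to get
// Status.PodIP, and delete it with a zero grace period. "createPodAndGetIP"
// and its waitRunning argument are hypothetical; the clientset call shapes
// match the surrounding diff (Create/Get/Delete with context.TODO()).
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createPodAndGetIP creates the pod, waits for it, and returns its IP plus a
// cleanup func that deletes it with grace period zero.
func createPodAndGetIP(c clientset.Interface, ns string, pod *v1.Pod, waitRunning func(string) error) (string, func(), error) {
	created, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod)
	if err != nil {
		return "", nil, err
	}
	cleanup := func() {
		_ = c.CoreV1().Pods(ns).Delete(context.TODO(), created.Name, metav1.NewDeleteOptions(0))
	}
	if err := waitRunning(created.Name); err != nil {
		return "", cleanup, err
	}
	// Re-read the pod so Status.PodIP is populated.
	running, err := c.CoreV1().Pods(ns).Get(context.TODO(), created.Name, metav1.GetOptions{})
	if err != nil {
		return "", cleanup, err
	}
	return running.Status.PodIP, cleanup, nil
}
// ---------------------------------------------------------------------------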
- testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{}) + testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), testServerPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get pod %v", testServerPod.Name) testServerIP := testServerPod.Status.PodIP framework.Logf("testServerIP is %s", testServerIP) @@ -494,12 +495,12 @@ var _ = SIGDescribe("DNS", func() { }, }, } - testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod) + testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testUtilsPod) framework.ExpectNoError(err, "failed to create pod: %s", testUtilsPod.Name) framework.Logf("Created pod %v", testUtilsPod) defer func() { framework.Logf("Deleting pod %s...", testUtilsPod.Name) - if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err) } }() diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index e36393d1fbf..00c75833cae 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -68,7 +68,7 @@ func (t *dnsTestCommon) init() { options := metav1.ListOptions{LabelSelector: label.String()} namespace := "kube-system" - pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(options) + pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(context.TODO(), options) framework.ExpectNoError(err, "failed to list pods in namespace: %s", namespace) gomega.Expect(len(pods.Items)).Should(gomega.BeNumerically(">=", 1)) @@ -152,23 +152,23 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) { "metadata.name": t.name, }.AsSelector().String(), } - cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options) + cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(context.TODO(), options) framework.ExpectNoError(err, "failed to list ConfigMaps in namespace: %s", t.ns) if len(cmList.Items) == 0 { ginkgo.By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) - _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm) + _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(context.TODO(), cm) framework.ExpectNoError(err, "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm) } else { ginkgo.By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)) - _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm) + _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(context.TODO(), cm) framework.ExpectNoError(err, "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm) } } func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string { if t.name == "coredns" { - pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{}) + pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), t.name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get DNS ConfigMap: %s", t.name) return pcm.Data } @@ -180,14 +180,14 @@ func (t *dnsTestCommon) restoreDNSConfigMap(configMapData map[string]string) { t.setConfigMap(&v1.ConfigMap{Data: configMapData}) t.deleteCoreDNSPods() } else { - t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) + t.c.CoreV1().ConfigMaps(t.ns).Delete(context.TODO(), t.name, nil) } } func (t *dnsTestCommon) deleteConfigMap() { 
ginkgo.By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) t.cm = nil - err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) + err := t.c.CoreV1().ConfigMaps(t.ns).Delete(context.TODO(), t.name, nil) framework.ExpectNoError(err, "failed to delete config map: %s", t.name) } @@ -219,7 +219,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) { } var err error - t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod) + t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(context.TODO(), t.utilPod) framework.ExpectNoError(err, "failed to create pod: %v", t.utilPod) framework.Logf("Created pod %v", t.utilPod) err = t.f.WaitForPodRunning(t.utilPod.Name) @@ -245,14 +245,14 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) { }, } - t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService) + t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(context.TODO(), t.utilService) framework.ExpectNoError(err, "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name) framework.Logf("Created service %v", t.utilService) } func (t *dnsTestCommon) deleteUtilPod() { podClient := t.c.CoreV1().Pods(t.f.Namespace.Name) - if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil { + if err := podClient.Delete(context.TODO(), t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Logf("Delete of pod %v/%v failed: %v", t.utilPod.Namespace, t.utilPod.Name, err) } @@ -264,12 +264,12 @@ func (t *dnsTestCommon) deleteCoreDNSPods() { label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(options) + pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(context.TODO(), options) framework.ExpectNoError(err, "failed to list pods of kube-system with label %q", label.String()) podClient := t.c.CoreV1().Pods(metav1.NamespaceSystem) for _, pod := range pods.Items { - err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "failed to delete pod: %s", pod.Name) } } @@ -312,14 +312,13 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) { t.dnsServerPod = pod var err error - t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod) + t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(context.TODO(), t.dnsServerPod) framework.ExpectNoError(err, "failed to create pod: %v", t.dnsServerPod) framework.Logf("Created pod %v", t.dnsServerPod) err = t.f.WaitForPodRunning(t.dnsServerPod.Name) framework.ExpectNoError(err, "pod failed to start running: %v", t.dnsServerPod) - t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get( - t.dnsServerPod.Name, metav1.GetOptions{}) + t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(context.TODO(), t.dnsServerPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get pod: %s", t.dnsServerPod.Name) } @@ -368,7 +367,7 @@ func (t *dnsTestCommon) createDNSServerWithPtrRecord(isIPv6 bool) { func (t *dnsTestCommon) deleteDNSServerPod() { podClient := t.c.CoreV1().Pods(t.f.Namespace.Name) - if err := podClient.Delete(t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { + if err := podClient.Delete(context.TODO(), t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != 
nil { framework.Logf("Delete of pod %v/%v failed: %v", t.utilPod.Namespace, t.dnsServerPod.Name, err) } @@ -562,16 +561,16 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) defer func() { ginkgo.By("deleting the pod") defer ginkgo.GinkgoRecover() - podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) + podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) }() - if _, err := podClient.Create(pod); err != nil { + if _, err := podClient.Create(context.TODO(), pod); err != nil { framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } framework.ExpectNoError(f.WaitForPodRunningSlow(pod.Name)) ginkgo.By("retrieving the pod") - pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) } @@ -590,16 +589,16 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames defer func() { ginkgo.By("deleting the pod") defer ginkgo.GinkgoRecover() - podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) + podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) }() - if _, err := podClient.Create(pod); err != nil { + if _, err := podClient.Create(context.TODO(), pod); err != nil { framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } framework.ExpectNoError(f.WaitForPodRunningSlow(pod.Name)) ginkgo.By("retrieving the pod") - pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) } diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index cea5b7921ec..c937c590089 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -17,6 +17,7 @@ limitations under the License. 
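// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of this patch): setConfigMap
// above implements a create-or-update: list by metadata.name, then Create if
// absent or Update if present, each call now taking context.TODO().
// "upsertConfigMap" below is a hypothetical helper with that shape.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	clientset "k8s.io/client-go/kubernetes"
)

// upsertConfigMap creates the ConfigMap if it does not exist, else updates it.
func upsertConfigMap(c clientset.Interface, ns string, cm *v1.ConfigMap) error {
	opts := metav1.ListOptions{
		FieldSelector: fields.Set{"metadata.name": cm.Name}.AsSelector().String(),
	}
	existing, err := c.CoreV1().ConfigMaps(ns).List(context.TODO(), opts)
	if err != nil {
		return err
	}
	if len(existing.Items) == 0 {
		_, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), cm)
	} else {
		_, err = c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm)
	}
	return err
}
// ---------------------------------------------------------------------------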
package network import ( + "context" "fmt" "time" @@ -56,7 +57,7 @@ var _ = SIGDescribe("DNS configMap federations [Feature:Federation]", func() { func (t *dnsFederationsConfigMapTest) run() { t.init() - defer t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) + defer t.c.CoreV1().ConfigMaps(t.ns).Delete(context.TODO(), t.name, nil) t.createUtilPodLabel("e2e-dns-configmap") defer t.deleteUtilPod() originalConfigMapData := t.fetchDNSConfigMapData() @@ -412,19 +413,19 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { f := t.f serviceName := "dns-externalname-upstream-test" externalNameService := e2eservice.CreateServiceSpec(serviceName, googleDNSHostname, false, nil) - if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService); err != nil { + if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameService); err != nil { ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) } serviceNameLocal := "dns-externalname-upstream-local" externalNameServiceLocal := e2eservice.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil) - if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameServiceLocal); err != nil { + if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), externalNameServiceLocal); err != nil { ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) } defer func() { ginkgo.By("deleting the test externalName service") defer ginkgo.GinkgoRecover() - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) - f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameServiceLocal.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), externalNameServiceLocal.Name, nil) }() if isIPv6 { diff --git a/test/e2e/network/dns_scale_records.go b/test/e2e/network/dns_scale_records.go index cfb15b142ea..f2820a80667 100644 --- a/test/e2e/network/dns_scale_records.go +++ b/test/e2e/network/dns_scale_records.go @@ -82,7 +82,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { continue } s := services[i] - svc, err := f.ClientSet.CoreV1().Services(s.Namespace).Get(s.Name, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(s.Namespace).Get(context.TODO(), s.Name, metav1.GetOptions{}) framework.ExpectNoError(err) qname := fmt.Sprintf("%v.%v.svc.%v", s.Name, s.Namespace, framework.TestContext.ClusterDNSDomain) framework.Logf("Querying %v expecting %v", qname, svc.Spec.ClusterIP) diff --git a/test/e2e/network/dual_stack.go b/test/e2e/network/dual_stack.go index 7ee4c00ebb5..32eff31c6e0 100644 --- a/test/e2e/network/dual_stack.go +++ b/test/e2e/network/dual_stack.go @@ -17,6 +17,7 @@ limitations under the License. 
package network import ( + "context" "fmt" "net" "time" @@ -100,7 +101,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { podClient.CreateSync(pod) framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - p, err := podClient.Get(pod.Name, metav1.GetOptions{}) + p, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get pod %q", pod.Name) gomega.Expect(p.Status.PodIP).ShouldNot(gomega.BeEquivalentTo("")) @@ -114,7 +115,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { framework.ExpectEqual(isIPv4(p.Status.PodIPs[0].IP) != isIPv4(p.Status.PodIPs[1].IP), true) ginkgo.By("deleting the pod") - err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30)) + err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(30)) framework.ExpectNoError(err, "failed to delete pod") }) @@ -189,10 +190,10 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { }, } - serverDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(serverDeploymentSpec) + serverDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), serverDeploymentSpec) framework.ExpectNoError(err) - clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(clientDeploymentSpec) + clientDeployment, err := cs.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), clientDeploymentSpec) framework.ExpectNoError(err) err = e2edeploy.WaitForDeploymentComplete(cs, serverDeployment) @@ -243,7 +244,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { // ensure endpoint belong to same ipfamily as service if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) { - endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) + endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -285,7 +286,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { // ensure endpoints belong to same ipfamily as service if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) { - endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) + endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -327,7 +328,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { // ensure endpoints belong to same ipfamily as service if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) { - endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) + endpoint, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) if err != nil { return false, nil } diff --git a/test/e2e/network/endpointslice.go b/test/e2e/network/endpointslice.go index 2768911973f..4aa0b0d0ffe 100644 --- a/test/e2e/network/endpointslice.go +++ b/test/e2e/network/endpointslice.go @@ -17,6 +17,7 @@ limitations under the License. 
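// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of this patch): the dual-stack
// hunks above poll the Service's Endpoints and check that the endpoint
// addresses are in the same IP family as the Service. A hypothetical helper
// ("endpointsMatchServiceFamily") with that shape, using the Get signature
// from this diff:
package example

import (
	"context"
	"net"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// endpointsMatchServiceFamily waits until the Endpoints exist and every
// address matches the IP family of the Service ClusterIP.
func endpointsMatchServiceFamily(c clientset.Interface, svc *v1.Service) error {
	wantIPv4 := net.ParseIP(svc.Spec.ClusterIP).To4() != nil
	return wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) {
		ep, err := c.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{})
		if err != nil {
			return false, nil // endpoints not published yet
		}
		for _, subset := range ep.Subsets {
			for _, addr := range subset.Addresses {
				if (net.ParseIP(addr.IP).To4() != nil) != wantIPv4 {
					return false, nil
				}
			}
		}
		return len(ep.Subsets) > 0, nil
	})
}
// ---------------------------------------------------------------------------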
package network import ( + "context" "fmt" "time" @@ -157,12 +158,12 @@ var _ = SIGDescribe("EndpointSlice [Feature:EndpointSlice]", func() { } var err error - pod1, err = podClient.Get(pod1.Name, metav1.GetOptions{}) + pod1, err = podClient.Get(context.TODO(), pod1.Name, metav1.GetOptions{}) if err != nil { return false, err } - pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{}) + pod2, err = podClient.Get(context.TODO(), pod2.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -354,7 +355,7 @@ func expectEndpointsAndSlices(cs clientset.Interface, ns string, svc *v1.Service // met. func hasMatchingEndpointSlices(cs clientset.Interface, ns, svcName string, numEndpoints, numSlices int) ([]discoveryv1alpha1.EndpointSlice, bool) { listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1alpha1.LabelServiceName, svcName)} - esList, err := cs.DiscoveryV1alpha1().EndpointSlices(ns).List(listOptions) + esList, err := cs.DiscoveryV1alpha1().EndpointSlices(ns).List(context.TODO(), listOptions) framework.ExpectNoError(err, "Error fetching EndpointSlice for %s/%s Service", ns, svcName) if len(esList.Items) == 0 { @@ -381,7 +382,7 @@ func hasMatchingEndpointSlices(cs clientset.Interface, ns, svcName string, numEn // hasMatchingEndpoints returns any Endpoints that match the conditions along // with a boolean indicating if all the conditions have been met. func hasMatchingEndpoints(cs clientset.Interface, ns, svcName string, numIPs, numSubsets int) (*v1.Endpoints, bool) { - endpoints, err := cs.CoreV1().Endpoints(ns).Get(svcName, metav1.GetOptions{}) + endpoints, err := cs.CoreV1().Endpoints(ns).Get(context.TODO(), svcName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { framework.Logf("Endpoints for %s/%s Service not found", ns, svcName) @@ -427,7 +428,7 @@ func ensurePodTargetRef(pod *v1.Pod, targetRef *v1.ObjectReference) { // createServiceReportErr creates a Service and reports any associated error. func createServiceReportErr(cs clientset.Interface, ns string, service *v1.Service) *v1.Service { - svc, err := cs.CoreV1().Services(ns).Create(service) + svc, err := cs.CoreV1().Services(ns).Create(context.TODO(), service) framework.ExpectNoError(err) return svc } diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index 04358a6e198..ccf61dea14a 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -110,7 +110,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { for _, ns := range namespaces { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns.Name).List(options) + pods, err := c.CoreV1().Pods(ns.Name).List(context.TODO(), options) framework.ExpectNoError(err, "failed to list pods in namespace: %s", ns.Name) err = e2epod.PodsResponding(c, ns.Name, backendPodName, false, pods) framework.ExpectNoError(err, "waiting for all pods to respond") @@ -130,7 +130,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { // This code is probably unnecessary, but let's stay on the safe side. 
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(namespaces[0].Name).List(options) + pods, err := c.CoreV1().Pods(namespaces[0].Name).List(context.TODO(), options) if err != nil || pods == nil || len(pods.Items) == 0 { framework.Failf("no running pods found") diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index 9b9f3415d92..fa809e2088b 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -17,6 +17,7 @@ limitations under the License. package network import ( + "context" "fmt" "strings" "time" @@ -97,7 +98,7 @@ var _ = SIGDescribe("Firewall rule", func() { svc.Spec.LoadBalancerSourceRanges = nil }) framework.ExpectNoError(err) - err = cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + err = cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) framework.ExpectNoError(err) ginkgo.By("Waiting for the local traffic health check firewall rule to be deleted") localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false) @@ -150,14 +151,14 @@ var _ = SIGDescribe("Firewall rule", func() { pod.ObjectMeta.Labels = jig.Labels pod.Spec.NodeName = nodeName pod.Spec.HostNetwork = true - _, err := cs.CoreV1().Pods(ns).Create(pod) + _, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) framework.ExpectNoError(f.WaitForPodReady(podName)) framework.Logf("Netexec pod %q in namespace %q running", podName, ns) defer func() { framework.Logf("Cleaning up the netexec pod: %v", podName) - err = cs.CoreV1().Pods(ns).Delete(podName, nil) + err = cs.CoreV1().Pods(ns).Delete(context.TODO(), podName, nil) framework.ExpectNoError(err) }() } diff --git a/test/e2e/network/fixture.go b/test/e2e/network/fixture.go index 1b7884300c4..e53e854a1d5 100644 --- a/test/e2e/network/fixture.go +++ b/test/e2e/network/fixture.go @@ -17,6 +17,7 @@ limitations under the License. package network import ( + "context" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -84,7 +85,7 @@ func (t *TestFixture) BuildServiceSpec() *v1.Service { // CreateRC creates a replication controller and records it for cleanup. 
func (t *TestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) { - rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(rc) + rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(context.TODO(), rc) if err == nil { t.rcs[rc.Name] = true } @@ -93,7 +94,7 @@ func (t *TestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationCon // CreateService creates a service, and record it for cleanup func (t *TestFixture) CreateService(service *v1.Service) (*v1.Service, error) { - result, err := t.Client.CoreV1().Services(t.Namespace).Create(service) + result, err := t.Client.CoreV1().Services(t.Namespace).Create(context.TODO(), service) if err == nil { t.services[service.Name] = true } @@ -102,7 +103,7 @@ func (t *TestFixture) CreateService(service *v1.Service) (*v1.Service, error) { // DeleteService deletes a service, and remove it from the cleanup list func (t *TestFixture) DeleteService(serviceName string) error { - err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) + err := t.Client.CoreV1().Services(t.Namespace).Delete(context.TODO(), serviceName, nil) if err == nil { delete(t.services, serviceName) } @@ -116,7 +117,7 @@ func (t *TestFixture) Cleanup() []error { ginkgo.By("stopping RC " + rcName + " in namespace " + t.Namespace) err := retry.RetryOnConflict(retry.DefaultRetry, func() error { // First, resize the RC to 0. - old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{}) + old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(context.TODO(), rcName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { return nil @@ -125,7 +126,7 @@ func (t *TestFixture) Cleanup() []error { } x := int32(0) old.Spec.Replicas = &x - if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(old); err != nil { + if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(context.TODO(), old); err != nil { if apierrors.IsNotFound(err) { return nil } @@ -138,7 +139,7 @@ func (t *TestFixture) Cleanup() []error { } // TODO(mikedanese): Wait. // Then, delete the RC altogether. - if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { + if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(context.TODO(), rcName, nil); err != nil { if !apierrors.IsNotFound(err) { errs = append(errs, err) } @@ -147,7 +148,7 @@ func (t *TestFixture) Cleanup() []error { for serviceName := range t.services { ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace) - err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) + err := t.Client.CoreV1().Services(t.Namespace).Delete(context.TODO(), serviceName, nil) if err != nil { if !apierrors.IsNotFound(err) { errs = append(errs, err) diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 2e07fe137ce..694ed1e7ce1 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -17,6 +17,7 @@ limitations under the License. 
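// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of this patch): the fixture
// Cleanup above resizes each recorded ReplicationController to zero under
// retry.RetryOnConflict and then deletes it, threading context.TODO() through
// every call. "scaleDownAndDeleteRC" below is a hypothetical standalone
// version of that sequence.
package example

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// scaleDownAndDeleteRC scales the RC to zero replicas, retrying on update
// conflicts, and then deletes it. NotFound at any step is treated as done.
func scaleDownAndDeleteRC(c clientset.Interface, ns, name string) error {
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		rc, err := c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return nil // already gone
		}
		if err != nil {
			return err
		}
		zero := int32(0)
		rc.Spec.Replicas = &zero
		_, err = c.CoreV1().ReplicationControllers(ns).Update(context.TODO(), rc)
		return err
	})
	if err != nil {
		return err
	}
	if err := c.CoreV1().ReplicationControllers(ns).Delete(context.TODO(), name, nil); err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}
// ---------------------------------------------------------------------------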
package network import ( + "context" "encoding/json" "fmt" "net/http" @@ -172,7 +173,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) pollErr := wait.Poll(2*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { - ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) + ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) annotations := ing.Annotations if annotations == nil || annotations[instanceGroupAnnotation] == "" { @@ -195,7 +196,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { scKey := ingress.StatusPrefix + "/ssl-cert" beKey := ingress.StatusPrefix + "/backends" wait.Poll(2*time.Second, time.Minute, func() (bool, error) { - ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) + ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) annotations := ing.Annotations if annotations != nil && (annotations[umKey] != "" || annotations[fwKey] != "" || @@ -295,11 +296,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { framework.ExpectNoError(err) ginkgo.By("Switch backend service to use IG") - svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) + svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}` - _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) + _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc) framework.ExpectNoError(err) } err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { @@ -313,11 +314,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.WaitForIngress(true) ginkgo.By("Switch backend service to use NEG") - svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) + svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}` - _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) + _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc) framework.ExpectNoError(err) } err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { @@ -348,12 +349,12 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ginkgo.It("should sync endpoints to NEG", func() { name := "hostname" scaleAndValidateNEG := func(num int) { - scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) + scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) if scale.Spec.Replicas != int32(num) { scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = int32(num) - _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) + _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale) framework.ExpectNoError(err) } err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { @@ -400,11 +401,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { framework.ExpectNoError(err) 
ginkgo.By(fmt.Sprintf("Scale backend replicas to %d", replicas)) - scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) + scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = int32(replicas) - _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) + _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale) framework.ExpectNoError(err) err = wait.Poll(10*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { @@ -417,17 +418,17 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { framework.ExpectNoError(err) ginkgo.By("Trigger rolling update and observe service disruption") - deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) + deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) // trigger by changing graceful termination period to 60 seconds gracePeriod := int64(60) deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod - _, err = f.ClientSet.AppsV1().Deployments(ns).Update(deploy) + _, err = f.ClientSet.AppsV1().Deployments(ns).Update(context.TODO(), deploy) framework.ExpectNoError(err) err = wait.Poll(10*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() framework.ExpectNoError(err) - deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) + deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) if int(deploy.Status.UpdatedReplicas) == replicas { if res.Len() == replicas { @@ -448,16 +449,16 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { expectedKeys := []int32{80, 443} scaleAndValidateExposedNEG := func(num int) { - scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) + scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) if scale.Spec.Replicas != int32(num) { scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.Spec.Replicas = int32(num) - _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) + _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale) framework.ExpectNoError(err) } err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { - svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) var status ingress.NegStatus @@ -537,46 +538,46 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Add Ingress annotation - NEGs should stay the same. 
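// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of this patch): the NEG tests
// above scale the backend Deployment through its Scale subresource. At this
// stage of the migration GetScale/UpdateScale take a context but no separate
// options argument, matching the hunks above. "scaleDeployment" is a
// hypothetical helper.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// scaleDeployment reads the Scale subresource, adjusts replicas if needed,
// and writes it back unconditionally.
func scaleDeployment(c clientset.Interface, ns, name string, replicas int32) error {
	scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if scale.Spec.Replicas == replicas {
		return nil // already at the desired size
	}
	scale.ResourceVersion = "" // unconditional update, as in the tests above
	scale.Spec.Replicas = replicas
	_, err = c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale)
	return err
}
// ---------------------------------------------------------------------------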
ginkgo.By("Adding NEG Ingress annotation") - svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) + svcList, err := f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}` - _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) + _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc) framework.ExpectNoError(err) } detectNegAnnotation(f, jig, gceController, ns, name, 2) // Modify exposed NEG annotation, but keep ingress annotation ginkgo.By("Modifying exposed NEG annotation, but keep Ingress annotation") - svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) + svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}` - _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) + _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc) framework.ExpectNoError(err) } detectNegAnnotation(f, jig, gceController, ns, name, 2) // Remove Ingress annotation. Expect 1 NEG ginkgo.By("Disabling Ingress annotation, but keeping one standalone NEG") - svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) + svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}` - _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) + _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc) framework.ExpectNoError(err) } detectNegAnnotation(f, jig, gceController, ns, name, 1) // Remove NEG annotation entirely. Expect 0 NEGs. ginkgo.By("Removing NEG annotation") - svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) + svcList, err = f.ClientSet.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, svc := range svcList.Items { delete(svc.Annotations, ingress.NEGAnnotation) // Service cannot be ClusterIP if it's using Instance Groups. svc.Spec.Type = v1.ServiceTypeNodePort - _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) + _, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc) framework.ExpectNoError(err) } detectNegAnnotation(f, jig, gceController, ns, name, 0) @@ -875,7 +876,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) { if err := wait.Poll(5*time.Second, negUpdateTimeout, func() (bool, error) { - svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, nil } diff --git a/test/e2e/network/kube_proxy.go b/test/e2e/network/kube_proxy.go index 300d614513b..0fb9a0affdc 100644 --- a/test/e2e/network/kube_proxy.go +++ b/test/e2e/network/kube_proxy.go @@ -17,6 +17,7 @@ limitations under the License. 
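// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of this patch): the hunks
// above repeatedly list the namespace's Services and rewrite their NEG
// annotation, with context.TODO() on both the List and the Update.
// "setServiceAnnotation" is a hypothetical helper with that shape; the
// annotation key and value are whatever the caller passes in.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// setServiceAnnotation sets key=value on every Service in the namespace.
func setServiceAnnotation(c clientset.Interface, ns, key, value string) error {
	svcList, err := c.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range svcList.Items {
		svc := &svcList.Items[i] // address of the slice element, not a loop copy
		if svc.Annotations == nil {
			svc.Annotations = map[string]string{}
		}
		svc.Annotations[key] = value
		if _, err := c.CoreV1().Services(ns).Update(context.TODO(), svc); err != nil {
			return err
		}
	}
	return nil
}
// ---------------------------------------------------------------------------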
package network import ( + "context" "encoding/json" "fmt" "math" @@ -267,7 +268,7 @@ var _ = SIGDescribe("Network", func() { }, }, } - _, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(serverPod) + _, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(context.TODO(), serverPod) framework.ExpectNoError(err) err = e2epod.WaitForPodsRunningReady(fr.ClientSet, fr.Namespace.Name, 1, 0, framework.PodReadyBeforeTimeout, map[string]string{}) @@ -289,7 +290,7 @@ var _ = SIGDescribe("Network", func() { }, }, } - _, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(svc) + _, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(context.TODO(), svc) framework.ExpectNoError(err) ginkgo.By("Server service created") @@ -324,14 +325,14 @@ var _ = SIGDescribe("Network", func() { RestartPolicy: v1.RestartPolicyNever, }, } - _, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(pod) + _, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Client pod created") for i := 0; i < 20; i++ { time.Sleep(3 * time.Second) - resultPod, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Get(serverPod.Name, metav1.GetOptions{}) + resultPod, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Get(context.TODO(), serverPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) gomega.Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(gomega.BeNil()) } diff --git a/test/e2e/network/network_policy.go b/test/e2e/network/network_policy.go index 676e6b25e2c..3388bd6bf65 100644 --- a/test/e2e/network/network_policy.go +++ b/test/e2e/network/network_policy.go @@ -17,6 +17,7 @@ limitations under the License. package network import ( + "context" "encoding/json" v1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" @@ -87,7 +88,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -140,7 +141,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -193,7 +194,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -227,7 +228,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -279,7 +280,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy) + policy, err = 
f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -325,7 +326,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -371,7 +372,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -438,7 +439,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err, "Error occurred while creating policy: policy.") defer cleanupNetworkPolicy(f, policy) @@ -477,7 +478,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -507,7 +508,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -531,7 +532,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy2) + policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy2) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy2) @@ -554,7 +555,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { Ingress: []networkingv1.NetworkPolicyIngressRule{{}}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -584,7 +585,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -631,7 +632,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) 
framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -669,7 +670,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err) defer cleanupNetworkPolicy(f, policy) @@ -713,7 +714,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) testCanConnect(f, f.Namespace, "client-a", service, clientAAllowedPort) @@ -759,7 +760,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }}, }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Update(policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Update(context.TODO(), policy) framework.ExpectNoError(err, "Error updating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -805,17 +806,17 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy) + policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(context.TODO(), policy) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) testCannotConnect(f, nsB, "client-a", service, allowedPort) - nsB, err = f.ClientSet.CoreV1().Namespaces().Get(nsB.Name, metav1.GetOptions{}) + nsB, err = f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), nsB.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Error getting Namespace %v: %v", nsB.ObjectMeta.Name, err) nsB.ObjectMeta.Labels["ns-name"] = newNsBName - nsB, err = f.ClientSet.CoreV1().Namespaces().Update(nsB) + nsB, err = f.ClientSet.CoreV1().Namespaces().Update(context.TODO(), nsB) framework.ExpectNoError(err, "Error updating Namespace %v: %v", nsB.ObjectMeta.Name, err) testCanConnect(f, nsB, "client-b", service, allowedPort) @@ -847,7 +848,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) + policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy) framework.ExpectNoError(err, "Error creating Network Policy %v: %v", policy.ObjectMeta.Name, err) defer cleanupNetworkPolicy(f, policy) @@ -855,7 +856,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { podClient := createNetworkClientPod(f, f.Namespace, "client-a", service, allowedPort) defer func() { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name)) - if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podClient.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podClient.Name, nil); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }() @@ -942,7 +943,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyAllowToServerInNSB, err = 
f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowToServerInNSB) + policyAllowToServerInNSB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToServerInNSB) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToServerInNSB.") defer cleanupNetworkPolicy(f, policyAllowToServerInNSB) @@ -985,7 +986,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyAllowOnlyFromClientB, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowOnlyFromClientB) + policyAllowOnlyFromClientB, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyFromClientB) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyFromClientB.") defer cleanupNetworkPolicy(f, policyAllowOnlyFromClientB) @@ -1012,7 +1013,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyIngressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyIngressAllowAll) + policyIngressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyIngressAllowAll) framework.ExpectNoError(err, "Error occurred while creating policy: policyIngressAllowAll.") defer cleanupNetworkPolicy(f, policyIngressAllowAll) @@ -1078,7 +1079,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, }, } - policyAllowOnlyToServerA, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowOnlyToServerA) + policyAllowOnlyToServerA, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowOnlyToServerA) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowOnlyToServerA.") defer cleanupNetworkPolicy(f, policyAllowOnlyToServerA) @@ -1104,7 +1105,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyEgressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyEgressAllowAll) + policyEgressAllowAll, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyEgressAllowAll) framework.ExpectNoError(err, "Error occurred while creating policy: policyEgressAllowAll.") defer cleanupNetworkPolicy(f, policyEgressAllowAll) @@ -1131,7 +1132,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyDenyAll, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyDenyAll) + policyDenyAll, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyAll) framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyAll.") ginkgo.By("Creating client-a which should not be able to contact the server.", func() { @@ -1165,7 +1166,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyAllowFromClientA, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowFromClientA) + policyAllowFromClientA, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowFromClientA) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowFromClientA.") ginkgo.By("Creating client-a which should be able to contact the server.", func() { @@ -1195,7 +1196,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { protocolUDP := v1.ProtocolUDP // Getting podServer's 
status to get podServer's IP, to create the CIDR - podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podServer.Name, metav1.GetOptions{}) + podServerStatus, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podServer.Name, metav1.GetOptions{}) if err != nil { framework.ExpectNoError(err, "Error occurred while getting pod status.") } @@ -1255,7 +1256,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyAllowCIDR, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowCIDR) + policyAllowCIDR, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowCIDR) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowCIDR.") defer cleanupNetworkPolicy(f, policyAllowCIDR) @@ -1334,7 +1335,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyAllowToPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyAllowToPodB) + policyAllowToPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyAllowToPodB) framework.ExpectNoError(err, "Error occurred while creating policy: policyAllowToPodB.") defer cleanupNetworkPolicy(f, policyAllowToPodB) @@ -1357,7 +1358,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() { }, } - policyDenyFromPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policyDenyFromPodB) + policyDenyFromPodB, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policyDenyFromPodB) framework.ExpectNoError(err, "Error occurred while creating policy: policyDenyFromPodB.") defer cleanupNetworkPolicy(f, policyDenyFromPodB) @@ -1388,7 +1389,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se podClient := createNetworkClientPod(f, ns, podName, service, targetPort) defer func() { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name)) - if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, nil); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }() @@ -1400,7 +1401,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, podClient := createNetworkClientPod(f, ns, podName, service, targetPort) defer func() { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name)) - if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podClient.Name, nil); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }() @@ -1422,7 +1423,7 @@ func checkConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1.P } // Collect current NetworkPolicies applied in the test namespace. - policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{}) + policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err) } @@ -1459,7 +1460,7 @@ func checkNoConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1 } // Collect current NetworkPolicies applied in the test namespace. 
- policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{}) + policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err) } @@ -1530,7 +1531,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, } ginkgo.By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name)) - pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(&v1.Pod{ + pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: podName + "-", Labels: map[string]string{ @@ -1547,7 +1548,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, svcName := fmt.Sprintf("svc-%s", podName) ginkgo.By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name)) - svc, err := f.ClientSet.CoreV1().Services(namespace.Name).Create(&v1.Service{ + svc, err := f.ClientSet.CoreV1().Services(namespace.Name).Create(context.TODO(), &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: svcName, }, @@ -1566,11 +1567,11 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) { ginkgo.By("Cleaning up the server.") - if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, nil); err != nil { framework.Failf("unable to cleanup pod %v: %v", pod.Name, err) } ginkgo.By("Cleaning up the server's service.") - if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, nil); err != nil { framework.Failf("unable to cleanup svc %v: %v", service.Name, err) } } @@ -1579,7 +1580,7 @@ func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1 // This client will attempt a one-shot connection, then die, without restarting the pod. // Test can then be asserted based on whether the pod quit with an error or not. 
func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, podName string, targetService *v1.Service, targetPort int) *v1.Pod { - pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(&v1.Pod{ + pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: podName + "-", Labels: map[string]string{ @@ -1622,7 +1623,7 @@ func updateNetworkClientPodLabel(f *framework.Framework, namespace *v1.Namespace payloadBytes, err := json.Marshal(payload) framework.ExpectNoError(err) - pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Patch(podName, types.JSONPatchType, payloadBytes) + pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Patch(context.TODO(), podName, types.JSONPatchType, payloadBytes) framework.ExpectNoError(err) return pod @@ -1630,7 +1631,7 @@ func updateNetworkClientPodLabel(f *framework.Framework, namespace *v1.Namespace func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) { ginkgo.By("Cleaning up the policy.") - if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil { + if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(context.TODO(), policy.Name, nil); err != nil { framework.Failf("unable to cleanup policy %v: %v", policy.Name, err) } } diff --git a/test/e2e/network/networking.go b/test/e2e/network/networking.go index 824092bade9..bafd589b4ec 100644 --- a/test/e2e/network/networking.go +++ b/test/e2e/network/networking.go @@ -69,7 +69,7 @@ func checkConnectivityToHost(f *framework.Framework, nodeName, podName, host str }, } podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) - _, err := podClient.Create(pod) + _, err := podClient.Create(context.TODO(), pod) if err != nil { return err } diff --git a/test/e2e/network/no_snat.go b/test/e2e/network/no_snat.go index 7a61c1f21f9..730edca1ca0 100644 --- a/test/e2e/network/no_snat.go +++ b/test/e2e/network/no_snat.go @@ -17,6 +17,7 @@ limitations under the License. 
package network import ( + "context" "fmt" "io/ioutil" "net/http" @@ -138,7 +139,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { nc := cs.CoreV1().Nodes() ginkgo.By("creating a test pod on each Node") - nodes, err := nc.List(metav1.ListOptions{}) + nodes, err := nc.List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) if len(nodes.Items) == 0 { framework.ExpectNoError(fmt.Errorf("no Nodes in the cluster")) @@ -150,7 +151,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { // target Pod at Node and feed Pod Node's InternalIP pod := newTestPod(node.Name, inIP) - _, err = pc.Create(pod) + _, err = pc.Create(context.TODO(), pod) framework.ExpectNoError(err) } @@ -171,12 +172,12 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { framework.ExpectNoError(err) proxyNodeIP := extIP + ":" + strconv.Itoa(testProxyPort) - _, err = pc.Create(newTestProxyPod(node.Name)) + _, err = pc.Create(context.TODO(), newTestProxyPod(node.Name)) framework.ExpectNoError(err) ginkgo.By("waiting for all of the no-snat-test pods to be scheduled and running") err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) { - pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"}) + pods, err := pc.List(context.TODO(), metav1.ListOptions{LabelSelector: "no-snat-test"}) if err != nil { return false, err } @@ -196,7 +197,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { ginkgo.By("waiting for the no-snat-test-proxy Pod to be scheduled and running") err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) { - pod, err := pc.Get("no-snat-test-proxy", metav1.GetOptions{}) + pod, err := pc.Get(context.TODO(), "no-snat-test-proxy", metav1.GetOptions{}) if err != nil { return false, err } @@ -211,7 +212,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { framework.ExpectNoError(err) ginkgo.By("sending traffic from each pod to the others and checking that SNAT does not occur") - pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"}) + pods, err := pc.List(context.TODO(), metav1.ListOptions{LabelSelector: "no-snat-test"}) framework.ExpectNoError(err) // collect pod IPs diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index a2a0d7b8008..d9dec789610 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -87,7 +87,7 @@ var _ = SIGDescribe("Proxy", func() { framework.ConformanceIt("should proxy through a service and a pod ", func() { start := time.Now() labels := map[string]string{"proxy-service-target": "true"} - service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{ + service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), &v1.Service{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "proxy-service-", }, @@ -322,7 +322,7 @@ func waitForEndpoint(c clientset.Interface, ns, name string) error { // registerTimeout is how long to wait for an endpoint to be registered. 
registerTimeout := time.Minute for t := time.Now(); time.Since(t) < registerTimeout; time.Sleep(framework.Poll) { - endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{}) + endpoint, err := c.CoreV1().Endpoints(ns).Get(context.TODO(), name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { framework.Logf("Endpoint %s/%s is not ready yet", ns, name) continue diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go index 78f6f6cebbe..b022f421caa 100644 --- a/test/e2e/network/scale/ingress.go +++ b/test/e2e/network/scale/ingress.go @@ -17,6 +17,7 @@ limitations under the License. package scale import ( + "context" "fmt" "io/ioutil" "sync" @@ -134,7 +135,7 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error { f.Logger.Infof("Cleaning up ingresses...") for _, ing := range f.ScaleTestIngs { if ing != nil { - if err := f.Clientset.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil); err != nil { + if err := f.Clientset.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(context.TODO(), ing.Name, nil); err != nil { errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err)) } } @@ -142,14 +143,14 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error { f.Logger.Infof("Cleaning up services...") for _, svc := range f.ScaleTestSvcs { if svc != nil { - if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil); err != nil { + if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil); err != nil { errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err)) } } } if f.ScaleTestDeploy != nil { f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name) - if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(f.ScaleTestDeploy.Name, nil); err != nil { + if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(context.TODO(), f.ScaleTestDeploy.Name, nil); err != nil { errs = append(errs, fmt.Errorf("error while delting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err)) } } @@ -168,7 +169,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { testDeploy := generateScaleTestBackendDeploymentSpec(scaleTestNumBackends) f.Logger.Infof("Creating deployment %s...", testDeploy.Name) - testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(testDeploy) + testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(context.TODO(), testDeploy) if err != nil { errs = append(errs, fmt.Errorf("failed to create deployment %s: %v", testDeploy.Name, err)) return errs @@ -271,14 +272,14 @@ func (f *IngressScaleFramework) RunScaleTest() []error { f.StepCreateLatencies = append(f.StepCreateLatencies, elapsed) f.Logger.Infof("Updating ingress and wait for change to take effect") - ingToUpdate, err := f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Get(ingCreated.Name, metav1.GetOptions{}) + ingToUpdate, err := f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Get(context.TODO(), ingCreated.Name, metav1.GetOptions{}) if err != nil { errs = append(errs, err) return } addTestPathToIngress(ingToUpdate) start = time.Now() - ingToUpdate, err = f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Update(ingToUpdate) + ingToUpdate, err = f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ingToUpdate) if err != nil { errs = append(errs, err) return @@ -368,11 
+369,11 @@ func addTestPathToIngress(ing *networkingv1beta1.Ingress) { } func (f *IngressScaleFramework) createScaleTestServiceIngress(suffix string, enableTLS bool) (*v1.Service, *networkingv1beta1.Ingress, error) { - svcCreated, err := f.Clientset.CoreV1().Services(f.Namespace).Create(generateScaleTestServiceSpec(suffix)) + svcCreated, err := f.Clientset.CoreV1().Services(f.Namespace).Create(context.TODO(), generateScaleTestServiceSpec(suffix)) if err != nil { return nil, nil, err } - ingCreated, err := f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Create(generateScaleTestIngressSpec(suffix, enableTLS)) + ingCreated, err := f.Clientset.NetworkingV1beta1().Ingresses(f.Namespace).Create(context.TODO(), generateScaleTestIngressSpec(suffix, enableTLS)) if err != nil { return nil, nil, err } diff --git a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go index ecd9d7c8b9a..31719d2e629 100644 --- a/test/e2e/network/scale/localrun/ingress_scale.go +++ b/test/e2e/network/scale/localrun/ingress_scale.go @@ -17,6 +17,7 @@ limitations under the License. package main import ( + "context" "flag" "fmt" "os" @@ -135,7 +136,7 @@ func main() { }, } klog.Infof("Creating namespace %s...", ns.Name) - if _, err := cs.CoreV1().Namespaces().Create(ns); err != nil { + if _, err := cs.CoreV1().Namespaces().Create(context.TODO(), ns); err != nil { klog.Errorf("Failed to create namespace %s: %v", ns.Name, err) testSuccessFlag = false return @@ -143,7 +144,7 @@ func main() { if cleanup { defer func() { klog.Infof("Deleting namespace %s...", ns.Name) - if err := cs.CoreV1().Namespaces().Delete(ns.Name, nil); err != nil { + if err := cs.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, nil); err != nil { klog.Errorf("Failed to delete namespace %s: %v", ns.Name, err) testSuccessFlag = false } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 54c744cf7a4..78463773715 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -200,7 +200,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string podNames := make([]string, replicas) name := svc.ObjectMeta.Name ginkgo.By("creating service " + name + " in namespace " + ns) - _, err := c.CoreV1().Services(ns).Create(svc) + _, err := c.CoreV1().Services(ns).Create(context.TODO(), svc) if err != nil { return podNames, "", err } @@ -233,7 +233,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string } sort.StringSlice(podNames).Sort() - service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) + service, err := c.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return podNames, "", err } @@ -249,7 +249,7 @@ func StopServeHostnameService(clientset clientset.Interface, ns, name string) er if err := e2erc.DeleteRCAndWaitForGC(clientset, ns, name); err != nil { return err } - if err := clientset.CoreV1().Services(ns).Delete(name, nil); err != nil { + if err := clientset.CoreV1().Services(ns).Delete(context.TODO(), name, nil); err != nil { return err } return nil @@ -713,7 +713,7 @@ var _ = SIGDescribe("Services", func() { Description: By default when a kubernetes cluster is running there MUST be a ‘kubernetes’ service running in the cluster. 
*/ framework.ConformanceIt("should provide secure master service ", func() { - _, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) + _, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch the service object for the service named kubernetes") }) @@ -729,7 +729,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("creating service " + serviceName + " in namespace " + ns) defer func() { - err := cs.CoreV1().Services(ns).Delete(serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() _, err := jig.CreateTCPServiceWithPort(nil, 80) @@ -741,7 +741,7 @@ var _ = SIGDescribe("Services", func() { names := map[string]bool{} defer func() { for name := range names { - err := cs.CoreV1().Pods(ns).Delete(name, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, nil) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns) } }() @@ -782,7 +782,7 @@ var _ = SIGDescribe("Services", func() { jig := e2eservice.NewTestJig(cs, ns, serviceName) defer func() { - err := cs.CoreV1().Services(ns).Delete(serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() @@ -814,7 +814,7 @@ var _ = SIGDescribe("Services", func() { names := map[string]bool{} defer func() { for name := range names { - err := cs.CoreV1().Pods(ns).Delete(name, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, nil) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns) } }() @@ -880,7 +880,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the sourceip test service") - err := cs.CoreV1().Services(ns).Delete(serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() serviceIP := tcpService.Spec.ClusterIP @@ -898,12 +898,12 @@ var _ = SIGDescribe("Services", func() { serverPodName := "echo-sourceip" pod := f.NewAgnhostPod(serverPodName, "netexec", "--http-port", strconv.Itoa(servicePort)) pod.Labels = jig.Labels - _, err = cs.CoreV1().Pods(ns).Create(pod) + _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) framework.ExpectNoError(f.WaitForPodReady(pod.Name)) defer func() { framework.Logf("Cleaning up the echo server pod") - err := cs.CoreV1().Pods(ns).Delete(serverPodName, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), serverPodName, nil) framework.ExpectNoError(err, "failed to delete pod: %s on node", serverPodName) }() @@ -916,17 +916,17 @@ var _ = SIGDescribe("Services", func() { defer func() { framework.Logf("Deleting deployment") - err = cs.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{}) + err = cs.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name) }() framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment") - deployment, err = cs.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) + 
deployment, err = cs.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Error in retrieving pause pod deployment") labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - pausePods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labelSelector.String()}) + pausePods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()}) framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments") gomega.Expect(pausePods.Items[0].Spec.NodeName).ToNot(gomega.Equal(pausePods.Items[1].Spec.NodeName)) @@ -956,7 +956,7 @@ var _ = SIGDescribe("Services", func() { serverPodName := "hairpin" podTemplate := f.NewAgnhostPod(serverPodName, "netexec", "--http-port", strconv.Itoa(servicePort)) podTemplate.Labels = jig.Labels - pod, err := cs.CoreV1().Pods(ns).Create(podTemplate) + pod, err := cs.CoreV1().Pods(ns).Create(context.TODO(), podTemplate) framework.ExpectNoError(err) framework.ExpectNoError(f.WaitForPodReady(pod.Name)) @@ -1538,7 +1538,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the updating NodePorts test service") - err := cs.CoreV1().Services(ns).Delete(serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) }() framework.Logf("Service Port TCP: %v", tcpService.Spec.Ports[0].Port) @@ -1610,7 +1610,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the ExternalName to ClusterIP test service") - err := cs.CoreV1().Services(ns).Delete(serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() @@ -1649,7 +1649,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the ExternalName to NodePort test service") - err := cs.CoreV1().Services(ns).Delete(serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() @@ -1687,7 +1687,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the ClusterIP to ExternalName test service") - err := cs.CoreV1().Services(ns).Delete(serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() @@ -1729,7 +1729,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) defer func() { framework.Logf("Cleaning up the NodePort to ExternalName test service") - err := cs.CoreV1().Services(ns).Delete(serviceName, nil) + err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, nil) framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns) }() @@ -2069,13 +2069,13 @@ var _ = SIGDescribe("Services", func() { label := labels.SelectorFromSet(labels.Set(t.Labels)) options := metav1.ListOptions{LabelSelector: label.String()} podClient := t.Client.CoreV1().Pods(f.Namespace.Name) - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) if 
err != nil { framework.Logf("warning: error retrieving pods: %s", err) } else { for _, pod := range pods.Items { var gracePeriodSeconds int64 = 0 - err := podClient.Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}) + err := podClient.Delete(context.TODO(), pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}) if err != nil { framework.Logf("warning: error force deleting pod '%s': %s", pod.Name, err) } @@ -2109,7 +2109,7 @@ var _ = SIGDescribe("Services", func() { _, err := jig.Run(nil) framework.ExpectNoError(err) // Make sure acceptPod is running. There are certain chances that pod might be teminated due to unexpected reasons. - acceptPod, err = cs.CoreV1().Pods(namespace).Get(acceptPod.Name, metav1.GetOptions{}) + acceptPod, err = cs.CoreV1().Pods(namespace).Get(context.TODO(), acceptPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name) framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning) framework.ExpectNotEqual(acceptPod.Status.PodIP, "") @@ -2140,7 +2140,7 @@ var _ = SIGDescribe("Services", func() { checkReachabilityFromPod(false, normalReachabilityTimeout, namespace, dropPod.Name, svcIP) // Make sure dropPod is running. There are certain chances that the pod might be teminated due to unexpected reasons. dropPod, err = cs.CoreV1().Pods(namespace).Get(dropPod.Name, metav1.GetOptions{}) - dropPod, err = cs.CoreV1().Pods(namespace).Get(dropPod.Name, metav1.GetOptions{}) + dropPod, err = cs.CoreV1().Pods(namespace).Get(context.TODO(), dropPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get pod %s", dropPod.Name) framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning) framework.ExpectNotEqual(acceptPod.Status.PodIP, "") @@ -2239,7 +2239,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName) if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { - svc, err := cs.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{}) + svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) if err != nil { return false, err } @@ -2268,7 +2268,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName) if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { - svc, err := cs.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{}) + svc, err := cs.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) if err != nil { return false, err } @@ -2647,7 +2647,7 @@ var _ = SIGDescribe("Services", func() { */ framework.ConformanceIt("should find a service from listing all namespaces", func() { ginkgo.By("fetching services") - svcs, _ := f.ClientSet.CoreV1().Services("").List(metav1.ListOptions{}) + svcs, _ := f.ClientSet.CoreV1().Services("").List(context.TODO(), metav1.ListOptions{}) foundSvc := false for _, svc := range svcs.Items { @@ -2713,7 +2713,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { err := TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", e2eservice.KubeProxyEndpointLagTimeout, false, threshold) framework.ExpectNoError(err) } - err = cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + err = 
cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) framework.ExpectNoError(err) }() @@ -2739,7 +2739,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { svc, err := jig.CreateOnlyLocalNodePortService(true) framework.ExpectNoError(err) defer func() { - err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) framework.ExpectNoError(err) }() @@ -2782,7 +2782,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { defer func() { err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) framework.ExpectNoError(err) - err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) framework.ExpectNoError(err) }() @@ -2843,7 +2843,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { defer func() { err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) framework.ExpectNoError(err) - err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) framework.ExpectNoError(err) }() @@ -2858,16 +2858,16 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { defer func() { framework.Logf("Deleting deployment") - err = cs.AppsV1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{}) + err = cs.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name) }() - deployment, err = cs.AppsV1().Deployments(namespace).Get(deployment.Name, metav1.GetOptions{}) + deployment, err = cs.AppsV1().Deployments(namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Error in retrieving pause pod deployment") labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) framework.ExpectNoError(err, "Error in setting LabelSelector as selector from deployment") - pausePods, err := cs.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: labelSelector.String()}) + pausePods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()}) framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments") pausePod := pausePods.Items[0] @@ -2906,7 +2906,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { defer func() { err = jig.ChangeServiceType(v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) framework.ExpectNoError(err) - err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) + err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil) framework.ExpectNoError(err) }() @@ -3061,7 +3061,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor StopServeHostnameService(cs, ns, serviceName) }() jig := e2eservice.NewTestJig(cs, ns, serviceName) - svc, err = jig.Client.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{}) + svc, err = jig.Client.CoreV1().Services(ns).Get(context.TODO(), serviceName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns) var svcIP string if serviceType == v1.ServiceTypeNodePort { @@ -3078,7 +3078,7 @@ func 
execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) defer func() { framework.Logf("Cleaning up the exec pod") - err := cs.CoreV1().Pods(ns).Delete(execPod.Name, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), execPod.Name, nil) framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns) }() err = jig.CheckServiceReachability(svc, execPod) @@ -3178,7 +3178,7 @@ func createPausePodDeployment(cs clientset.Interface, name, ns string, replicas }, } - deployment, err := cs.AppsV1().Deployments(ns).Create(pauseDeployment) + deployment, err := cs.AppsV1().Deployments(ns).Create(context.TODO(), pauseDeployment) framework.ExpectNoError(err, "Error in creating deployment for pause pod") return deployment } @@ -3205,7 +3205,7 @@ func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]s }, }, } - _, err := c.CoreV1().Pods(ns).Create(pod) + _, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err, "failed to create pod %s in namespace %s", name, ns) } @@ -3213,7 +3213,7 @@ func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]s // until it's Running func launchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod { hostExecPod := e2epod.NewExecPodSpec(ns, name, true) - pod, err := client.CoreV1().Pods(ns).Create(hostExecPod) + pod, err := client.CoreV1().Pods(ns).Create(context.TODO(), hostExecPod) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(client, pod) framework.ExpectNoError(err) @@ -3301,7 +3301,7 @@ func validatePorts(ep e2eendpoints.PortsByPodUID, expectedEndpoints e2eendpoints func translatePodNameToUID(c clientset.Interface, ns string, expectedEndpoints portsByPodName) (e2eendpoints.PortsByPodUID, error) { portsByUID := make(e2eendpoints.PortsByPodUID) for name, portList := range expectedEndpoints { - pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. 
validation failed: %s", name, err) } @@ -3315,7 +3315,7 @@ func validateEndpointsPorts(c clientset.Interface, namespace, serviceName string ginkgo.By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", framework.ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) i := 1 for start := time.Now(); time.Since(start) < framework.ServiceStartTimeout; time.Sleep(1 * time.Second) { - ep, err := c.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) + ep, err := c.CoreV1().Endpoints(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) if err != nil { framework.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err) continue @@ -3339,7 +3339,7 @@ func validateEndpointsPorts(c clientset.Interface, namespace, serviceName string } i++ } - if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil { + if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}); err == nil { for _, pod := range pods.Items { framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp) } diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go index 741dbae4fca..f02f9d929de 100644 --- a/test/e2e/network/service_latency.go +++ b/test/e2e/network/service_latency.go @@ -17,6 +17,7 @@ limitations under the License. package network import ( + "context" "fmt" "sort" "strings" @@ -295,11 +296,11 @@ func startEndpointWatcher(f *framework.Framework, q *endpointQueries) { _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - obj, err := f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).List(options) + obj, err := f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).List(context.TODO(), options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).Watch(options) + return f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).Watch(context.TODO(), options) }, }, &v1.Endpoints{}, @@ -344,7 +345,7 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie }, } startTime := time.Now() - gotSvc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc) + gotSvc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc) if err != nil { return 0, err } diff --git a/test/e2e/node/events.go b/test/e2e/node/events.go index 0eb9aac7cc2..7c282c629c4 100644 --- a/test/e2e/node/events.go +++ b/test/e2e/node/events.go @@ -17,6 +17,7 @@ limitations under the License. 
package node import ( + "context" "strconv" "time" @@ -69,9 +70,9 @@ var _ = SIGDescribe("Events", func() { ginkgo.By("submitting the pod to kubernetes") defer func() { ginkgo.By("deleting the pod") - podClient.Delete(pod.Name, nil) + podClient.Delete(context.TODO(), pod.Name, nil) }() - if _, err := podClient.Create(pod); err != nil { + if _, err := podClient.Create(context.TODO(), pod); err != nil { framework.Failf("Failed to create pod: %v", err) } @@ -80,11 +81,11 @@ var _ = SIGDescribe("Events", func() { ginkgo.By("verifying the pod is in kubernetes") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) framework.ExpectEqual(len(pods.Items), 1) ginkgo.By("retrieving the pod") - podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{}) + podWithUID, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get pod: %v", err) } @@ -100,7 +101,7 @@ var _ = SIGDescribe("Events", func() { "source": v1.DefaultSchedulerName, }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options) + events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), options) if err != nil { return false, err } @@ -120,7 +121,7 @@ var _ = SIGDescribe("Events", func() { "source": "kubelet", }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - events, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options) + events, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), options) if err != nil { return false, err } diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index 959b2a82868..368cf2fbcc6 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "fmt" "path/filepath" "strings" @@ -168,13 +169,13 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, }, }, } - rtnPod, err := c.CoreV1().Pods(ns).Create(pod) + rtnPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) err = f.WaitForPodReady(rtnPod.Name) // running & ready framework.ExpectNoError(err) - rtnPod, err = c.CoreV1().Pods(ns).Get(rtnPod.Name, metav1.GetOptions{}) // return fresh pod + rtnPod, err = c.CoreV1().Pods(ns).Get(context.TODO(), rtnPod.Name, metav1.GetOptions{}) // return fresh pod framework.ExpectNoError(err) return rtnPod } @@ -183,7 +184,7 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP, // address. Returns an error if the node the pod is on doesn't have an External // address. func getHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) { - node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/test/e2e/node/node_problem_detector.go b/test/e2e/node/node_problem_detector.go index 47d7dd9ff5e..51c67714518 100644 --- a/test/e2e/node/node_problem_detector.go +++ b/test/e2e/node/node_problem_detector.go @@ -17,6 +17,7 @@ limitations under the License. 
package node import ( + "context" "fmt" "net" "sort" @@ -193,7 +194,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() { }) func verifyEvents(f *framework.Framework, options metav1.ListOptions, num int, reason, nodeName string) error { - events, err := f.ClientSet.CoreV1().Events(metav1.NamespaceDefault).List(options) + events, err := f.ClientSet.CoreV1().Events(metav1.NamespaceDefault).List(context.TODO(), options) if err != nil { return err } @@ -211,7 +212,7 @@ func verifyEvents(f *framework.Framework, options metav1.ListOptions, num int, r } func verifyEventExists(f *framework.Framework, options metav1.ListOptions, reason, nodeName string) error { - events, err := f.ClientSet.CoreV1().Events(metav1.NamespaceDefault).List(options) + events, err := f.ClientSet.CoreV1().Events(metav1.NamespaceDefault).List(context.TODO(), options) if err != nil { return err } @@ -224,7 +225,7 @@ func verifyEventExists(f *framework.Framework, options metav1.ListOptions, reaso } func verifyNodeCondition(f *framework.Framework, condition v1.NodeConditionType, status v1.ConditionStatus, reason, nodeName string) error { - node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e/node/pod_gc.go b/test/e2e/node/pod_gc.go index b1e680e173c..f7799cca7e6 100644 --- a/test/e2e/node/pod_gc.go +++ b/test/e2e/node/pod_gc.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "fmt" "time" @@ -44,7 +45,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]" } pod.ResourceVersion = "" pod.Status.Phase = v1.PodFailed - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(context.TODO(), pod) if err != nil { framework.Failf("err failing pod: %v", err) } @@ -66,7 +67,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]" ginkgo.By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold)) pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) { - pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Logf("Failed to list pod %v", err) return false, nil @@ -99,5 +100,5 @@ func createTerminatingPod(f *framework.Framework) (*v1.Pod, error) { SchedulerName: "please don't schedule my pods", }, } - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) } diff --git a/test/e2e/node/pods.go b/test/e2e/node/pods.go index 5d56ed87cf3..a16468312cd 100644 --- a/test/e2e/node/pods.go +++ b/test/e2e/node/pods.go @@ -76,7 +76,7 @@ var _ = SIGDescribe("Pods Extended", func() { ginkgo.By("setting up selector") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pod") framework.ExpectEqual(len(pods.Items), 0) options = metav1.ListOptions{ @@ -90,7 +90,7 @@ var _ = SIGDescribe("Pods Extended", func() { ginkgo.By("verifying the pod is in kubernetes") selector = 
labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(options) + pods, err = podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pod") framework.ExpectEqual(len(pods.Items), 1) @@ -98,7 +98,7 @@ var _ = SIGDescribe("Pods Extended", func() { // may be carried out immediately rather than gracefully. framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) // save the running pod - pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to GET scheduled pod") ginkgo.By("deleting the pod gracefully") @@ -139,7 +139,7 @@ var _ = SIGDescribe("Pods Extended", func() { selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(options) + pods, err = podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pods") framework.ExpectEqual(len(pods.Items), 0) @@ -192,7 +192,7 @@ var _ = SIGDescribe("Pods Extended", func() { podClient.Create(pod) ginkgo.By("verifying QOS class is set on the pod") - pod, err := podClient.Get(name, metav1.GetOptions{}) + pod, err := podClient.Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to query for pod") framework.ExpectEqual(pod.Status.QOSClass, v1.PodQOSGuaranteed) }) diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go index f7c60c94654..2b762a3e53f 100644 --- a/test/e2e/node/pre_stop.go +++ b/test/e2e/node/pre_stop.go @@ -60,13 +60,13 @@ func testPreStop(c clientset.Interface, ns string) { }, } ginkgo.By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) - podDescr, err := c.CoreV1().Pods(ns).Create(podDescr) + podDescr, err := c.CoreV1().Pods(ns).Create(context.TODO(), podDescr) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) // At the end of the test, clean up by removing the pod. defer func() { ginkgo.By("Deleting the server pod") - c.CoreV1().Pods(ns).Delete(podDescr.Name, nil) + c.CoreV1().Pods(ns).Delete(context.TODO(), podDescr.Name, nil) }() ginkgo.By("Waiting for pods to come up.") @@ -75,7 +75,7 @@ func testPreStop(c clientset.Interface, ns string) { val := "{\"Source\": \"prestop\"}" - podOut, err := c.CoreV1().Pods(ns).Get(podDescr.Name, metav1.GetOptions{}) + podOut, err := c.CoreV1().Pods(ns).Get(context.TODO(), podDescr.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "getting pod info") podURL := net.JoinHostPort(podOut.Status.PodIP, "8080") @@ -105,7 +105,7 @@ func testPreStop(c clientset.Interface, ns string) { } ginkgo.By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns)) - preStopDescr, err = c.CoreV1().Pods(ns).Create(preStopDescr) + preStopDescr, err = c.CoreV1().Pods(ns).Create(context.TODO(), preStopDescr) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) deletePreStop := true @@ -113,7 +113,7 @@ func testPreStop(c clientset.Interface, ns string) { defer func() { if deletePreStop { ginkgo.By("Deleting the tester pod") - c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil) + c.CoreV1().Pods(ns).Delete(context.TODO(), preStopDescr.Name, nil) } }() @@ -122,7 +122,7 @@ func testPreStop(c clientset.Interface, ns string) { // Delete the pod with the preStop handler. 
ginkgo.By("Deleting pre-stop pod") - if err := c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil); err == nil { + if err := c.CoreV1().Pods(ns).Delete(context.TODO(), preStopDescr.Name, nil); err == nil { deletePreStop = false } framework.ExpectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name)) @@ -194,11 +194,11 @@ var _ = SIGDescribe("PreStop", func() { framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) var err error - pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to GET scheduled pod") ginkgo.By("deleting the pod gracefully") - err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds)) + err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds)) framework.ExpectNoError(err, "failed to delete pod") //wait up to graceful termination period seconds diff --git a/test/e2e/node/runtimeclass.go b/test/e2e/node/runtimeclass.go index 0a95b1f2842..7f51e401eb7 100644 --- a/test/e2e/node/runtimeclass.go +++ b/test/e2e/node/runtimeclass.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -46,14 +47,14 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { runtimeClass := newRuntimeClass(f.Namespace.Name, "conflict-runtimeclass") runtimeClass.Scheduling = scheduling - rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(runtimeClass) + rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(context.TODO(), runtimeClass) framework.ExpectNoError(err, "failed to create RuntimeClass resource") pod := newRuntimeClassPod(rc.GetName()) pod.Spec.NodeSelector = map[string]string{ "foo": "bar", } - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectError(err, "should be forbidden") framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error") }) @@ -97,7 +98,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { ginkgo.By("Trying to create runtimeclass and pod") runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass") runtimeClass.Scheduling = scheduling - rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(runtimeClass) + rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(context.TODO(), runtimeClass) framework.ExpectNoError(err, "failed to create RuntimeClass resource") pod := newRuntimeClassPod(rc.GetName()) @@ -109,7 +110,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() { framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, f.Namespace.Name, pod.Name)) // check that pod got scheduled on specified node. - scheduledPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + scheduledPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(nodeName, scheduledPod.Spec.NodeName) framework.ExpectEqual(nodeSelector, pod.Spec.NodeSelector) diff --git a/test/e2e/node/security_context.go b/test/e2e/node/security_context.go index abe65122a77..f2733b03c29 100644 --- a/test/e2e/node/security_context.go +++ b/test/e2e/node/security_context.go @@ -23,6 +23,7 @@ limitations under the License. 
package node import ( + "context" "fmt" "k8s.io/api/core/v1" @@ -206,7 +207,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) pod.Spec.Containers[0].Command = []string{"sleep", "6000"} client := f.ClientSet.CoreV1().Pods(f.Namespace.Name) - pod, err := client.Create(pod) + pod, err := client.Create(context.TODO(), pod) framework.ExpectNoError(err, "Error creating pod %v", pod) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) @@ -220,7 +221,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) framework.ExpectNoError(err) gomega.Expect(content).To(gomega.ContainSubstring(testContent)) - foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Confirm that the file can be accessed from a second @@ -262,7 +263,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{ Level: "s0:c2,c3", } - _, err = client.Create(pod) + _, err = client.Create(context.TODO(), pod) framework.ExpectNoError(err, "Error creating pod %v", pod) err = f.WaitForPodRunning(pod.Name) diff --git a/test/e2e/node/ttlafterfinished.go b/test/e2e/node/ttlafterfinished.go index f52ebe151d3..7f655574c3e 100644 --- a/test/e2e/node/ttlafterfinished.go +++ b/test/e2e/node/ttlafterfinished.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "fmt" "time" @@ -119,12 +120,12 @@ func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUp jobs := c.BatchV1().Jobs(namespace) var updateErr error pollErr := wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { - if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil { + if job, err = jobs.Get(context.TODO(), name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(job) - if job, err = jobs.Update(job); err == nil { + if job, err = jobs.Update(context.TODO(), job); err == nil { framework.Logf("Updating job %s", name) return true, nil } @@ -141,7 +142,7 @@ func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUp // a non-nil deletionTimestamp (i.e. being deleted). func waitForJobDeleting(c clientset.Interface, ns, jobName string) error { return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { - curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) + curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/scheduling/framework.go b/test/e2e/scheduling/framework.go index 40d28f2b1cc..35b0da62084 100644 --- a/test/e2e/scheduling/framework.go +++ b/test/e2e/scheduling/framework.go @@ -17,6 +17,7 @@ limitations under the License. package scheduling import ( + "context" "fmt" "time" @@ -79,7 +80,7 @@ func WaitForPodsToBeDeleted(c clientset.Interface) { // getScheduledAndUnscheduledPods lists scheduled and not scheduled pods in the given namespace, with succeeded and failed pods filtered out. 
func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.String, ns string) (scheduledPods, notScheduledPods []v1.Pod) { - pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, fmt.Sprintf("listing all pods in namespace %q while waiting for stable cluster", ns)) // API server returns also Pods that succeeded. We need to filter them out. filteredPods := make([]v1.Pod, 0, len(pods.Items)) @@ -94,7 +95,7 @@ func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.Stri // getDeletingPods returns whether there are any pods marked for deletion. func getDeletingPods(c clientset.Interface, ns string) []v1.Pod { - pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, fmt.Sprintf("listing all pods in namespace %q while waiting for pods to terminate", ns)) var deleting []v1.Pod for _, p := range pods.Items { diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index c0ff69a6ae4..3da4504ddbf 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -17,6 +17,7 @@ limitations under the License. package scheduling import ( + "context" "fmt" "reflect" "strconv" @@ -68,7 +69,7 @@ var _ = SIGDescribe("LimitRange", func() { selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options) + limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for limitRanges") framework.ExpectEqual(len(limitRanges.Items), 0) options = metav1.ListOptions{ @@ -80,7 +81,7 @@ var _ = SIGDescribe("LimitRange", func() { lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = selector.String() - limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options) + limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(context.TODO(), options) if err == nil { select { case listCompleted <- true: @@ -94,14 +95,14 @@ var _ = SIGDescribe("LimitRange", func() { }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = selector.String() - return f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(options) + return f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(context.TODO(), options) }, } _, _, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.LimitRange{}) defer w.Stop() ginkgo.By("Submitting a LimitRange") - limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(context.TODO(), limitRange) framework.ExpectNoError(err) ginkgo.By("Verifying LimitRange creation was observed") @@ -120,7 +121,7 @@ var _ = SIGDescribe("LimitRange", func() { } ginkgo.By("Fetching the LimitRange to ensure it has proper values") - limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{}) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(context.TODO(), limitRange.Name, metav1.GetOptions{}) framework.ExpectNoError(err) expected := 
v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit} actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default} @@ -129,11 +130,11 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with no resource requirements") pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) for i := range pod.Spec.Containers { err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) @@ -146,11 +147,11 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with partial resource requirements") pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", "")) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // This is an interesting case, so it's worth a comment // If you specify a Limit, and no Request, the Limit will default to the Request @@ -167,23 +168,23 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Failing to create a Pod with less than min resources") pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectError(err) ginkgo.By("Failing to create a Pod with more than max resources") pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectError(err) ginkgo.By("Updating a LimitRange") newMin := getResourceList("9m", "49Mi", "49Gi") limitRange.Spec.Limits[0].Min = newMin - limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(context.TODO(), limitRange) framework.ExpectNoError(err) ginkgo.By("Verifying LimitRange updating is effective") err = wait.Poll(time.Second*2, time.Second*20, func() (bool, error) { - limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{}) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(context.TODO(), limitRange.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil }) @@ -191,23 +192,23 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with less than former min resources") pod 
= f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Failing to create a Pod with more than max resources") pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectError(err) ginkgo.By("Deleting a LimitRange") - err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30)) + err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(context.TODO(), limitRange.Name, metav1.NewDeleteOptions(30)) framework.ExpectNoError(err) ginkgo.By("Verifying the LimitRange was deleted") gomega.Expect(wait.Poll(time.Second*5, e2eservice.RespondingTimeout, func() (bool, error) { selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name})) options := metav1.ListOptions{LabelSelector: selector.String()} - limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options) + limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(context.TODO(), options) if err != nil { framework.Logf("Unable to retrieve LimitRanges: %v", err) @@ -235,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() { ginkgo.By("Creating a Pod with more than former max resources") pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{}) - _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) }) diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index b10ded3a69e..20bea26fced 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -17,6 +17,7 @@ limitations under the License. 
package scheduling import ( + "context" "os" "regexp" "time" @@ -84,7 +85,7 @@ func makeCudaAdditionDevicePluginTestPod() *v1.Pod { } func logOSImages(f *framework.Framework) { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") for _, node := range nodeList.Items { framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage) @@ -93,7 +94,7 @@ func logOSImages(f *framework.Framework) { func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool { framework.Logf("Getting list of Nodes from API server") - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") for _, node := range nodeList.Items { if node.Spec.Unschedulable { @@ -110,7 +111,7 @@ func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool { } func getGPUsAvailable(f *framework.Framework) int64 { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") var gpusAvailable int64 for _, node := range nodeList.Items { @@ -138,7 +139,7 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra ds, err := framework.DsFromManifest(dsYamlURL) framework.ExpectNoError(err) ds.Namespace = f.Namespace.Name - _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds) + _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), ds) framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset") framework.Logf("Successfully created daemonset to install Nvidia drivers.") diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index ffce11ba4c6..f1f75cfa786 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -17,6 +17,7 @@ limitations under the License. 
package scheduling import ( + "context" "fmt" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "time" @@ -77,7 +78,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { f := framework.NewDefaultFramework("sched-pred") ginkgo.AfterEach(func() { - rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{}) + rc, err := cs.CoreV1().ReplicationControllers(ns).Get(context.TODO(), RCName, metav1.GetOptions{}) if err == nil && *(rc.Spec.Replicas) != 0 { ginkgo.By("Cleaning up the replication controller") err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName) @@ -136,7 +137,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { } WaitForStableCluster(cs, masterNodes) - pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) + pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, pod := range pods.Items { _, found := nodeToAllocatableMap[pod.Spec.NodeName] @@ -241,7 +242,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { } }() - pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) + pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, pod := range pods.Items { _, found := nodeToAllocatableMap[pod.Spec.NodeName] @@ -363,7 +364,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName)) - labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{}) + labelPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(labelPod.Spec.NodeName, nodeName) }) @@ -450,7 +451,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName)) - labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{}) + labelPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(labelPod.Spec.NodeName, nodeName) }) @@ -493,7 +494,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // already when the kubelet does not know about its new taint yet. The // kubelet will then refuse to launch the pod. 
framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, tolerationPodName)) - deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{}) + deployedPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), tolerationPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(deployedPod.Spec.NodeName, nodeName) }) @@ -656,7 +657,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod { if len(namespace) == 0 { namespace = f.Namespace.Name } - pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(initPausePod(f, conf)) + pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), initPausePod(f, conf)) framework.ExpectNoError(err) return pod } @@ -664,7 +665,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod { func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod { pod := createPausePod(f, conf) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) - pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(conf.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), conf.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return pod } @@ -677,7 +678,7 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string { pod := runPausePod(f, conf) ginkgo.By("Explicitly delete pod here to free the resource it takes.") - err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) return pod.Spec.NodeName @@ -711,7 +712,7 @@ func removeTaintFromNodeAction(cs clientset.Interface, nodeName string, testTain // createPausePodAction returns a closure that creates a pause pod upon invocation. func createPausePodAction(f *framework.Framework, conf pausePodConfig) e2eevents.Action { return func() error { - _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(initPausePod(f, conf)) + _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), initPausePod(f, conf)) return err } } @@ -730,7 +731,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action e2eevents.Action // TODO: upgrade calls in PodAffinity tests when we're able to run them func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) { - allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + allPods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) scheduledPods, notScheduledPods := GetPodsScheduled(masterNodes, allPods) diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 8e052d10372..aaa95da3079 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -17,6 +17,7 @@ limitations under the License. 
package scheduling import ( + "context" "fmt" "strings" "sync/atomic" @@ -71,7 +72,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { ginkgo.AfterEach(func() { for _, pair := range priorityPairs { - cs.SchedulingV1().PriorityClasses().Delete(pair.name, metav1.NewDeleteOptions(0)) + cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, metav1.NewDeleteOptions(0)) } }) @@ -81,7 +82,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { nodeList = &v1.NodeList{} var err error for _, pair := range priorityPairs { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) } @@ -105,7 +106,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { // Create one pod per node that uses a lot of the node's resources. ginkgo.By("Create pods that use 60% of node resources.") pods := make([]*v1.Pod, 0, len(nodeList.Items)) - allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) + allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for i, node := range nodeList.Items { currentCPUUsage, currentMemUsage := getCurrentPodUsageOnTheNode(node.Name, allPods.Items, podRequestedResource) @@ -165,11 +166,11 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { NodeName: pods[0].Spec.NodeName, }) - preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{}) + preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(context.TODO(), pods[0].Name, metav1.GetOptions{}) podPreempted := (err != nil && apierrors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) for i := 1; i < len(pods); i++ { - livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{}) + livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(context.TODO(), pods[i].Name, metav1.GetOptions{}) framework.ExpectNoError(err) gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil()) } @@ -185,7 +186,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { // Create one pod per node that uses a lot of the node's resources. ginkgo.By("Create pods that use 60% of node resources.") pods := make([]*v1.Pod, 0, len(nodeList.Items)) - allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) + allPods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for i, node := range nodeList.Items { currentCPUUsage, currentMemUsage := getCurrentPodUsageOnTheNode(node.Name, allPods.Items, podRequestedResource) @@ -238,7 +239,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { defer func() { // Clean-up the critical pod // Always run cleanup to make sure the pod is properly cleaned up. 
- err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0)) + err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", metav1.NewDeleteOptions(0)) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error cleanup pod `%s/%s`: %v", metav1.NamespaceSystem, "critical-pod", err) } @@ -255,15 +256,15 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { defer func() { // Clean-up the critical pod - err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete("critical-pod", metav1.NewDeleteOptions(0)) + err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), "critical-pod", metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) }() // Make sure that the lowest priority pod is deleted. - preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{}) + preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(context.TODO(), pods[0].Name, metav1.GetOptions{}) podPreempted := (err != nil && apierrors.IsNotFound(err)) || (err == nil && preemptedPod.DeletionTimestamp != nil) for i := 1; i < len(pods); i++ { - livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{}) + livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(context.TODO(), pods[i].Name, metav1.GetOptions{}) framework.ExpectNoError(err) gomega.Expect(livePod.DeletionTimestamp).To(gomega.BeNil()) } @@ -288,7 +289,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { // print out additional info if tests failed if ginkgo.CurrentGinkgoTestDescription().Failed { // list existing priorities - priorityList, err := cs.SchedulingV1().PriorityClasses().List(metav1.ListOptions{}) + priorityList, err := cs.SchedulingV1().PriorityClasses().List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Logf("Unable to list priorities: %v", err) } else { @@ -304,11 +305,11 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { // force it to update nodeCopy.ResourceVersion = "0" delete(nodeCopy.Status.Capacity, fakecpu) - _, err := cs.CoreV1().Nodes().UpdateStatus(nodeCopy) + _, err := cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy) framework.ExpectNoError(err) } for _, pair := range priorityPairs { - cs.SchedulingV1().PriorityClasses().Delete(pair.name, metav1.NewDeleteOptions(0)) + cs.SchedulingV1().PriorityClasses().Delete(context.TODO(), pair.name, metav1.NewDeleteOptions(0)) } }) @@ -323,7 +324,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { // get the node API object var err error - node, err = cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err = cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { framework.Failf("error getting node %q: %v", nodeName, err) } @@ -338,7 +339,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { // force it to update nodeCopy.ResourceVersion = "0" nodeCopy.Status.Capacity[fakecpu] = resource.MustParse("1000") - node, err = cs.CoreV1().Nodes().UpdateStatus(nodeCopy) + node, err = cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy) framework.ExpectNoError(err) // create four PriorityClass: p1, p2, p3, p4 @@ -346,7 +347,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { priorityName := fmt.Sprintf("p%d", i) priorityVal := int32(i) priorityPairs = append(priorityPairs, priorityPair{name: priorityName, value: priorityVal}) - _, err := 
cs.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}) + _, err := cs.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}) if err != nil { framework.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err) framework.Logf("Reason: %v. Msg: %v", apierrors.ReasonForError(err), err) @@ -363,11 +364,11 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { _, podController := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - obj, err := f.ClientSet.CoreV1().Pods(ns).List(options) + obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return f.ClientSet.CoreV1().Pods(ns).Watch(options) + return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options) }, }, &v1.Pod{}, @@ -526,7 +527,7 @@ func createPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSe if len(namespace) == 0 { namespace = f.Namespace.Name } - rs, err := f.ClientSet.AppsV1().ReplicaSets(namespace).Create(initPauseRS(f, conf)) + rs, err := f.ClientSet.AppsV1().ReplicaSets(namespace).Create(context.TODO(), initPauseRS(f, conf)) framework.ExpectNoError(err) return rs } @@ -542,7 +543,7 @@ func createPod(f *framework.Framework, conf pausePodConfig) *v1.Pod { if len(namespace) == 0 { namespace = f.Namespace.Name } - pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(initPausePod(f, conf)) + pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), initPausePod(f, conf)) framework.ExpectNoError(err) return pod } @@ -551,7 +552,7 @@ func createPod(f *framework.Framework, conf pausePodConfig) *v1.Pod { // if the 'spec.NodeName' field of preemptor 'pod' has been set. func waitForPreemptingWithTimeout(f *framework.Framework, pod *v1.Pod, timeout time.Duration) { err := wait.Poll(2*time.Second, timeout, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 9668cde9fa0..1aa7e09f32a 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -17,6 +17,7 @@ limitations under the License. 
package scheduling import ( + "context" "encoding/json" "fmt" "math" @@ -68,7 +69,7 @@ var podRequestedResource = &v1.ResourceRequirements{ // addOrUpdateAvoidPodOnNode adds avoidPods annotations to node, will override if it exists func addOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods v1.AvoidPods) { err := wait.PollImmediate(framework.Poll, framework.SingleCallTimeout, func() (bool, error) { - node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil @@ -83,7 +84,7 @@ func addOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods node.Annotations = make(map[string]string) } node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(taintsData) - _, err = c.CoreV1().Nodes().Update(node) + _, err = c.CoreV1().Nodes().Update(context.TODO(), node) if err != nil { if !apierrors.IsConflict(err) { framework.ExpectNoError(err) @@ -100,7 +101,7 @@ func addOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods // removeAvoidPodsOffNode removes AvoidPods annotations from the node. It does not fail if no such annotation exists. func removeAvoidPodsOffNode(c clientset.Interface, nodeName string) { err := wait.PollImmediate(framework.Poll, framework.SingleCallTimeout, func() (bool, error) { - node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { if testutils.IsRetryableAPIError(err) { return false, nil @@ -112,7 +113,7 @@ func removeAvoidPodsOffNode(c clientset.Interface, nodeName string) { return true, nil } delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey) - _, err = c.CoreV1().Nodes().Update(node) + _, err = c.CoreV1().Nodes().Update(context.TODO(), node) if err != nil { if !apierrors.IsConflict(err) { framework.ExpectNoError(err) @@ -170,7 +171,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { k := v1.LabelHostname ginkgo.By("Verifying the node has a label " + k) - node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) if _, hasLabel := node.Labels[k]; !hasLabel { // If the label is not exists, label all nodes for testing. 
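The same mechanical change repeats across all of these hunks: each client-go call gains a leading context.Context argument, passed as context.TODO() until the tests plumb a caller-provided context through. As a minimal sketch of the resulting call-site shape, assuming a client-go release with the context-threaded signatures this patch targets (the package name, clientset parameter, and pod names below are illustrative, not taken from the patch):

package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getPodPhase reads a Pod using the context-threaded Get signature that this
// patch adopts at every call site.
func getPodPhase(cs kubernetes.Interface, namespace, name string) (string, error) {
	// Before: cs.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
	// After:  a context.Context leads the argument list; context.TODO() marks
	//         call sites that do not yet receive a caller-provided context.
	pod, err := cs.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return string(pod.Status.Phase), nil
}

context.TODO() is the standard placeholder from the context package for call sites that have not yet decided which context to pass, which is why it appears uniformly throughout these test diffs.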
@@ -234,7 +235,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { }) ginkgo.By("Wait the pod becomes running") framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{}) + labelPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Verify the pod was scheduled to the expected node.") framework.ExpectNotEqual(labelPod.Spec.NodeName, nodeName) @@ -293,7 +294,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { ginkgo.By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1)) e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true) - testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{ + testPods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{ LabelSelector: "name=scheduler-priority-avoid-pod", }) framework.ExpectNoError(err) @@ -335,7 +336,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) ginkgo.By("Pod should prefer scheduled to the node that pod can tolerate.") - tolePod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{}) + tolePod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), tolerationPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(tolePod.Spec.NodeName, nodeName) }) @@ -404,7 +405,7 @@ func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re framework.Logf("ComputeCPUMemFraction for node: %v", node.Name) totalRequestedCPUResource := resource.Requests.Cpu().MilliValue() totalRequestedMemResource := resource.Requests.Memory().Value() - allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) + allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Failf("Expect error of invalid, got : %v", err) } @@ -480,7 +481,7 @@ func createRC(ns, rsName string, replicas int32, rcPodLabels map[string]string, }, }, } - rc, err := f.ClientSet.CoreV1().ReplicationControllers(ns).Create(rc) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(ns).Create(context.TODO(), rc) framework.ExpectNoError(err) return rc } diff --git a/test/e2e/scheduling/taints.go b/test/e2e/scheduling/taints.go index f2850a295af..e456df31340 100644 --- a/test/e2e/scheduling/taints.go +++ b/test/e2e/scheduling/taints.go @@ -17,6 +17,7 @@ limitations under the License. 
package scheduling import ( + "context" "time" "k8s.io/api/core/v1" @@ -121,12 +122,12 @@ func createTestController(cs clientset.Interface, observedDeletions chan string, &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"group": podLabel}).String() - obj, err := cs.CoreV1().Pods(ns).List(options) + obj, err := cs.CoreV1().Pods(ns).List(context.TODO(), options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"group": podLabel}).String() - return cs.CoreV1().Pods(ns).Watch(options) + return cs.CoreV1().Pods(ns).Watch(context.TODO(), options) }, }, &v1.Pod{}, @@ -426,7 +427,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { ginkgo.By("Starting pods...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute) framework.ExpectNoError(err) - node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"] if !ok { diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index 774b4d89846..f94713f0e47 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ b/test/e2e/scheduling/ubernetes_lite.go @@ -17,6 +17,7 @@ limitations under the License. package scheduling import ( + "context" "fmt" "math" @@ -81,7 +82,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) }}, }, } - _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(serviceSpec) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), serviceSpec) framework.ExpectNoError(err) // Now create some pods behind the service @@ -140,7 +141,7 @@ func getZoneCount(c clientset.Interface) (int, error) { // Find the name of the zone in which the pod is scheduled func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) { ginkgo.By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName)) - node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) return getZoneNameForNode(*node) } @@ -180,7 +181,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, args []string) { name := "ubelite-spread-rc-" + string(uuid.NewUUID()) ginkgo.By(fmt.Sprintf("Creating replication controller %s", name)) - controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{ + controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), &v1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ Namespace: f.Namespace.Name, Name: name, diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go index 533e7572ac2..6f4f1a7a3a9 100644 --- a/test/e2e/scheduling/ubernetes_lite_volumes.go +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go @@ -17,6 +17,7 @@ limitations under the License. 
package scheduling import ( + "context" "fmt" "strconv" @@ -143,7 +144,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { // Defer the cleanup defer func() { framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) - err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) + err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, nil) if err != nil { framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err) } @@ -160,11 +161,11 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { ginkgo.By("Checking that PDs have been provisioned in only the expected zones") for _, claim := range pvcList { // Get a new copy of the claim to have all fields populated - claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Get the related PV - pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) pvZone, ok := pv.ObjectMeta.Labels[v1.LabelZoneFailureDomain] @@ -235,7 +236,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) ginkgo.By("Creating pods for each static PV") for _, config := range configs { podConfig := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "") - config.pod, err = c.CoreV1().Pods(ns).Create(podConfig) + config.pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), podConfig) framework.ExpectNoError(err) } diff --git a/test/e2e/servicecatalog/podpreset.go b/test/e2e/servicecatalog/podpreset.go index dfbd4ad1b75..de61908b80e 100644 --- a/test/e2e/servicecatalog/podpreset.go +++ b/test/e2e/servicecatalog/podpreset.go @@ -17,6 +17,7 @@ limitations under the License. 
package servicecatalog import ( + "context" "reflect" "strconv" "time" @@ -112,14 +113,14 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pod") framework.ExpectEqual(len(pods.Items), 0) options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } - w, err := podClient.Watch(options) + w, err := podClient.Watch(context.TODO(), options) framework.ExpectNoError(err, "failed to set up watch") ginkgo.By("submitting the pod to kubernetes") @@ -128,7 +129,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { ginkgo.By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(options) + pods, err = podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pod") framework.ExpectEqual(len(pods.Items), 1) @@ -148,7 +149,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { ginkgo.By("ensuring pod is modified") // save the running pod - pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) + pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to GET scheduled pod") // check the annotation is there @@ -232,14 +233,14 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pod") framework.ExpectEqual(len(pods.Items), 0) options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } - w, err := podClient.Watch(options) + w, err := podClient.Watch(context.TODO(), options) framework.ExpectNoError(err, "failed to set up watch") ginkgo.By("submitting the pod to kubernetes") @@ -248,7 +249,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { ginkgo.By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} - pods, err = podClient.List(options) + pods, err = podClient.List(context.TODO(), options) framework.ExpectNoError(err, "failed to query for pod") framework.ExpectEqual(len(pods.Items), 1) @@ -268,7 +269,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { ginkgo.By("ensuring pod is modified") // save the running pod - pod, err := podClient.Get(originalPod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(context.TODO(), originalPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to GET scheduled pod") // check the annotation is not there @@ -288,5 +289,5 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { }) func createPodPreset(c clientset.Interface, ns string, job *settingsv1alpha1.PodPreset) (*settingsv1alpha1.PodPreset, error) { - return c.SettingsV1alpha1().PodPresets(ns).Create(job) + return 
c.SettingsV1alpha1().PodPresets(ns).Create(context.TODO(), job) } diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 82a23372e37..393db398f65 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "crypto/sha256" "encoding/json" "fmt" @@ -210,9 +211,9 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { for _, claim := range m.pvcs { ginkgo.By(fmt.Sprintf("Deleting claim %s", claim.Name)) - claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) if err == nil { - cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) + cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, nil) framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute) } @@ -220,7 +221,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { for _, sc := range m.sc { ginkgo.By(fmt.Sprintf("Deleting storageclass %s", sc.Name)) - cs.StorageV1().StorageClasses().Delete(sc.Name, nil) + cs.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, nil) } ginkgo.By("Cleaning up resources") @@ -275,7 +276,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { handle := getVolumeHandle(m.cs, claim) attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeName))) attachmentName := fmt.Sprintf("csi-%x", attachmentHash) - _, err = m.cs.StorageV1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{}) + _, err = m.cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { if !test.disableAttach { @@ -597,7 +598,7 @@ func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error { return err } waitErr := wait.PollImmediate(10*time.Second, csiPodUnschedulableTimeout, func() (bool, error) { - pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -620,7 +621,7 @@ func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Inte var attachLimit int32 waitErr := wait.PollImmediate(10*time.Second, csiNodeLimitUpdateTimeout, func() (bool, error) { - csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{}) + csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -639,9 +640,9 @@ func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Inte func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { class := newStorageClass(t, ns, "") var err error - _, err = cs.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{}) + _, err = cs.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{}) if err != nil { - class, err = cs.StorageV1().StorageClasses().Create(class) + class, err = cs.StorageV1().StorageClasses().Create(context.TODO(), class) framework.ExpectNoError(err, "Failed to create class : %v", err) } @@ -650,7 
+651,7 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e StorageClassName: &(class.Name), VolumeMode: &t.VolumeMode, }, ns) - claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(claim) + claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim) framework.ExpectNoError(err, "Failed to create claim: %v", err) pvcClaims := []*v1.PersistentVolumeClaim{claim} @@ -726,7 +727,7 @@ func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.Volum pod.Spec.NodeSelector = node.Selector } - return cs.CoreV1().Pods(ns).Create(pod) + return cs.CoreV1().Pods(ns).Create(context.TODO(), pod) } // checkPodLogs tests that NodePublish was called with expected volume_context and (for ephemeral inline volumes) @@ -815,7 +816,7 @@ func waitForCSIDriver(cs clientset.Interface, driverName string) error { framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) { - _, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{}) + _, err := cs.StorageV1beta1().CSIDrivers().Get(context.TODO(), driverName, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { return err } @@ -824,24 +825,24 @@ func waitForCSIDriver(cs clientset.Interface, driverName string) error { } func destroyCSIDriver(cs clientset.Interface, driverName string) { - driverGet, err := cs.StorageV1beta1().CSIDrivers().Get(driverName, metav1.GetOptions{}) + driverGet, err := cs.StorageV1beta1().CSIDrivers().Get(context.TODO(), driverName, metav1.GetOptions{}) if err == nil { framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name) // Uncomment the following line to get full dump of CSIDriver object // framework.Logf("%s", framework.PrettyPrint(driverGet)) - cs.StorageV1beta1().CSIDrivers().Delete(driverName, nil) + cs.StorageV1beta1().CSIDrivers().Delete(context.TODO(), driverName, nil) } } func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) string { // re-get the claim to the latest state with bound volume - claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) if err != nil { framework.ExpectNoError(err, "Cannot get PVC") return "" } pvName := claim.Spec.VolumeName - pv, err := cs.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if err != nil { framework.ExpectNoError(err, "Cannot get PV") return "" diff --git a/test/e2e/storage/detach_mounted.go b/test/e2e/storage/detach_mounted.go index b17bb8d091a..13ed30b3497 100644 --- a/test/e2e/storage/detach_mounted.go +++ b/test/e2e/storage/detach_mounted.go @@ -17,6 +17,7 @@ limitations under the License. 
package storage import ( + "context" "fmt" "path" @@ -84,7 +85,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() { clientPod := getFlexVolumePod(volumeSource, node.Name) ginkgo.By("Creating pod that uses slow format volume") - pod, err := cs.CoreV1().Pods(ns.Name).Create(clientPod) + pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), clientPod) framework.ExpectNoError(err) uniqueVolumeName := getUniqueVolumeName(pod, driverInstallAs) @@ -132,7 +133,7 @@ func getUniqueVolumeName(pod *v1.Pod, driverName string) string { func waitForVolumesNotInUse(client clientset.Interface, nodeName, volumeName string) error { waitErr := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) { - node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching node %s with %v", nodeName, err) } @@ -152,7 +153,7 @@ func waitForVolumesNotInUse(client clientset.Interface, nodeName, volumeName str func waitForVolumesAttached(client clientset.Interface, nodeName, volumeName string) error { waitErr := wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) { - node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching node %s with %v", nodeName, err) } @@ -172,7 +173,7 @@ func waitForVolumesAttached(client clientset.Interface, nodeName, volumeName str func waitForVolumesInUse(client clientset.Interface, nodeName, volumeName string) error { waitErr := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) { - node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching node %s with %v", nodeName, err) } diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index 0a3f23d9ad3..a772c5e5f57 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -36,6 +36,7 @@ limitations under the License. package drivers import ( + "context" "fmt" "strconv" "time" @@ -492,7 +493,7 @@ func waitForCSIDriverRegistrationOnNode(nodeName string, driverName string, cs c const csiNodeRegisterTimeout = 1 * time.Minute waitErr := wait.PollImmediate(10*time.Second, csiNodeRegisterTimeout, func() (bool, error) { - csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{}) + csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } diff --git a/test/e2e/storage/drivers/csi_objects.go b/test/e2e/storage/drivers/csi_objects.go index 5935a71a791..f70ee46b56f 100644 --- a/test/e2e/storage/drivers/csi_objects.go +++ b/test/e2e/storage/drivers/csi_objects.go @@ -20,6 +20,7 @@ limitations under the License. 
package drivers import ( + "context" "fmt" "io/ioutil" "os" @@ -92,7 +93,7 @@ func createGCESecrets(client clientset.Interface, ns string) { }, } - _, err = client.CoreV1().Secrets(ns).Create(s) + _, err = client.CoreV1().Secrets(ns).Create(context.TODO(), s) if !apierrors.IsAlreadyExists(err) { framework.ExpectNoError(err, "Failed to create Secret %v", s.GetName()) } diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index a286a8a14ca..9bb819cc080 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -36,6 +36,7 @@ limitations under the License. package drivers import ( + "context" "fmt" "os/exec" "strconv" @@ -182,7 +183,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf }, func() { framework.ExpectNoError(e2epod.DeletePodWithWait(cs, n.externalProvisionerPod)) clusterRoleBindingName := ns.Name + "--" + "cluster-admin" - cs.RbacV1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0)) + cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBindingName, metav1.NewDeleteOptions(0)) } } @@ -324,7 +325,7 @@ func (v *glusterVolume) DeleteVolume() { name := v.prefix + "-server" framework.Logf("Deleting Gluster endpoints %q...", name) - err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil) + err := cs.CoreV1().Endpoints(ns.Name).Delete(context.TODO(), name, nil) if err != nil { if !apierrors.IsNotFound(err) { framework.Failf("Gluster delete endpoints failed: %v", err) @@ -505,7 +506,7 @@ func newRBDServer(cs clientset.Interface, namespace string) (config volume.TestC Type: "kubernetes.io/rbd", } - secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret) + secret, err := cs.CoreV1().Secrets(config.Namespace).Create(context.TODO(), secret) if err != nil { framework.Failf("Failed to create secrets for Ceph RBD: %v", err) } @@ -943,7 +944,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v }, } // h.prepPod will be reused in cleanupDriver. 
- pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(prepPod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), prepPod) framework.ExpectNoError(err, "while creating hostPath init pod") err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) @@ -965,7 +966,7 @@ func (v *hostPathSymlinkVolume) DeleteVolume() { cmd := fmt.Sprintf("rm -rf %v&& rm -rf %v", v.targetPath, v.sourcePath) v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd} - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(v.prepPod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), v.prepPod) framework.ExpectNoError(err, "while creating hostPath teardown pod") err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) @@ -1935,7 +1936,7 @@ func cleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, se if secret != nil { framework.Logf("Deleting server secret %q...", secret.Name) - err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{}) + err := cs.CoreV1().Secrets(ns.Name).Delete(context.TODO(), secret.Name, &metav1.DeleteOptions{}) if err != nil { framework.Logf("Delete secret failed: %v", err) } diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index 2dce4be34c2..1c74d95b23e 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "strconv" @@ -77,7 +78,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { } var err error - if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -94,7 +95,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { }, } - if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -146,15 +147,15 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { defer func() { ginkgo.By("Cleaning up the secret") - if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, nil); err != nil { framework.Failf("unable to delete secret %v: %v", secret.Name, err) } ginkgo.By("Cleaning up the configmap") - if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, nil); err != nil { framework.Failf("unable to delete configmap %v: %v", configMap.Name, err) } ginkgo.By("Cleaning up the pod") - if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil { + if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Failf("unable to delete pod %v: %v", pod.Name, err) } }() @@ -252,17 +253,17 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo 
string, cle }, } - if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(gitServerSvc); err != nil { + if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), gitServerSvc); err != nil { framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) } return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() { ginkgo.By("Cleaning up the git server pod") - if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) } ginkgo.By("Cleaning up the git server svc") - if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), gitServerSvc.Name, nil); err != nil { framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) } } @@ -302,7 +303,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { "data-1": "value-1", }, } - _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap) framework.ExpectNoError(err) } return @@ -311,7 +312,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { func deleteConfigMaps(f *framework.Framework, configMapNames []string) { ginkgo.By("Cleaning up the configMaps") for _, configMapName := range configMapNames { - err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil) + err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMapName, nil) framework.ExpectNoError(err, "unable to delete configMap %v", configMapName) } } @@ -397,7 +398,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume }, }, } - _, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc) + _, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rc) framework.ExpectNoError(err, "error creating replication controller") defer func() { diff --git a/test/e2e/storage/ephemeral_volume.go b/test/e2e/storage/ephemeral_volume.go index 057d21e9c78..ba0b810530f 100644 --- a/test/e2e/storage/ephemeral_volume.go +++ b/test/e2e/storage/ephemeral_volume.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "strings" "time" @@ -54,7 +55,7 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() { for _, testSource := range invalidEphemeralSource("pod-ephm-test") { ginkgo.It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() { pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source) - pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) // Allow it to sleep for 30 seconds diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go index fdeb760efd6..5bdf8fd7a6a 100644 --- a/test/e2e/storage/external/external.go +++ b/test/e2e/storage/external/external.go @@ -17,6 +17,7 @@ limitations under the License. 
package external import ( + "context" "flag" "io/ioutil" @@ -273,7 +274,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(config *testsuites.Pe case d.StorageClass.FromName: sc = &storagev1.StorageClass{Provisioner: d.DriverInfo.Name} case d.StorageClass.FromExistingClassName != "": - sc, err = f.ClientSet.StorageV1().StorageClasses().Get(d.StorageClass.FromExistingClassName, metav1.GetOptions{}) + sc, err = f.ClientSet.StorageV1().StorageClasses().Get(context.TODO(), d.StorageClass.FromExistingClassName, metav1.GetOptions{}) framework.ExpectNoError(err, "getting storage class %s", d.StorageClass.FromExistingClassName) case d.StorageClass.FromFile != "": var ok bool diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go index 5c192787fbe..d1f82ced048 100644 --- a/test/e2e/storage/flexvolume_mounted_volume_resize.go +++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "path" "time" @@ -90,7 +91,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { Provisioner: "flex-expand", } - resizableSc, err = c.StorageV1().StorageClasses().Create(newStorageClass(test, ns, "resizing")) + resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing")) if err != nil { fmt.Printf("storage class creation error: %v\n", err) } @@ -101,7 +102,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { StorageClassName: &(resizableSc.Name), ClaimSize: "2Gi", }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err, "Error creating pvc") }) @@ -154,7 +155,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { ginkgo.By("Creating a deployment with the provisioned volume") deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") framework.ExpectNoError(err, "Failed creating deployment %v", err) - defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{}) + defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, &metav1.DeleteOptions{}) ginkgo.By("Expanding current pvc") newSize := resource.MustParse("6Gi") diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go index 887d6261a97..a91d6326859 100644 --- a/test/e2e/storage/flexvolume_online_resize.go +++ b/test/e2e/storage/flexvolume_online_resize.go @@ -17,6 +17,7 @@ limitations under the License. 
package storage import ( + "context" "fmt" "path" @@ -83,7 +84,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa Provisioner: "flex-expand", } - resizableSc, err = c.StorageV1().StorageClasses().Create(newStorageClass(test, ns, "resizing")) + resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing")) if err != nil { fmt.Printf("storage class creation error: %v\n", err) } @@ -94,7 +95,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa StorageClassName: &(resizableSc.Name), ClaimSize: "2Gi", }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err, "Error creating pvc: %v", err) }) @@ -187,7 +188,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa // createNginxPod creates an nginx pod. func createNginxPod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) { pod := makeNginxPod(namespace, nodeSelector, pvclaims) - pod, err := client.CoreV1().Pods(namespace).Create(pod) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod) if err != nil { return nil, fmt.Errorf("pod Create API error: %v", err) } @@ -197,7 +198,7 @@ func createNginxPod(client clientset.Interface, namespace string, nodeSelector m return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) } // get fresh pod info - pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return pod, fmt.Errorf("pod Get API error: %v", err) } diff --git a/test/e2e/storage/generic_persistent_volume-disruptive.go b/test/e2e/storage/generic_persistent_volume-disruptive.go index 8be789bdbf1..e4eb34e0c6e 100644 --- a/test/e2e/storage/generic_persistent_volume-disruptive.go +++ b/test/e2e/storage/generic_persistent_volume-disruptive.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" @@ -96,7 +97,7 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string ClaimSize: test.ClaimSize, VolumeMode: &test.VolumeMode, }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err, "Error creating pvc") pvcClaims := []*v1.PersistentVolumeClaim{pvc} pvs, err := e2epv.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index 9d7b25c19bc..99599a72a35 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -17,6 +17,7 @@ limitations under the License. 
package storage import ( + "context" "fmt" "time" @@ -81,7 +82,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { AllowVolumeExpansion: true, DelayBinding: true, } - resizableSc, err = c.StorageV1().StorageClasses().Create(newStorageClass(test, ns, "resizing")) + resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing")) framework.ExpectNoError(err, "Error creating resizable storage class") framework.ExpectEqual(*resizableSc.AllowVolumeExpansion, true) @@ -90,7 +91,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { StorageClassName: &(resizableSc.Name), VolumeMode: &test.VolumeMode, }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err, "Error creating pvc") }) @@ -121,7 +122,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { ginkgo.By("Creating a deployment with selected PVC") deployment, err := e2edeploy.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") framework.ExpectNoError(err, "Failed creating deployment %v", err) - defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{}) + defer c.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, &metav1.DeleteOptions{}) // PVC should be bound at this point ginkgo.By("Checking for bound PVC") diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index 180a20e926d..d197fbafe06 100644 --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "net" "time" @@ -302,7 +303,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv. pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "") pod.Spec.NodeName = nodeName framework.Logf("Creating NFS client pod.") - pod, err = c.CoreV1().Pods(ns).Create(pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.Logf("NFS client Pod %q created on Node %q", pod.Name, nodeName) framework.ExpectNoError(err) defer func() { @@ -313,11 +314,11 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv. err = e2epod.WaitForPodRunningInNamespace(c, pod) framework.ExpectNoError(err, fmt.Sprintf("Pod %q timed out waiting for phase: Running", pod.Name)) // Return created api objects - pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return pod, pv, pvc } diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index 66932c286be..a81d196ef51 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -17,6 +17,7 @@ limitations under the License. 
package storage import ( + "context" "fmt" "math/rand" "strings" @@ -145,12 +146,12 @@ var _ = utils.SIGDescribe("Pod Disks", func() { // if all test pods are RO then need a RW pod to format pd ginkgo.By("creating RW fmt Pod to ensure PD is formatted") fmtPod = testPDPod([]string{diskName}, host0Name, false, 1) - _, err = podClient.Create(fmtPod) + _, err = podClient.Create(context.TODO(), fmtPod) framework.ExpectNoError(err, "Failed to create fmtPod") framework.ExpectNoError(f.WaitForPodRunningSlow(fmtPod.Name)) ginkgo.By("deleting the fmtPod") - framework.ExpectNoError(podClient.Delete(fmtPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete fmtPod") + framework.ExpectNoError(podClient.Delete(context.TODO(), fmtPod.Name, metav1.NewDeleteOptions(0)), "Failed to delete fmtPod") framework.Logf("deleted fmtPod %q", fmtPod.Name) ginkgo.By("waiting for PD to detach") framework.ExpectNoError(waitForPDDetach(diskName, host0Name)) @@ -165,15 +166,15 @@ var _ = utils.SIGDescribe("Pod Disks", func() { ginkgo.By("defer: cleaning up PD-RW test environment") framework.Logf("defer cleanup errors can usually be ignored") if fmtPod != nil { - podClient.Delete(fmtPod.Name, podDelOpt) + podClient.Delete(context.TODO(), fmtPod.Name, podDelOpt) } - podClient.Delete(host0Pod.Name, podDelOpt) - podClient.Delete(host1Pod.Name, podDelOpt) + podClient.Delete(context.TODO(), host0Pod.Name, podDelOpt) + podClient.Delete(context.TODO(), host1Pod.Name, podDelOpt) detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name}) }() ginkgo.By("creating host0Pod on node0") - _, err = podClient.Create(host0Pod) + _, err = podClient.Create(context.TODO(), host0Pod) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) framework.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name) @@ -190,21 +191,21 @@ var _ = utils.SIGDescribe("Pod Disks", func() { ginkgo.By("verifying PD is present in node0's VolumeInUse list") framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */)) ginkgo.By("deleting host0Pod") // delete this pod before creating next pod - framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod") + framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, podDelOpt), "Failed to delete host0Pod") framework.Logf("deleted host0Pod %q", host0Pod.Name) e2epod.WaitForPodToDisappear(cs, host0Pod.Namespace, host0Pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout) framework.Logf("deleted host0Pod %q disappeared", host0Pod.Name) } ginkgo.By("creating host1Pod on node1") - _, err = podClient.Create(host1Pod) + _, err = podClient.Create(context.TODO(), host1Pod) framework.ExpectNoError(err, "Failed to create host1Pod") framework.ExpectNoError(f.WaitForPodRunningSlow(host1Pod.Name)) framework.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name) if readOnly { ginkgo.By("deleting host0Pod") - framework.ExpectNoError(podClient.Delete(host0Pod.Name, podDelOpt), "Failed to delete host0Pod") + framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, podDelOpt), "Failed to delete host0Pod") framework.Logf("deleted host0Pod %q", host0Pod.Name) } else { ginkgo.By("verifying PD contents in host1Pod") @@ -216,7 +217,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { } ginkgo.By("deleting host1Pod") - framework.ExpectNoError(podClient.Delete(host1Pod.Name, podDelOpt), "Failed to 
delete host1Pod") + framework.ExpectNoError(podClient.Delete(context.TODO(), host1Pod.Name, podDelOpt), "Failed to delete host1Pod") framework.Logf("deleted host1Pod %q", host1Pod.Name) ginkgo.By("Test completed successfully, waiting for PD to detach from both nodes") @@ -268,7 +269,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { ginkgo.By("defer: cleaning up PD-RW test environment") framework.Logf("defer cleanup errors can usually be ignored") if host0Pod != nil { - podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)) + podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0)) } for _, diskName := range diskNames { detachAndDeletePDs(diskName, []types.NodeName{host0Name}) @@ -279,7 +280,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { framework.Logf("PD Read/Writer Iteration #%v", i) ginkgo.By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers)) host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers) - _, err = podClient.Create(host0Pod) + _, err = podClient.Create(context.TODO(), host0Pod) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) @@ -304,7 +305,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { verifyPDContentsViaContainer(ns, f, host0Pod.Name, containerName, fileAndContentToVerify) ginkgo.By("deleting host0Pod") - framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod") + framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0)), "Failed to delete host0Pod") } ginkgo.By(fmt.Sprintf("Test completed successfully, waiting for %d PD(s) to detach from node0", numPDs)) for _, diskName := range diskNames { @@ -359,7 +360,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { ginkgo.By("defer: cleaning up PD-RW test env") framework.Logf("defer cleanup errors can usually be ignored") ginkgo.By("defer: delete host0Pod") - podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)) + podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0)) ginkgo.By("defer: detach and delete PDs") detachAndDeletePDs(diskName, []types.NodeName{host0Name}) if disruptOp == deleteNode || disruptOp == deleteNodeObj { @@ -367,7 +368,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { targetNode.ObjectMeta.SetResourceVersion("0") // need to set the resource version or else the Create() fails ginkgo.By("defer: re-create host0 node object") - _, err := nodeClient.Create(targetNode) + _, err := nodeClient.Create(context.TODO(), targetNode) framework.ExpectNoError(err, fmt.Sprintf("defer: Unable to re-create the deleted node object %q", targetNode.Name)) } ginkgo.By("defer: verify the number of ready nodes") @@ -381,7 +382,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { }() ginkgo.By("creating host0Pod on node0") - _, err = podClient.Create(host0Pod) + _, err = podClient.Create(context.TODO(), host0Pod) framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err)) ginkgo.By("waiting for host0Pod to be running") framework.ExpectNoError(f.WaitForPodRunningSlow(host0Pod.Name)) @@ -416,9 +417,9 @@ var _ = utils.SIGDescribe("Pod Disks", func() { } else if disruptOp == deleteNodeObj { ginkgo.By("deleting host0's node api object") - framework.ExpectNoError(nodeClient.Delete(string(host0Name), metav1.NewDeleteOptions(0)), "Unable to delete host0's node object") + 
framework.ExpectNoError(nodeClient.Delete(context.TODO(), string(host0Name), metav1.NewDeleteOptions(0)), "Unable to delete host0's node object") ginkgo.By("deleting host0Pod") - framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod") + framework.ExpectNoError(podClient.Delete(context.TODO(), host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod") } else if disruptOp == evictPod { evictTarget := &policyv1beta1.Eviction{ @@ -628,7 +629,7 @@ func waitForPDInVolumesInUse( } framework.Logf("Waiting for node %s's VolumesInUse Status %s PD %q", nodeName, logStr, diskName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) { - nodeObj, err := nodeClient.Get(string(nodeName), metav1.GetOptions{}) + nodeObj, err := nodeClient.Get(context.TODO(), string(nodeName), metav1.GetOptions{}) if err != nil || nodeObj == nil { framework.Logf("Failed to fetch node object %q from API server. err=%v", nodeName, err) continue diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go index d66d431c22c..df81345fabd 100644 --- a/test/e2e/storage/persistent_volumes-gce.go +++ b/test/e2e/storage/persistent_volumes-gce.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -153,7 +154,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() { ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk [Flaky]", func() { ginkgo.By("Deleting the Namespace") - err := c.CoreV1().Namespaces().Delete(ns, nil) + err := c.CoreV1().Namespaces().Delete(context.TODO(), ns, nil) framework.ExpectNoError(err) err = framework.WaitForNamespacesDeleted(c, []string{ns}, framework.DefaultNamespaceDeletionTimeout) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 8668995ae7e..2684f93a139 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -17,6 +17,7 @@ limitations under the License. 
package storage import ( + "context" "fmt" "path/filepath" "strconv" @@ -325,7 +326,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { testVol := testVols[0] pod := makeLocalPodWithNodeName(config, testVol, config.nodes[1].Name) - pod, err := config.client.CoreV1().Pods(config.ns).Create(pod) + pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod) framework.ExpectNoError(err) err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) @@ -456,7 +457,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { wg.Add(1) go func() { defer wg.Done() - w, err := config.client.CoreV1().PersistentVolumes().Watch(metav1.ListOptions{}) + w, err := config.client.CoreV1().PersistentVolumes().Watch(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) if w == nil { return @@ -475,7 +476,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { if pv.Status.Phase == v1.VolumeBound || pv.Status.Phase == v1.VolumeAvailable { continue } - pv, err = config.client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) + pv, err = config.client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { continue } @@ -486,7 +487,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { if localVolume.pv.Name != pv.Name { continue } - err = config.client.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{}) + err = config.client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err) pvConfig := makeLocalPVConfig(config, localVolume) localVolume.pv, err = e2epv.CreatePV(config.client, e2epv.MakePersistentVolume(pvConfig)) @@ -551,7 +552,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { } pod := e2epod.MakeSecPod(config.ns, pvcs, nil, false, "sleep 1", false, false, selinuxLabel, nil) - pod, err := config.client.CoreV1().Pods(config.ns).Create(pod) + pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod) framework.ExpectNoError(err) pods[pod.Name] = pod numCreated++ @@ -573,7 +574,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.By("Waiting for all pods to complete successfully") const completeTimeout = 5 * time.Minute waitErr := wait.PollImmediate(time.Second, completeTimeout, func() (done bool, err error) { - podsList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{}) + podsList, err := config.client.CoreV1().Pods(config.ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -627,7 +628,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { return } ginkgo.By(fmt.Sprintf("Clean PV %s", pv.Name)) - err := config.client.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{}) + err := config.client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err) }) @@ -645,14 +646,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count)) for i := 0; i < count; i++ { pod := e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, nil, false, "", false, false, selinuxLabel, nil) - pod, err := config.client.CoreV1().Pods(config.ns).Create(pod) + pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod) framework.ExpectNoError(err) pods[pod.Name] = 
pod } ginkgo.By("Wait for all pods are running") const runningTimeout = 5 * time.Minute waitErr := wait.PollImmediate(time.Second, runningTimeout, func() (done bool, err error) { - podsList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{}) + podsList, err := config.client.CoreV1().Pods(config.ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -672,7 +673,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error { framework.Logf("Deleting pod %v", pod.Name) - if err := config.client.CoreV1().Pods(config.ns).Delete(pod.Name, nil); err != nil { + if err := config.client.CoreV1().Pods(config.ns).Delete(context.TODO(), pod.Name, nil); err != nil { return err } @@ -696,7 +697,7 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp testVol := testVols[0] pod := makeLocalPodFunc(config, testVol, nodeName) - pod, err := config.client.CoreV1().Pods(config.ns).Create(pod) + pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod) framework.ExpectNoError(err) err = e2epod.WaitForPodNameUnschedulableInNamespace(config.client, pod.Name, pod.Namespace) @@ -790,17 +791,17 @@ func setupStorageClass(config *localTestConfig, mode *storagev1.VolumeBindingMod VolumeBindingMode: mode, } - _, err := config.client.StorageV1().StorageClasses().Create(sc) + _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc) framework.ExpectNoError(err) } func cleanupStorageClass(config *localTestConfig) { - framework.ExpectNoError(config.client.StorageV1().StorageClasses().Delete(config.scName, nil)) + framework.ExpectNoError(config.client.StorageV1().StorageClasses().Delete(context.TODO(), config.scName, nil)) } // podNode wraps RunKubectl to get node where pod is running func podNodeName(config *localTestConfig, pod *v1.Pod) (string, error) { - runtimePod, runtimePodErr := config.client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + runtimePod, runtimePodErr := config.client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) return runtimePod.Spec.NodeName, runtimePodErr } @@ -926,7 +927,7 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod const bindTimeout = 10 * time.Second waitErr := wait.PollImmediate(time.Second, bindTimeout, func() (done bool, err error) { for _, volume := range volumes { - pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(volume.pvc.Name, metav1.GetOptions{}) + pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(context.TODO(), volume.pvc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get PVC %s/%s: %v", volume.pvc.Namespace, volume.pvc.Name, err) } @@ -1154,7 +1155,7 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in spec.Spec.PodManagementPolicy = appsv1.ParallelPodManagement } - ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(spec) + ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(context.TODO(), spec) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(config.client, ssReplicas, ss) diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 57de0788785..85f314e99a7 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -17,6 +17,7 @@ limitations under the 
License. package storage import ( + "context" "fmt" "strings" "time" @@ -66,7 +67,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, // 1. verify each PV permits write access to a client pod ginkgo.By("Checking pod has write access to PersistentVolumes") for pvcKey := range claims { - pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name, metav1.GetOptions{}) + pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(context.TODO(), pvcKey.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("error getting pvc %q: %v", pvcKey.Name, err) } @@ -282,7 +283,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() { ginkgo.By("Writing to the volume.") pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')") - pod, err = c.CoreV1().Pods(ns).Create(pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns)) @@ -300,7 +301,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ginkgo.By("Verifying the mount has been cleaned.") mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount)) - pod, err = c.CoreV1().Pods(ns).Create(pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns)) @@ -352,7 +353,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { } spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe) - ss, err := c.AppsV1().StatefulSets(ns).Create(spec) + ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, 1, ss) @@ -361,7 +362,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ss, err = e2esset.Scale(c, ss, 0) framework.ExpectNoError(err) e2esset.WaitForStatusReplicas(c, ss, 0) - err = c.AppsV1().StatefulSets(ns).Delete(ss.Name, &metav1.DeleteOptions{}) + err = c.AppsV1().StatefulSets(ns).Delete(context.TODO(), ss.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err) ginkgo.By("Creating a new Statefulset and validating the data") @@ -372,7 +373,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { validateCmd += "&& sleep 10000" spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe) - ss, err = c.AppsV1().StatefulSets(ns).Create(spec) + ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), spec) framework.ExpectNoError(err) e2esset.WaitForRunningAndReady(c, 1, ss) }) @@ -434,7 +435,7 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v func createWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) { framework.Logf("Creating nfs test pod") pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command) - runPod, err := c.CoreV1().Pods(ns).Create(pod) + runPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) if err != nil { return fmt.Errorf("pod Create API error: %v", err) } diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index cbaeecdc6d4..7fbe76e75d4 100644 --- 
a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "time" "github.com/onsi/ginkgo" @@ -76,14 +77,14 @@ var _ = utils.SIGDescribe("PV Protection", func() { // make the pv definitions pv = e2epv.MakePersistentVolume(pvConfig) // create the PV - pv, err = client.CoreV1().PersistentVolumes().Create(pv) + pv, err = client.CoreV1().PersistentVolumes().Create(context.TODO(), pv) framework.ExpectNoError(err, "Error creating PV") ginkgo.By("Waiting for PV to enter phase Available") framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second)) ginkgo.By("Checking that PV Protection finalizer is set") - pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) + pv, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While getting PV status") framework.ExpectEqual(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil), true, "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers) }) @@ -97,7 +98,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { ginkgo.It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() { ginkgo.By("Deleting the PV") - err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PV") framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout) }) @@ -105,7 +106,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { ginkgo.It("Verify that PV bound to a PVC is not removed immediately", func() { ginkgo.By("Creating a PVC") pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, nameSpace) - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err, "Error creating PVC") ginkgo.By("Waiting for PVC to become Bound") @@ -113,16 +114,16 @@ var _ = utils.SIGDescribe("PV Protection", func() { framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC") - err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PV") ginkgo.By("Checking that the PV status is Terminating") - pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) + pv, err = client.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PV status") framework.ExpectNotEqual(pv.ObjectMeta.DeletionTimestamp, nil) ginkgo.By("Deleting the PVC that is bound to the PV") - err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") ginkgo.By("Checking that the PV is automatically removed from the system because it's no 
longer bound to a PVC") diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index 211e2c7d874..213f7c6b0f7 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "github.com/onsi/ginkgo" "fmt" @@ -39,7 +40,7 @@ import ( func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) + _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns) @@ -78,7 +79,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ClaimSize: t.ClaimSize, VolumeMode: &t.VolumeMode, }, nameSpace) - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err, "Error creating PVC") pvcCreatedAndNotDeleted = true @@ -92,7 +93,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) ginkgo.By("Checking that PVC Protection finalizer is set") - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While getting PVC status") framework.ExpectEqual(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil), true, "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers) }) @@ -109,7 +110,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { framework.ExpectNoError(err, "Error terminating and deleting pod") ginkgo.By("Deleting the PVC") - err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout) pvcCreatedAndNotDeleted = false @@ -117,11 +118,11 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ginkgo.It("Verify that PVC in active use by a pod is not removed immediately", func() { ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") - err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") ginkgo.By("Checking that the PVC status is Terminating") - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, 
"While checking PVC status") framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) @@ -136,11 +137,11 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ginkgo.It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() { ginkgo.By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod") - err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0)) + err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") ginkgo.By("Checking that the PVC status is Terminating") - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) @@ -153,7 +154,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { framework.ExpectNoError(err, "Error terminating and deleting pod") ginkgo.By("Checking again that the PVC status is Terminating") - pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 84d9bcaca52..00a82befde9 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -17,6 +17,7 @@ limitations under the License. 
package storage import ( + "context" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -183,29 +184,29 @@ func testZonalFailover(c clientset.Interface, ns string) { statefulSet, service, regionalPDLabels := newStatefulSet(claimTemplate, ns) ginkgo.By("creating a StorageClass " + class.Name) - _, err := c.StorageV1().StorageClasses().Create(class) + _, err := c.StorageV1().StorageClasses().Create(context.TODO(), class) framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil), + framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, nil), "Error deleting StorageClass %s", class.Name) }() ginkgo.By("creating a StatefulSet") - _, err = c.CoreV1().Services(ns).Create(service) + _, err = c.CoreV1().Services(ns).Create(context.TODO(), service) framework.ExpectNoError(err) - _, err = c.AppsV1().StatefulSets(ns).Create(statefulSet) + _, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), statefulSet) framework.ExpectNoError(err) defer func() { framework.Logf("deleting statefulset%q/%q", statefulSet.Namespace, statefulSet.Name) // typically this claim has already been deleted - framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(statefulSet.Name, nil /* options */), + framework.ExpectNoError(c.AppsV1().StatefulSets(ns).Delete(context.TODO(), statefulSet.Name, nil), "Error deleting StatefulSet %s", statefulSet.Name) framework.Logf("deleting claims in namespace %s", ns) pvc := getPVC(c, ns, regionalPDLabels) - framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil), + framework.ExpectNoError(c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, nil), "Error deleting claim %s.", pvc.Name) if pvc.Spec.VolumeName != "" { err = framework.WaitForPersistentVolumeDeleted(c, pvc.Spec.VolumeName, framework.Poll, pvDeletionTimeout) @@ -227,13 +228,13 @@ func testZonalFailover(c clientset.Interface, ns string) { ginkgo.By("getting zone information from pod") pod := getPod(c, ns, regionalPDLabels) nodeName := pod.Spec.NodeName - node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) podZone := node.Labels[v1.LabelZoneFailureDomain] ginkgo.By("tainting nodes in the zone the pod is scheduled in") selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelZoneFailureDomain: podZone})) - nodesInZone, err := c.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()}) + nodesInZone, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) framework.ExpectNoError(err) removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone) @@ -243,7 +244,7 @@ func testZonalFailover(c clientset.Interface, ns string) { }() ginkgo.By("deleting StatefulSet pod") - err = c.CoreV1().Pods(ns).Delete(pod.Name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, &metav1.DeleteOptions{}) // Verify the pod is scheduled in the other zone. 
ginkgo.By("verifying the pod is scheduled in a different zone.") @@ -256,7 +257,7 @@ func testZonalFailover(c clientset.Interface, ns string) { waitErr := wait.PollImmediate(framework.Poll, statefulSetReadyTimeout, func() (bool, error) { framework.Logf("Checking whether new pod is scheduled in zone %q", otherZone) pod := getPod(c, ns, regionalPDLabels) - node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) if err != nil { return false, nil } @@ -307,13 +308,13 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) framework.ExpectNoError(err) reversePatches[node.Name] = reversePatchBytes - _, err = c.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes) framework.ExpectNoError(err) } return func() { for nodeName, reversePatch := range reversePatches { - _, err := c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, reversePatch) + _, err := c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, reversePatch) framework.ExpectNoError(err) } } @@ -436,7 +437,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s func getPVC(c clientset.Interface, ns string, pvcLabels map[string]string) *v1.PersistentVolumeClaim { selector := labels.Set(pvcLabels).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(options) + pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(context.TODO(), options) framework.ExpectNoError(err) framework.ExpectEqual(len(pvcList.Items), 1, "There should be exactly 1 PVC matched.") @@ -446,7 +447,7 @@ func getPVC(c clientset.Interface, ns string, pvcLabels map[string]string) *v1.P func getPod(c clientset.Interface, ns string, podLabels map[string]string) *v1.Pod { selector := labels.Set(podLabels).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.CoreV1().Pods(ns).List(options) + podList, err := c.CoreV1().Pods(ns).List(context.TODO(), options) framework.ExpectNoError(err) framework.ExpectEqual(len(podList.Items), 1, "There should be exactly 1 pod matched.") @@ -632,7 +633,7 @@ func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, func waitForStatefulSetReplicasReady(statefulSetName, ns string, c clientset.Interface, Poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for StatefulSet %s to have all replicas ready", timeout, statefulSetName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - sts, err := c.AppsV1().StatefulSets(ns).Get(statefulSetName, metav1.GetOptions{}) + sts, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), statefulSetName, metav1.GetOptions{}) if err != nil { framework.Logf("Get StatefulSet %s failed, ignoring for %v: %v", statefulSetName, Poll, err) continue diff --git a/test/e2e/storage/subpath.go b/test/e2e/storage/subpath.go index aeacd2908ae..7a30a3d7b92 100644 --- a/test/e2e/storage/subpath.go +++ b/test/e2e/storage/subpath.go @@ -17,6 +17,7 @@ limitations under the License. 
package storage import ( + "context" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,13 +38,13 @@ var _ = utils.SIGDescribe("Subpath", func() { ginkgo.BeforeEach(func() { ginkgo.By("Setting up data") secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, Data: map[string][]byte{"secret-key": []byte("secret-value")}} - _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret) if err != nil && !apierrors.IsAlreadyExists(err) { framework.ExpectNoError(err, "while creating secret") } configmap := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap"}, Data: map[string]string{"configmap-key": "configmap-value"}} - _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap) if err != nil && !apierrors.IsAlreadyExists(err) { framework.ExpectNoError(err, "while creating configmap") } diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index b8aff06195e..b14a8525e8d 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -227,7 +227,7 @@ func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern test ginkgo.By("creating a StorageClass " + r.Sc.Name) - r.Sc, err = cs.StorageV1().StorageClasses().Create(r.Sc) + r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc) framework.ExpectNoError(err) if r.Sc != nil { @@ -387,12 +387,12 @@ func createPVCPVFromDynamicProvisionSC( framework.ExpectNoError(err) } - pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) var pv *v1.PersistentVolume if !isDelayedBinding(sc) { - pv, err = cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) + pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) } @@ -408,7 +408,7 @@ func isDelayedBinding(sc *storagev1.StorageClass) bool { // deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found" func deleteStorageClass(cs clientset.Interface, className string) error { - err := cs.StorageV1().StorageClasses().Delete(className, nil) + err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, nil) if err != nil && !apierrors.IsNotFound(err) { return err } @@ -592,7 +592,7 @@ func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts { totOps := getVolumeOpsFromMetricsForPlugin(testutil.Metrics(controllerMetrics), pluginName) framework.Logf("Node name not specified for getVolumeOpCounts, falling back to listing nodes from API Server") - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "Error listing nodes: %v", err) if len(nodes.Items) <= nodeLimit { // For large clusters with > nodeLimit nodes it is too time consuming to diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go index 712c7e64e1a..7f20b774fb8 100644 --- a/test/e2e/storage/testsuites/ephemeral.go +++ b/test/e2e/storage/testsuites/ephemeral.go @@ 
-17,6 +17,7 @@ limitations under the License. package testsuites import ( + "context" "flag" "fmt" "strings" @@ -257,7 +258,7 @@ func (t EphemeralTest) TestEphemeral() { StopPod(client, pod) }() framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume") - runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName @@ -321,7 +322,7 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri }) } - pod, err := c.CoreV1().Pods(ns).Create(pod) + pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err, "failed to create pod") return pod } @@ -364,7 +365,7 @@ func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) { }, } - pod, err := c.CoreV1().Pods(ns).Create(pod) + pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) switch { case err == nil: diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index 57c87899646..68a783a97c3 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -17,6 +17,7 @@ limitations under the License. package testsuites import ( + "context" "fmt" "time" @@ -404,7 +405,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i)) } - pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") return pod.Spec.NodeName } @@ -457,7 +458,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod)) }() framework.ExpectNoError(err) - pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) pods = append(pods, pod) framework.ExpectNoError(err, fmt.Sprintf("get pod%d", index)) actualNodeName := pod.Spec.NodeName diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 07cb1017739..a98b6b907d4 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -246,25 +246,25 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { if class != nil { framework.ExpectEqual(*claim.Spec.StorageClassName, class.Name) ginkgo.By("creating a StorageClass " + class.Name) - _, err = client.StorageV1().StorageClasses().Create(class) + _, err = client.StorageV1().StorageClasses().Create(context.TODO(), class) // The "should provision storage with snapshot data source" test already has created the class. 
// TODO: make class creation optional and remove the IsAlreadyExists exception framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) - class, err = client.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{}) + class, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil)) + framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, nil)) }() } ginkgo.By("creating a claim") - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) + claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim) framework.ExpectNoError(err) defer func() { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) // typically this claim has already been deleted - err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) + err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, nil) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err) } @@ -278,7 +278,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { pv := t.checkProvisioning(client, claim, class) ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) - framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)) + framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, nil)) // Wait for the PV to get deleted if reclaim policy is Delete. (If it's // Retain, there's no use waiting because the PV won't be auto-deleted and @@ -298,13 +298,13 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { // getBoundPV returns a PV details. 
func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { // Get new copy of the claim - claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { return nil, err } // Get the bound PV - pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) return pv, err } @@ -380,7 +380,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent StopPod(client, pod) }() framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) - runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName StopPod(client, pod) @@ -436,7 +436,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai command := "echo 'hello world' > /mnt/test/data" pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) - runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName StopPod(client, pod) @@ -452,7 +452,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai } pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) - runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node") StopPod(client, pod) @@ -466,7 +466,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P namespace := claims[0].Namespace ginkgo.By("creating a storage class " + t.Class.Name) - class, err := t.Client.StorageV1().StorageClasses().Create(t.Class) + class, err := t.Client.StorageV1().StorageClasses().Create(context.TODO(), t.Class) framework.ExpectNoError(err) defer func() { err = deleteStorageClass(t.Client, class.Name) @@ -477,7 +477,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P var claimNames []string var createdClaims []*v1.PersistentVolumeClaim for _, claim := range claims { - c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) + c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim) claimNames = append(claimNames, c.Name) createdClaims = append(createdClaims, c) framework.ExpectNoError(err) @@ -523,20 +523,20 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P } // collect 
node details - node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := t.Client.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("re-checking the claims to see they bound") var pvs []*v1.PersistentVolume for _, claim := range createdClaims { // Get new copy of the claim - claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // make sure claim did bind err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - pv, err := t.Client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := t.Client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) pvs = append(pvs, pv) } @@ -598,7 +598,7 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command }, } - pod, err := c.CoreV1().Pods(ns).Create(pod) + pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err, "Failed to create pod: %v", err) return pod } @@ -622,7 +622,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) { func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) { for _, claim := range pvcs { // Get new copy of the claim - claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending) } @@ -639,12 +639,12 @@ func prepareSnapshotDataSourceForProvisioning( var err error if class != nil { ginkgo.By("[Initialize dataSource]creating a StorageClass " + class.Name) - _, err = client.StorageV1().StorageClasses().Create(class) + _, err = client.StorageV1().StorageClasses().Create(context.TODO(), class) framework.ExpectNoError(err) } ginkgo.By("[Initialize dataSource]creating a initClaim") - updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(initClaim) + updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(context.TODO(), initClaim) framework.ExpectNoError(err) // write namespace to the /mnt/test (= the volume). 
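The provisioning hunks above all follow one mechanical pattern: every generated client call gains a leading context.Context argument, passed as context.TODO() in the test code. A minimal sketch of that calling shape, using the two-argument Create and three-argument Get exactly as they appear in this diff (the helper name and wiring are illustrative only, not part of the change):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createAndRereadClaim creates a PVC and immediately re-reads it, mirroring the
// Create/Get call shapes used throughout the provisioning tests in this diff:
// context.TODO() is threaded through as the first argument of every client call.
func createAndRereadClaim(c clientset.Interface, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
	created, err := c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim)
	if err != nil {
		return nil, err
	}
	// Get a fresh copy of the claim, as the tests do before checking binding status.
	return c.CoreV1().PersistentVolumeClaims(created.Namespace).Get(context.TODO(), created.Name, metav1.GetOptions{})
}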
@@ -657,7 +657,7 @@ func prepareSnapshotDataSourceForProvisioning( ginkgo.By("[Initialize dataSource]checking the initClaim") // Get new copy of the initClaim - _, err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Get(updatedClaim.Name, metav1.GetOptions{}) + _, err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Get(context.TODO(), updatedClaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("[Initialize dataSource]creating a SnapshotClass") @@ -690,7 +690,7 @@ func prepareSnapshotDataSourceForProvisioning( } framework.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name) - err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(updatedClaim.Name, nil) + err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(context.TODO(), updatedClaim.Name, nil) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err) } @@ -712,12 +712,12 @@ func preparePVCDataSourceForProvisioning( var err error if class != nil { ginkgo.By("[Initialize dataSource]creating a StorageClass " + class.Name) - _, err = client.StorageV1().StorageClasses().Create(class) + _, err = client.StorageV1().StorageClasses().Create(context.TODO(), class) framework.ExpectNoError(err) } ginkgo.By("[Initialize dataSource]creating a source PVC") - sourcePVC, err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(source) + sourcePVC, err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source) framework.ExpectNoError(err) // write namespace to the /mnt/test (= the volume). @@ -732,7 +732,7 @@ func preparePVCDataSourceForProvisioning( cleanupFunc := func() { framework.Logf("deleting source PVC %q/%q", sourcePVC.Namespace, sourcePVC.Name) - err = client.CoreV1().PersistentVolumeClaims(sourcePVC.Namespace).Delete(sourcePVC.Name, nil) + err = client.CoreV1().PersistentVolumeClaims(sourcePVC.Namespace).Delete(context.TODO(), sourcePVC.Name, nil) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting source PVC %q. Error: %v", sourcePVC.Name, err) } diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 9ae6cac56ff..9c62fb34a87 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -17,6 +17,7 @@ limitations under the License. 
package testsuites import ( + "context" "fmt" "time" @@ -130,20 +131,20 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc) ginkgo.By("creating a StorageClass " + class.Name) - class, err = cs.StorageV1().StorageClasses().Create(class) + class, err = cs.StorageV1().StorageClasses().Create(context.TODO(), class) framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil)) + framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, nil)) }() ginkgo.By("creating a claim") - pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err) defer func() { framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) // typically this claim has already been deleted - err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) + err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, nil) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err) } @@ -159,11 +160,11 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt ginkgo.By("checking the claim") // Get new copy of the claim - pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Get the bound PV - _, err = cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) + _, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("creating a SnapshotClass") diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 170da823582..7de6bdbc79b 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -17,6 +17,7 @@ limitations under the License. 
package testsuites import ( + "context" "fmt" "path/filepath" "regexp" @@ -447,7 +448,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name)) removeUnusedContainers(l.pod) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), l.pod) framework.ExpectNoError(err, "while creating pod") defer func() { ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name)) @@ -733,7 +734,7 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod, allowContainerTermi func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string, allowContainerTerminationError bool) { ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err, "while creating pod") defer func() { e2epod.DeletePodWithWait(f.ClientSet, pod) @@ -761,7 +762,7 @@ func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod, allowContainerT } waitErr := wait.PollImmediate(framework.Poll, framework.PodStartTimeout, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -816,7 +817,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { // Start pod ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err, "while creating pod") defer func() { e2epod.DeletePodWithWait(f.ClientSet, pod) @@ -833,7 +834,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { ginkgo.By("Waiting for container to restart") restarts := int32(0) err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -868,7 +869,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { stableCount := int(0) stableThreshold := int(time.Minute / framework.Poll) err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -918,13 +919,13 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec, ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name)) removeUnusedContainers(pod) - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err, "while creating pod") err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) framework.ExpectNoError(err, "while waiting for pod to be running") - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + pod, err = 
f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "while getting pod") var podNode *v1.Node @@ -949,7 +950,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec, func formatVolume(f *framework.Framework, pod *v1.Pod) { ginkgo.By(fmt.Sprintf("Creating pod to format volume %s", pod.Name)) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod) framework.ExpectNoError(err, "while creating volume init pod") err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go index 3b7e000503c..da87021ebfd 100644 --- a/test/e2e/storage/testsuites/topology.go +++ b/test/e2e/storage/testsuites/topology.go @@ -19,6 +19,7 @@ limitations under the License. package testsuites import ( + "context" "fmt" "math/rand" @@ -179,10 +180,10 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns. framework.ExpectNoError(err) ginkgo.By("Verifying pod scheduled to correct node") - pod, err := cs.CoreV1().Pods(l.pod.Namespace).Get(l.pod.Name, metav1.GetOptions{}) + pod, err := cs.CoreV1().Pods(l.pod.Namespace).Get(context.TODO(), l.pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - node, err := cs.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) t.verifyNodeTopology(node, allowedTopologies) @@ -324,11 +325,11 @@ func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyT framework.Logf("Creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.resource.Sc, l.resource.Pvc) ginkgo.By("Creating sc") - l.resource.Sc, err = cs.StorageV1().StorageClasses().Create(l.resource.Sc) + l.resource.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), l.resource.Sc) framework.ExpectNoError(err) ginkgo.By("Creating pvc") - l.resource.Pvc, err = cs.CoreV1().PersistentVolumeClaims(l.resource.Pvc.Namespace).Create(l.resource.Pvc) + l.resource.Pvc, err = cs.CoreV1().PersistentVolumeClaims(l.resource.Pvc.Namespace).Create(context.TODO(), l.resource.Pvc) framework.ExpectNoError(err) ginkgo.By("Creating pod") @@ -342,7 +343,7 @@ func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyT e2epv.SELinuxLabel, nil) l.pod.Spec.Affinity = affinity - l.pod, err = cs.CoreV1().Pods(l.pod.Namespace).Create(l.pod) + l.pod, err = cs.CoreV1().Pods(l.pod.Namespace).Create(context.TODO(), l.pod) framework.ExpectNoError(err) } diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go index f1b3dbb3d07..bbf2c1dcf18 100644 --- a/test/e2e/storage/testsuites/volume_expand.go +++ b/test/e2e/storage/testsuites/volume_expand.go @@ -17,6 +17,7 @@ limitations under the License. 
package testsuites import ( + "context" "fmt" "time" @@ -271,13 +272,13 @@ func ExpandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c var lastUpdateError error waitErr := wait.PollImmediate(resizePollInterval, 30*time.Second, func() (bool, error) { var err error - updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(pvcName, metav1.GetOptions{}) + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pvc %q for resizing: %v", pvcName, err) } updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size - updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Update(updatedPVC) + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Update(context.TODO(), updatedPVC) if err != nil { framework.Logf("Error updating pvc %s: %v", pvcName, err) lastUpdateError = err @@ -298,7 +299,7 @@ func ExpandPVCSize(origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c func WaitForResizingCondition(pvc *v1.PersistentVolumeClaim, c clientset.Interface, duration time.Duration) error { waitErr := wait.PollImmediate(resizePollInterval, duration, func() (bool, error) { var err error - updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err) @@ -324,7 +325,7 @@ func WaitForControllerVolumeResize(pvc *v1.PersistentVolumeClaim, c clientset.In waitErr := wait.PollImmediate(resizePollInterval, duration, func() (bool, error) { pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] - pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pv %q for resizing %v", pvName, err) } @@ -348,7 +349,7 @@ func WaitForPendingFSResizeCondition(pvc *v1.PersistentVolumeClaim, c clientset. var updatedPVC *v1.PersistentVolumeClaim waitErr := wait.PollImmediate(resizePollInterval, pvcConditionSyncPeriod, func() (bool, error) { var err error - updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err) @@ -376,7 +377,7 @@ func WaitForFSResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1. 
var updatedPVC *v1.PersistentVolumeClaim waitErr := wait.PollImmediate(resizePollInterval, totalResizeWaitPeriod, func() (bool, error) { var err error - updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err) diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index da015e04ace..299de6b2a04 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -22,6 +22,7 @@ limitations under the License. package testsuites import ( + "context" "fmt" "math" "path/filepath" @@ -316,7 +317,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume. ginkgo.By(fmt.Sprintf("starting %s", clientPod.Name)) podsNamespacer := cs.CoreV1().Pods(config.Namespace) - clientPod, err = podsNamespacer.Create(clientPod) + clientPod, err = podsNamespacer.Create(context.TODO(), clientPod) if err != nil { return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err) } diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index 58bf8c3f844..98f6fca6110 100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -17,6 +17,7 @@ limitations under the License. package testsuites import ( + "context" "fmt" "regexp" "strings" @@ -163,7 +164,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte ClaimSize: claimSize, StorageClassName: &l.resource.Sc.Name, }, l.ns.Name) - pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(pvc) + pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), pvc) framework.ExpectNoError(err) l.pvcs = append(l.pvcs, pvc) } @@ -174,7 +175,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte selection := e2epod.NodeSelection{} e2epod.SetAffinity(&selection, nodeName) pod.Spec.Affinity = selection.Affinity - l.runningPod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod) + l.runningPod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) ginkgo.By("Waiting for all PVCs to get Bound") @@ -190,7 +191,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte // Use affinity to schedule everything on the right node e2epod.SetAffinity(&selection, nodeName) pod.Spec.Affinity = selection.Affinity - l.unschedulablePod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod) + l.unschedulablePod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod) framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit") ginkgo.By("Waiting for the pod to get unschedulable with the right message") @@ -219,19 +220,19 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulablePodName string, pvcs []*v1.PersistentVolumeClaim, pvNames sets.String) error { var cleanupErrors []string if runningPodName != "" { - err := cs.CoreV1().Pods(ns).Delete(runningPodName, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), runningPodName, nil) if err != nil { cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete 
pod %s: %s", runningPodName, err)) } } if unschedulablePodName != "" { - err := cs.CoreV1().Pods(ns).Delete(unschedulablePodName, nil) + err := cs.CoreV1().Pods(ns).Delete(context.TODO(), unschedulablePodName, nil) if err != nil { cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete pod %s: %s", unschedulablePodName, err)) } } for _, pvc := range pvcs { - err := cs.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil) + err := cs.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvc.Name, nil) if err != nil { cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete PVC %s: %s", pvc.Name, err)) } @@ -242,7 +243,7 @@ func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulabl err := wait.Poll(5*time.Second, testSlowMultiplier*e2epv.PVDeletingTimeout, func() (bool, error) { existing := 0 for _, pvName := range pvNames.UnsortedList() { - _, err := cs.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + _, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if err == nil { existing++ } else { @@ -274,7 +275,7 @@ func waitForAllPVCsBound(cs clientset.Interface, timeout time.Duration, pvcs []* err := wait.Poll(5*time.Second, timeout, func() (bool, error) { unbound := 0 for _, pvc := range pvcs { - pvc, err := cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + pvc, err := cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -304,7 +305,7 @@ func getNodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName strin } func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) { - node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return 0, err } @@ -334,7 +335,7 @@ func getCSINodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName st // Retry with a timeout, the driver might just have been installed and kubelet takes a while to publish everything. var limit int err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) { - csiNode, err := cs.StorageV1().CSINodes().Get(nodeName, metav1.GetOptions{}) + csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { framework.Logf("%s", err) return false, nil diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 75340458bb9..648445638be 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -17,6 +17,7 @@ limitations under the License. 
package testsuites import ( + "context" "fmt" "path/filepath" "strings" @@ -197,16 +198,16 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern var err error ginkgo.By("Creating sc") - l.Sc, err = l.cs.StorageV1().StorageClasses().Create(l.Sc) + l.Sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.Sc) framework.ExpectNoError(err, "Failed to create sc") ginkgo.By("Creating pv and pvc") - l.Pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.Pv) + l.Pv, err = l.cs.CoreV1().PersistentVolumes().Create(context.TODO(), l.Pv) framework.ExpectNoError(err, "Failed to create pv") // Prebind pv l.Pvc.Spec.VolumeName = l.Pv.Name - l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.Pvc) + l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc) framework.ExpectNoError(err, "Failed to create pvc") framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc") @@ -215,7 +216,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil) // Setting node pod.Spec.NodeName = l.config.ClientNodeName - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod) framework.ExpectNoError(err, "Failed to create pod") defer func() { framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod") @@ -236,7 +237,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern } // Check the pod is still not running - p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(pod.Name, metav1.GetOptions{}) + p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)") framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending") }) @@ -251,11 +252,11 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern var err error ginkgo.By("Creating sc") - l.Sc, err = l.cs.StorageV1().StorageClasses().Create(l.Sc) + l.Sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.Sc) framework.ExpectNoError(err, "Failed to create sc") ginkgo.By("Creating pv and pvc") - l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.Pvc) + l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc) framework.ExpectNoError(err, "Failed to create pvc") eventSelector := fields.Set{ @@ -273,7 +274,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern } // Check the pvc is still pending - pvc, err := l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Get(l.Pvc.Name, metav1.GetOptions{}) + pvc, err := l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Get(context.TODO(), l.Pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to re-read the pvc after event (or timeout)") framework.ExpectEqual(pvc.Status.Phase, v1.ClaimPending, "PVC phase isn't pending") }) @@ -296,7 +297,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern pod = swapVolumeMode(pod) // Run the pod - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod) framework.ExpectNoError(err, "Failed to create pod") defer func() { 
framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod") @@ -324,7 +325,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern } // Check the pod is still not running - p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(pod.Name, metav1.GetOptions{}) + p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)") framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending") }) @@ -347,7 +348,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern } // Run the pod - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod) framework.ExpectNoError(err) defer func() { framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod)) @@ -357,10 +358,10 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern framework.ExpectNoError(err) // Reload the pod to get its node - pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(pod.Name, metav1.GetOptions{}) + pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectNotEqual(pod.Spec.NodeName, "", "pod should be scheduled to a node") - node, err := l.cs.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := l.cs.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Listing mounted volumes in the pod") diff --git a/test/e2e/storage/utils/create.go b/test/e2e/storage/utils/create.go index 1d3651f4673..2a339490cd7 100644 --- a/test/e2e/storage/utils/create.go +++ b/test/e2e/storage/utils/create.go @@ -18,6 +18,7 @@ package utils import ( "bytes" + "context" "encoding/json" "fmt" imageutils "k8s.io/kubernetes/test/utils/image" @@ -397,11 +398,11 @@ func (*serviceAccountFactory) Create(f *framework.Framework, i interface{}) (fun return nil, errorItemNotSupported } client := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.GetName()) - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create ServiceAccount") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } @@ -419,11 +420,11 @@ func (*clusterRoleFactory) Create(f *framework.Framework, i interface{}) (func() framework.Logf("Define cluster role %v", item.GetName()) client := f.ClientSet.RbacV1().ClusterRoles() - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create ClusterRole") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } @@ -440,11 +441,11 @@ func (*clusterRoleBindingFactory) Create(f *framework.Framework, i interface{}) } client := f.ClientSet.RbacV1().ClusterRoleBindings() - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create ClusterRoleBinding") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } 
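The create.go factories touched above all share one shape: Create the object, then return a cleanup closure that Deletes it, with context.TODO() now added to both calls. A minimal sketch of that pattern for a single resource type (the function name is illustrative; the signatures follow this diff):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createServiceAccountWithCleanup creates a ServiceAccount and returns a closure that
// deletes it again, mirroring the factory pattern in create.go after the context change.
func createServiceAccountWithCleanup(cs clientset.Interface, ns string, item *v1.ServiceAccount) (func() error, error) {
	client := cs.CoreV1().ServiceAccounts(ns)
	if _, err := client.Create(context.TODO(), item); err != nil {
		return nil, err
	}
	return func() error {
		// The cleanup path gets the same leading context argument.
		return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{})
	}, nil
}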
@@ -461,11 +462,11 @@ func (*roleFactory) Create(f *framework.Framework, i interface{}) (func() error, } client := f.ClientSet.RbacV1().Roles(f.Namespace.GetName()) - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create Role") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } @@ -482,11 +483,11 @@ func (*roleBindingFactory) Create(f *framework.Framework, i interface{}) (func() } client := f.ClientSet.RbacV1().RoleBindings(f.Namespace.GetName()) - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create RoleBinding") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } @@ -503,11 +504,11 @@ func (*serviceFactory) Create(f *framework.Framework, i interface{}) (func() err } client := f.ClientSet.CoreV1().Services(f.Namespace.GetName()) - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create Service") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } @@ -524,11 +525,11 @@ func (*statefulSetFactory) Create(f *framework.Framework, i interface{}) (func() } client := f.ClientSet.AppsV1().StatefulSets(f.Namespace.GetName()) - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create StatefulSet") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } @@ -545,11 +546,11 @@ func (*daemonSetFactory) Create(f *framework.Framework, i interface{}) (func() e } client := f.ClientSet.AppsV1().DaemonSets(f.Namespace.GetName()) - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create DaemonSet") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } @@ -566,11 +567,11 @@ func (*storageClassFactory) Create(f *framework.Framework, i interface{}) (func( } client := f.ClientSet.StorageV1().StorageClasses() - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create StorageClass") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } @@ -587,11 +588,11 @@ func (*csiDriverFactory) Create(f *framework.Framework, i interface{}) (func() e } client := f.ClientSet.StorageV1beta1().CSIDrivers() - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create CSIDriver") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } @@ -608,11 
+609,11 @@ func (*secretFactory) Create(f *framework.Framework, i interface{}) (func() erro } client := f.ClientSet.CoreV1().Secrets(f.Namespace.GetName()) - if _, err := client.Create(item); err != nil { + if _, err := client.Create(context.TODO(), item); err != nil { return nil, errors.Wrap(err, "create Secret") } return func() error { - return client.Delete(item.GetName(), &metav1.DeleteOptions{}) + return client.Delete(context.TODO(), item.GetName(), &metav1.DeleteOptions{}) }, nil } diff --git a/test/e2e/storage/utils/host_exec.go b/test/e2e/storage/utils/host_exec.go index 906be96be7c..ab338563d98 100644 --- a/test/e2e/storage/utils/host_exec.go +++ b/test/e2e/storage/utils/host_exec.go @@ -17,6 +17,7 @@ limitations under the License. package utils import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -97,7 +98,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod { return &privileged }(true), } - pod, err := cs.CoreV1().Pods(ns.Name).Create(hostExecPod) + pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(cs, pod) framework.ExpectNoError(err) diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index cda502200e8..a2e86d85d0c 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -17,6 +17,7 @@ limitations under the License. package utils import ( + "context" "crypto/sha256" "encoding/base64" "fmt" @@ -118,7 +119,7 @@ func isSudoPresent(nodeIP string, provider string) bool { // address. Returns an error if the node the pod is on doesn't have an // address. func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) { - node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{}) if err != nil { return "", err } @@ -298,9 +299,9 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name)) if forceDelete { - err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0)) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.NewDeleteOptions(0)) } else { - err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, &metav1.DeleteOptions{}) } framework.ExpectNoError(err) @@ -384,9 +385,9 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name)) if forceDelete { - err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, metav1.NewDeleteOptions(0)) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.NewDeleteOptions(0)) } else { - err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, &metav1.DeleteOptions{}) } framework.ExpectNoError(err, "Failed to delete pod.") @@ -465,7 +466,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) { }, }, } - pod, err := c.CoreV1().Pods(ns).Create(pod) + pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err, "Failed to create pod: %v", err) defer func() { e2epod.DeletePodOrFail(c, ns, pod.Name) @@ 
-535,13 +536,13 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa }, }, } - provisionerPod, err := podClient.Create(provisionerPod) + provisionerPod, err := podClient.Create(context.TODO(), provisionerPod) framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod)) ginkgo.By("locating the provisioner pod") - pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err) return pod @@ -578,9 +579,9 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface, }, } - roleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{}) + roleBindingClient.Delete(context.TODO(), binding.GetName(), &metav1.DeleteOptions{}) err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) { - _, err := roleBindingClient.Get(binding.GetName(), metav1.GetOptions{}) + _, err := roleBindingClient.Get(context.TODO(), binding.GetName(), metav1.GetOptions{}) return apierrors.IsNotFound(err), nil }) framework.ExpectNoError(err, "Timed out waiting for RBAC binding %s deletion: %v", binding.GetName(), err) @@ -589,7 +590,7 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface, continue } - _, err = roleBindingClient.Create(binding) + _, err = roleBindingClient.Create(context.TODO(), binding) framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err) } diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index 77187be8940..6d2bccd3a19 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -17,6 +17,7 @@ limitations under the License. 
package storage import ( + "context" "fmt" "time" @@ -80,7 +81,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { }) ginkgo.AfterEach(func() { - newPvc, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + newPvc, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Failed to get pvc %s/%s: %v", pvc.Namespace, pvc.Name, err) } else { @@ -92,7 +93,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } if invalidSc != nil { - err := c.StorageV1().StorageClasses().Delete(invalidSc.Name, nil) + err := c.StorageV1().StorageClasses().Delete(context.TODO(), invalidSc.Name, nil) framework.ExpectNoError(err, "Error deleting storageclass %v: %v", invalidSc.Name, err) invalidSc = nil } @@ -106,7 +107,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } ginkgo.By("Getting plugin name") - defaultClass, err := c.StorageV1().StorageClasses().Get(defaultScName, metav1.GetOptions{}) + defaultClass, err := c.StorageV1().StorageClasses().Get(context.TODO(), defaultScName, metav1.GetOptions{}) framework.ExpectNoError(err, "Error getting default storageclass: %v", err) pluginName := defaultClass.Provisioner @@ -116,14 +117,14 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { storageOpMetrics := getControllerStorageMetrics(controllerMetrics, pluginName) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) claims := []*v1.PersistentVolumeClaim{pvc} pod := e2epod.MakePod(ns, nil, claims, false, "") - pod, err = c.CoreV1().Pods(ns).Create(pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(c, pod) @@ -152,7 +153,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } ginkgo.By("Geting default storageclass") - defaultClass, err := c.StorageV1().StorageClasses().Get(defaultScName, metav1.GetOptions{}) + defaultClass, err := c.StorageV1().StorageClasses().Get(context.TODO(), defaultScName, metav1.GetOptions{}) framework.ExpectNoError(err, "Error getting default storageclass: %v", err) pluginName := defaultClass.Provisioner @@ -171,11 +172,11 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { "invalidparam": "invalidvalue", }, } - _, err = c.StorageV1().StorageClasses().Create(invalidSc) + _, err = c.StorageV1().StorageClasses().Create(context.TODO(), invalidSc) framework.ExpectNoError(err, "Error creating new storageclass: %v", err) pvc.Spec.StorageClassName = &invalidSc.Name - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err, "failed to create PVC %s/%s", pvc.Namespace, pvc.Name) framework.ExpectNotEqual(pvc, nil) @@ -183,7 +184,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.By("Creating a pod and expecting it to fail") pod := e2epod.MakePod(ns, nil, claims, false, "") - pod, err = c.CoreV1().Pods(ns).Create(pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name) err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout) @@ -203,19 +204,19 @@ 
var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.It("should create volume metrics with the correct PVC ref", func() { var err error - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) claims := []*v1.PersistentVolumeClaim{pvc} pod := e2epod.MakePod(ns, nil, claims, false, "") - pod, err = c.CoreV1().Pods(ns).Create(pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(c, pod) framework.ExpectNoError(err, "Error starting pod ", pod.Name) - pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Verify volume stat metrics were collected for the referenced PVC @@ -260,19 +261,19 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func() { var err error - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) claims := []*v1.PersistentVolumeClaim{pvc} pod := e2epod.MakePod(ns, nil, claims, false, "") - pod, err = c.CoreV1().Pods(ns).Create(pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(c, pod) framework.ExpectNoError(err, "Error starting pod ", pod.Name) - pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) controllerMetrics, err := metricsGrabber.GrabFromControllerManager() @@ -291,19 +292,19 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.It("should create volume metrics in Volume Manager", func() { var err error - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) claims := []*v1.PersistentVolumeClaim{pvc} pod := e2epod.MakePod(ns, nil, claims, false, "") - pod, err = c.CoreV1().Pods(ns).Create(pod) + pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(c, pod) framework.ExpectNoError(err, "Error starting pod ", pod.Name) - pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) kubeMetrics, err := metricsGrabber.GrabFromKubelet(pod.Spec.NodeName) @@ -321,7 +322,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.It("should create metrics for total number of volumes in A/D Controller", func() { var err error - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc) framework.ExpectNoError(err) framework.ExpectNotEqual(pvc, nil) @@ -335,11 +336,11 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { } // Create pod - pod, err = c.CoreV1().Pods(ns).Create(pod) + pod, err = 
c.CoreV1().Pods(ns).Create(context.TODO(), pod) framework.ExpectNoError(err) err = e2epod.WaitForPodRunningInNamespace(c, pod) framework.ExpectNoError(err, "Error starting pod ", pod.Name) - pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Get updated metrics diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index c3d02c471f0..cb180dcad44 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "net" "strings" @@ -358,7 +359,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning") class := newBetaStorageClass(*betaTest, "beta") // we need to create the class manually, testDynamicProvisioning does not accept beta class - class, err := c.StorageV1beta1().StorageClasses().Create(class) + class, err := c.StorageV1beta1().StorageClasses().Create(context.TODO(), class) framework.ExpectNoError(err) defer deleteStorageClass(c, class.Name) @@ -454,7 +455,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1Gi", } sc := newStorageClass(test, ns, suffix) - sc, err = c.StorageV1().StorageClasses().Create(sc) + sc, err = c.StorageV1().StorageClasses().Create(context.TODO(), sc) framework.ExpectNoError(err) defer deleteStorageClass(c, sc.Name) @@ -464,7 +465,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { StorageClassName: &sc.Name, VolumeMode: &test.VolumeMode, }, ns) - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc) framework.ExpectNoError(err) defer func() { framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) @@ -494,7 +495,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } class := newStorageClass(test, ns, "race") - class, err := c.StorageV1().StorageClasses().Create(class) + class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class) framework.ExpectNoError(err) defer deleteStorageClass(c, class.Name) @@ -566,7 +567,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, } } - pv, err = c.CoreV1().PersistentVolumes().Create(pv) + pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv) framework.ExpectNoError(err) ginkgo.By("waiting for the PV to get Released") @@ -578,10 +579,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.ExpectNoError(err) ginkgo.By("changing the PV reclaim policy") - pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) + pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete - pv, err = c.CoreV1().PersistentVolumes().Update(pv) + pv, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pv) framework.ExpectNoError(err) ginkgo.By("waiting for the PV to get deleted") @@ -605,7 +606,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.ExpectNoError(err) roleName := "leader-locking-nfs-provisioner" - _, err = f.ClientSet.RbacV1().Roles(ns).Create(&rbacv1.Role{ + _, err = f.ClientSet.RbacV1().Roles(ns).Create(context.TODO(), &rbacv1.Role{ 
ObjectMeta: metav1.ObjectMeta{ Name: roleName, }, @@ -690,7 +691,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: test.ClaimSize, VolumeMode: &test.VolumeMode, }, ns) - claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim) + claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim) framework.ExpectNoError(err) defer func() { framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, claim.Name, ns)) @@ -700,7 +701,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) framework.ExpectError(err) framework.Logf(err.Error()) - claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) + claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending) }) @@ -727,7 +728,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: test.ClaimSize, VolumeMode: &test.VolumeMode, }, ns) - claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim) + claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim) framework.ExpectNoError(err) defer func() { framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, claim.Name, ns)) @@ -737,7 +738,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) framework.ExpectError(err) framework.Logf(err.Error()) - claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) + claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending) }) @@ -785,11 +786,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ginkgo.By("creating a StorageClass") suffix := fmt.Sprintf("invalid-aws") class := newStorageClass(test, ns, suffix) - class, err := c.StorageV1().StorageClasses().Create(class) + class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class) framework.ExpectNoError(err) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil)) + framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, nil)) }() ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner") @@ -798,11 +799,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { StorageClassName: &class.Name, VolumeMode: &test.VolumeMode, }, ns) - claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) + claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim) framework.ExpectNoError(err) defer func() { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) - err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) + err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, nil) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err) } @@ -813,7 +814,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { // key was not provisioned. 
If the event is not delivered, we check that the volume is not Bound for whole // ClaimProvisionTimeout in the very same loop. err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) { - events, err := c.CoreV1().Events(claim.Namespace).List(metav1.ListOptions{}) + events, err := c.CoreV1().Events(claim.Namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("could not list PVC events in %s: %v", claim.Namespace, err) } @@ -823,7 +824,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } } - pvc, err := c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + pvc, err := c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) if err != nil { return true, err } @@ -844,13 +845,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) { - sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{}) + sc, err := c.StorageV1().StorageClasses().Get(context.TODO(), scName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(storageutil.IsDefaultAnnotation(sc.ObjectMeta), expectedDefault) } func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr string) { - sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{}) + sc, err := c.StorageV1().StorageClasses().Get(context.TODO(), scName, metav1.GetOptions{}) framework.ExpectNoError(err) if defaultStr == "" { @@ -864,7 +865,7 @@ func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] = defaultStr } - _, err = c.StorageV1().StorageClasses().Update(sc) + _, err = c.StorageV1().StorageClasses().Update(context.TODO(), sc) framework.ExpectNoError(err) expectedDefault := false @@ -995,13 +996,13 @@ func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod { }, }, } - provisionerPod, err := podClient.Create(provisionerPod) + provisionerPod, err := podClient.Create(context.TODO(), provisionerPod) framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod)) ginkgo.By("locating the provisioner pod") - pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{}) + pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err) return pod } @@ -1014,7 +1015,7 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]* err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) { remainingPVs = []*v1.PersistentVolume{} - allPVs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{}) + allPVs, err := c.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) if err != nil { return true, err } @@ -1036,7 +1037,7 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]* // deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found" func deleteStorageClass(c clientset.Interface, className string) { - err := c.StorageV1().StorageClasses().Delete(className, nil) + err := c.StorageV1().StorageClasses().Delete(context.TODO(), className, nil) if err != nil && !apierrors.IsNotFound(err) { 
framework.ExpectNoError(err) } diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index 783f7152f1e..11afb9a7365 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -19,6 +19,7 @@ limitations under the License. package storage import ( + "context" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -64,11 +65,11 @@ var _ = utils.SIGDescribe("Volumes", func() { "third": "this is the third file", }, } - if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(configMap); err != nil { + if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(context.TODO(), configMap); err != nil { framework.Failf("unable to create test configmap: %v", err) } defer func() { - _ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(configMap.Name, nil) + _ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(context.TODO(), configMap.Name, nil) }() // Test one ConfigMap mounted several times to test #28502 diff --git a/test/e2e/storage/vsphere/bootstrap.go b/test/e2e/storage/vsphere/bootstrap.go index c9705c1ef3f..04cbd425f98 100644 --- a/test/e2e/storage/vsphere/bootstrap.go +++ b/test/e2e/storage/vsphere/bootstrap.go @@ -17,6 +17,7 @@ limitations under the License. package vsphere import ( + "context" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" "sync" @@ -45,7 +46,7 @@ func bootstrapOnce() { framework.Failf("Failed to bootstrap vSphere with error: %v", err) } // 2. Get all nodes - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Failf("Failed to get nodes: %v", err) } diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go index ec7621d1c85..15fbf56d294 100644 --- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go +++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go @@ -17,6 +17,7 @@ limitations under the License. package vsphere import ( + "context" "time" "github.com/onsi/ginkgo" @@ -206,7 +207,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { */ ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() { ginkgo.By("Deleting the Namespace") - err := c.CoreV1().Namespaces().Delete(ns, nil) + err := c.CoreV1().Namespaces().Delete(context.TODO(), ns, nil) framework.ExpectNoError(err) err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute) diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go index 6cce9adf5d6..e710989e629 100644 --- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -17,6 +17,7 @@ limitations under the License. package vsphere import ( + "context" "strconv" "time" @@ -120,7 +121,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { pvc = nil // Verify PV is Present, after PVC is deleted and PV status should be Failed. 
- pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err) err = e2epv.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second) framework.ExpectNoError(err) @@ -186,12 +187,12 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { ginkgo.By("Creating the PV for same volume path") pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil) - pv, err = c.CoreV1().PersistentVolumes().Create(pv) + pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv) framework.ExpectNoError(err) ginkgo.By("creating the pvc") pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc) framework.ExpectNoError(err) ginkgo.By("wait for the pv and pvc to bind") @@ -212,13 +213,13 @@ func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *No } ginkgo.By("creating the pv") pv = getVSpherePersistentVolumeSpec(volumePath, persistentVolumeReclaimPolicy, nil) - pv, err = c.CoreV1().PersistentVolumes().Create(pv) + pv, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv) if err != nil { return } ginkgo.By("creating the pvc") pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) - pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) + pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc) return } @@ -246,7 +247,7 @@ func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolu ginkgo.By("delete pvc") framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) - _, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) + _, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index b02cfe98760..f6749c21eea 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -17,6 +17,7 @@ limitations under the License. 
package vsphere import ( + "context" "time" "github.com/onsi/ginkgo" @@ -115,21 +116,21 @@ func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ginkgo.By("creating the pv with label volume-type:ssd") pvSsd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels) - pvSsd, err = c.CoreV1().PersistentVolumes().Create(pvSsd) + pvSsd, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pvSsd) if err != nil { return } ginkgo.By("creating pvc with label selector to match with volume-type:vvol") pvcVvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels) - pvcVvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvcVvol) + pvcVvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvcVvol) if err != nil { return } ginkgo.By("creating pvc with label selector to match with volume-type:ssd") pvcSsd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels) - pvcSsd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvcSsd) + pvcSsd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvcSsd) return } diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index 05801286ee7..7ab49528007 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -17,6 +17,7 @@ limitations under the License. package vsphere import ( + "context" "fmt" "strconv" @@ -135,10 +136,10 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { case storageclass4: scParams[Datastore] = datastoreName } - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(scname, scParams, nil, "")) gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty") framework.ExpectNoError(err, "Failed to create storage class") - defer client.StorageV1().StorageClasses().Delete(scname, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, nil) scArrays[index] = sc } @@ -157,7 +158,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...) } } - podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + podList, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list pods") for _, pod := range podList.Items { pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...) diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index f91bb2519ba..3ddce9fe371 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -17,6 +17,7 @@ limitations under the License. 
package vsphere import ( + "context" "fmt" "github.com/onsi/ginkgo" @@ -75,9 +76,9 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { scParameters := make(map[string]string) scParameters["diskformat"] = "thin" scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil, "") - sc, err := client.StorageV1().StorageClasses().Create(scSpec) + sc, err := client.StorageV1().StorageClasses().Create(context.TODO(), scSpec) framework.ExpectNoError(err) - defer client.StorageV1().StorageClasses().Delete(sc.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, nil) ginkgo.By("Creating statefulset") @@ -93,7 +94,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { // Get the list of Volumes attached to Pods before scale down volumesBeforeScaleDown := make(map[string]string) for _, sspod := range ssPodsBeforeScaleDown.Items { - _, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{}) + _, err := client.CoreV1().Pods(namespace).Get(context.TODO(), sspod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) for _, volumespec := range sspod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { @@ -111,7 +112,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { // After scale down, verify vsphere volumes are detached from deleted pods ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down") for _, sspod := range ssPodsBeforeScaleDown.Items { - _, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{}) + _, err := client.CoreV1().Pods(namespace).Get(context.TODO(), sspod.Name, metav1.GetOptions{}) if err != nil { framework.ExpectEqual(apierrors.IsNotFound(err), true) for _, volumespec := range sspod.Spec.Volumes { @@ -139,7 +140,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { for _, sspod := range ssPodsAfterScaleUp.Items { err := e2epod.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0) framework.ExpectNoError(err) - pod, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{}) + pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), sspod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) for _, volumespec := range pod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go index 91e4a58b7c8..c89358a2911 100644 --- a/test/e2e/storage/vsphere/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -17,6 +17,7 @@ limitations under the License. 
package vsphere import ( + "context" "fmt" "sync" @@ -88,27 +89,27 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun var err error switch scname { case storageclass1: - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass1, nil, nil, "")) case storageclass2: var scVSanParameters map[string]string scVSanParameters = make(map[string]string) scVSanParameters[PolicyHostFailuresToTolerate] = "1" - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, "")) case storageclass3: var scSPBMPolicyParameters map[string]string scSPBMPolicyParameters = make(map[string]string) scSPBMPolicyParameters[SpbmStoragePolicy] = policyName - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, "")) case storageclass4: var scWithDSParameters map[string]string scWithDSParameters = make(map[string]string) scWithDSParameters[Datastore] = datastoreName scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "") - sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), scWithDatastoreSpec) } gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) - defer client.StorageV1().StorageClasses().Delete(scname, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), scname, nil) scArrays[index] = sc } @@ -152,7 +153,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I framework.ExpectNoError(err) // Get the copy of the Pod to know the assigned node name. 
- pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index 1bcdea88732..e58d730d86f 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -421,9 +421,9 @@ func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, n // Get vSphere Volume Path from PVC func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string { - pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), claimName, metav1.GetOptions{}) framework.ExpectNoError(err) - pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvclaim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) return pv.Spec.VsphereVolume.VolumePath } diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index 07d18d8d434..3596e348f79 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -17,6 +17,7 @@ limitations under the License. package vsphere import ( + "context" "github.com/onsi/ginkgo" "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -88,13 +89,13 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil) ginkgo.By("Creating pod") - pod, err := client.CoreV1().Pods(namespace).Create(podspec) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be ready") gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) // get fresh pod info - pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index f60793724c8..6f328579819 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -17,6 +17,7 @@ limitations under the License. 
package vsphere import ( + "context" "fmt" "strings" "time" @@ -79,9 +80,9 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { ginkgo.By("Creating Storage Class With Invalid Datastore") - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, "")) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -92,7 +93,7 @@ func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectError(err) - eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) + eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index 9fa7b70394e..ed88557348e 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -110,18 +110,18 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st ginkgo.By("Creating Storage Class With DiskFormat") storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil, "") - storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec) framework.ExpectNoError(err) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass) - pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec) + pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvclaimSpec) framework.ExpectNoError(err) defer func() { - client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaimSpec.Name, nil) + client.CoreV1().PersistentVolumeClaims(namespace).Delete(context.TODO(), pvclaimSpec.Name, nil) }() ginkgo.By("Waiting for claim to be in bound phase") @@ -129,11 +129,11 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st framework.ExpectNoError(err) // Get new copy of the claim - pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{}) + pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(context.TODO(), pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // Get the 
bound PV - pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvclaim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) /* @@ -143,7 +143,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done") - pod, err := client.CoreV1().Pods(namespace).Create(podSpec) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podSpec) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be running") diff --git a/test/e2e/storage/vsphere/vsphere_volume_disksize.go b/test/e2e/storage/vsphere/vsphere_volume_disksize.go index ac4ecca1c21..62c5e1fa50e 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_disksize.go +++ b/test/e2e/storage/vsphere/vsphere_volume_disksize.go @@ -17,6 +17,7 @@ limitations under the License. package vsphere import ( + "context" "time" "github.com/onsi/ginkgo" @@ -68,9 +69,9 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { expectedDiskSize := "1Mi" ginkgo.By("Creating Storage Class") - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, "")) framework.ExpectNoError(err) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass)) @@ -82,11 +83,11 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { framework.ExpectNoError(err) ginkgo.By("Getting new copy of PVC") - pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{}) + pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(context.TODO(), pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Getting PV created") - pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvclaim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Verifying if provisioned PV has the correct size") diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index c893e925d88..10548df60c5 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -17,6 +17,7 @@ limitations under the License. 
package vsphere import ( + "context" "strings" "time" @@ -130,7 +131,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, execCommand) framework.ExpectError(err) - eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{}) + eventList, err := client.CoreV1().Events(namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) // Detach and delete volume @@ -150,12 +151,12 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa } func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, "")) framework.ExpectNoError(err) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") - pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) var pvclaims []*v1.PersistentVolumeClaim @@ -182,7 +183,7 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st // detachVolume delete the volume passed in the argument and wait until volume is detached from the node, func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) { - pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) gomega.Expect(err).To(gomega.BeNil()) nodeName := pod.Spec.NodeName ginkgo.By("Deleting pod") diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index 9d7c2f93a57..7763f37c65f 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -17,6 +17,7 @@ limitations under the License. 
package vsphere import ( + "context" "fmt" "strconv" "time" @@ -114,14 +115,14 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil) - pod, err := client.CoreV1().Pods(namespace).Create(podspec) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec) framework.ExpectNoError(err) defer e2epod.DeletePodWithWait(client, pod) ginkgo.By("Waiting for pod to be ready") gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) - pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pods = append(pods, pod) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index 4cc27cac157..ece310c2003 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -80,9 +80,9 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", ginkgo.It("verify volume status after node power off", func() { ginkgo.By("Creating a Storage Class") storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil, "") - storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), storageClassSpec) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass) @@ -99,7 +99,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", ginkgo.By("Creating a Deployment") deployment, err := e2edeploy.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err)) - defer client.AppsV1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{}) + defer client.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, &metav1.DeleteOptions{}) ginkgo.By("Get pod from the deployment") podList, err := e2edeploy.GetPodsForDeployment(client, deployment) diff --git a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go index 178720f0cc3..3cdb6a3b075 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go +++ b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go @@ -17,6 +17,7 @@ limitations under the License. 
package vsphere import ( + "context" "fmt" "os" "strconv" @@ -81,7 +82,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { e2epv.DeletePersistentVolumeClaim(client, claim.Name, namespace) } ginkgo.By("Deleting StorageClass") - err = client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + err = client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) framework.ExpectNoError(err) }) @@ -90,7 +91,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { ginkgo.By("Creating Storage Class") scParameters := make(map[string]string) scParameters["diskformat"] = "thin" - storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil, "")) + storageclass, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("thinsc", scParameters, nil, "")) framework.ExpectNoError(err) ginkgo.By("Creating PVCs using the Storage Class") diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go index 328ac210fa9..cbd24b48cee 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -17,6 +17,7 @@ limitations under the License. package vsphere import ( + "context" "fmt" "time" @@ -95,7 +96,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { scList := getTestStorageClasses(client, policyName, datastoreName) defer func(scList []*storagev1.StorageClass) { for _, sc := range scList { - client.StorageV1().StorageClasses().Delete(sc.Name, nil) + client.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, nil) } }(scList) @@ -133,23 +134,23 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName var err error switch scname { case storageclass1: - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass1, nil, nil, "")) case storageclass2: var scVSanParameters map[string]string scVSanParameters = make(map[string]string) scVSanParameters[PolicyHostFailuresToTolerate] = "1" - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, "")) case storageclass3: var scSPBMPolicyParameters map[string]string scSPBMPolicyParameters = make(map[string]string) scSPBMPolicyParameters[SpbmStoragePolicy] = policyName - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, "")) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, "")) case storageclass4: var scWithDSParameters map[string]string scWithDSParameters = make(map[string]string) scWithDSParameters[Datastore] = datastoreName scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "") - sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) + sc, err = client.StorageV1().StorageClasses().Create(context.TODO(), scWithDatastoreSpec) } gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) diff --git 
a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go index ba864eaa7d1..859dee9e8ea 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -17,6 +17,7 @@ limitations under the License. package vsphere import ( + "context" "fmt" "strconv" "time" @@ -361,7 +362,7 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st ginkgo.By(fmt.Sprintf("Creating pod on the node: %v", nodeName)) podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil) - pod, err = client.CoreV1().Pods(namespace).Create(podspec) + pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), podspec) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be ready") gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go index 8aa72e00fbe..1c9759bd063 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go @@ -17,6 +17,7 @@ limitations under the License. package vsphere import ( + "context" "fmt" "strconv" "time" @@ -120,13 +121,13 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, node.name)) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil) - pod, err := client.CoreV1().Pods(namespace).Create(podspec) + pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), podspec) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for pod %d to be ready", i)) gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) - pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pods = append(pods, pod) diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index 32163510cf3..6ca8cd1932d 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -17,6 +17,7 @@ limitations under the License. 
package vsphere import ( + "context" "fmt" "hash/fnv" "time" @@ -258,9 +259,9 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) { ginkgo.By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, "")) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -290,9 +291,9 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { ginkgo.By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, "")) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -303,16 +304,16 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectError(err) - eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) + eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) } func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) { ginkgo.By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, "")) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, 
"2Gi", storageclass)) @@ -324,7 +325,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN _, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute) framework.ExpectError(err) - updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{}) + updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID) e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index 2efc45ca2f1..8cb072add27 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -17,6 +17,7 @@ limitations under the License. package vsphere import ( + "context" "fmt" "strings" "time" @@ -375,9 +376,9 @@ var _ = utils.SIGDescribe("Zone Support", func() { }) func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode)) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -417,9 +418,9 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin } func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error { - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -431,7 +432,7 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I ginkgo.By("Creating a pod") pod := e2epod.MakePod(namespace, nil, pvclaims, false, "") - pod, err = client.CoreV1().Pods(namespace).Create(pod) + pod, err = client.CoreV1().Pods(namespace).Create(context.TODO(), pod) framework.ExpectNoError(err) defer e2epod.DeletePodWithWait(client, pod) @@ -439,7 +440,7 @@ func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.I err = 
e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectError(err) - eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) + eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) // Look for PVC ProvisioningFailed event and return the message. @@ -459,9 +460,9 @@ func waitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist } func verifyPodSchedulingFails(client clientset.Interface, namespace string, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode)) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -478,9 +479,9 @@ func verifyPodSchedulingFails(client clientset.Interface, namespace string, node } func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) error { - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode)) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -491,7 +492,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectError(err) - eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) + eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) framework.Logf("Failure message : %+q", eventList.Items[0].Message) @@ -499,9 +500,9 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara } func verifyPVZoneLabels(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) { - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", nil, zones, "")) + storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, "")) 
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) - defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, nil) ginkgo.By("Creating PVC using the storage class") pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) diff --git a/test/e2e/upgrades/apparmor.go b/test/e2e/upgrades/apparmor.go index 199107c97d3..c1d80f3ace1 100644 --- a/test/e2e/upgrades/apparmor.go +++ b/test/e2e/upgrades/apparmor.go @@ -17,6 +17,7 @@ limitations under the License. package upgrades import ( + "context" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" @@ -86,7 +87,7 @@ func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) { ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod") - pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{}) + pod, err := f.PodClient().Get(context.TODO(), t.pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Should be able to get pod") framework.ExpectEqual(pod.Status.Phase, v1.PodRunning, "Pod should stay running") gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running") @@ -100,7 +101,7 @@ func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) { ginkgo.By("Verifying nodes are AppArmor enabled") - nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list nodes") for _, node := range nodes.Items { gomega.Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{ diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go index 53dd1c0a4b0..18a76520a96 100644 --- a/test/e2e/upgrades/apps/daemonsets.go +++ b/test/e2e/upgrades/apps/daemonsets.go @@ -17,6 +17,7 @@ limitations under the License. 
package upgrades import ( + "context" "github.com/onsi/ginkgo" appsv1 "k8s.io/api/apps/v1" @@ -79,7 +80,7 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a DaemonSet") var err error - if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil { + if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(context.TODO(), t.daemonSet); err != nil { framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err) } @@ -126,7 +127,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) } func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector map[string]string) (bool, error) { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -146,7 +147,7 @@ func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector m func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet map[string]string, nodeNames []string) (bool, error) { selector := labels.Set(labelSet).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := f.ClientSet.CoreV1().Pods(namespace).List(options) + podList, err := f.ClientSet.CoreV1().Pods(namespace).List(context.TODO(), options) if err != nil { return false, err } @@ -175,7 +176,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet ma } func checkDaemonStatus(f *framework.Framework, namespace string, dsName string) (bool, error) { - ds, err := f.ClientSet.AppsV1().DaemonSets(namespace).Get(dsName, metav1.GetOptions{}) + ds, err := f.ClientSet.AppsV1().DaemonSets(namespace).Get(context.TODO(), dsName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/upgrades/apps/deployments.go b/test/e2e/upgrades/apps/deployments.go index 16ba1499601..f76659fab2c 100644 --- a/test/e2e/upgrades/apps/deployments.go +++ b/test/e2e/upgrades/apps/deployments.go @@ -17,6 +17,7 @@ limitations under the License. 
package upgrades import ( + "context" "fmt" "time" @@ -66,7 +67,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns)) d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType) - deployment, err := deploymentClient.Create(d) + deployment, err := deploymentClient.Create(context.TODO(), d) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName)) @@ -75,7 +76,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName)) rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) framework.ExpectNoError(err) - rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()}) + rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()}) framework.ExpectNoError(err) rss := rsList.Items if len(rss) != 1 { @@ -97,7 +98,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment)) ginkgo.By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName)) - rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()}) + rsList, err = rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()}) framework.ExpectNoError(err) rss = rsList.Items if len(rss) != 2 { @@ -131,7 +132,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{ deploymentClient := c.AppsV1().Deployments(ns) rsClient := c.AppsV1().ReplicaSets(ns) - deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{}) + deployment, err := deploymentClient.Get(context.TODO(), deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName)) @@ -140,7 +141,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{ ginkgo.By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName)) rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) framework.ExpectNoError(err) - rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()}) + rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()}) framework.ExpectNoError(err) rss := rsList.Items if len(rss) != 2 { @@ -181,7 +182,7 @@ func (t *DeploymentUpgradeTest) Teardown(f *framework.Framework) { // waitForDeploymentRevision waits for becoming the target revision of a delopyment. func waitForDeploymentRevision(c clientset.Interface, d *appsv1.Deployment, targetRevision string) error { err := wait.PollImmediate(poll, pollLongTimeout, func() (bool, error) { - deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(d.Namespace).Get(context.TODO(), d.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/upgrades/apps/job.go b/test/e2e/upgrades/apps/job.go index 8e2b7bc87f9..90a715621e8 100644 --- a/test/e2e/upgrades/apps/job.go +++ b/test/e2e/upgrades/apps/job.go @@ -17,6 +17,7 @@ limitations under the License. 
package upgrades import ( + "context" "fmt" "strings" @@ -75,7 +76,7 @@ func (t *JobUpgradeTest) Teardown(f *framework.Framework) { func ensureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error { label := labels.SelectorFromSet(labels.Set(map[string]string{e2ejob.JobSelectorKey: jobName})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) if err != nil { return err } diff --git a/test/e2e/upgrades/apps/replicasets.go b/test/e2e/upgrades/apps/replicasets.go index e674bb2846e..bdc0e791a34 100644 --- a/test/e2e/upgrades/apps/replicasets.go +++ b/test/e2e/upgrades/apps/replicasets.go @@ -17,6 +17,7 @@ limitations under the License. package upgrades import ( + "context" "fmt" "time" @@ -58,7 +59,7 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) { ginkgo.By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns)) replicaSet := newReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage) - rs, err := c.AppsV1().ReplicaSets(ns).Create(replicaSet) + rs, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), replicaSet) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName)) @@ -79,7 +80,7 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{ // Verify the RS is the same (survives) after the upgrade ginkgo.By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName)) - upgradedRS, err := rsClient.Get(rsName, metav1.GetOptions{}) + upgradedRS, err := rsClient.Get(context.TODO(), rsName, metav1.GetOptions{}) framework.ExpectNoError(err) if upgradedRS.UID != r.UID { framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID)) diff --git a/test/e2e/upgrades/apps/statefulset.go b/test/e2e/upgrades/apps/statefulset.go index 2df6ae1af39..a8bfd362955 100644 --- a/test/e2e/upgrades/apps/statefulset.go +++ b/test/e2e/upgrades/apps/statefulset.go @@ -17,6 +17,7 @@ limitations under the License. package upgrades import ( + "context" "github.com/onsi/ginkgo" appsv1 "k8s.io/api/apps/v1" @@ -84,12 +85,12 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) { e2esset.PauseNewPods(t.set) ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) - _, err := f.ClientSet.CoreV1().Services(ns).Create(t.service) + _, err := f.ClientSet.CoreV1().Services(ns).Create(context.TODO(), t.service) framework.ExpectNoError(err) ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns) *(t.set.Spec.Replicas) = 3 - _, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(t.set) + _, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(context.TODO(), t.set) framework.ExpectNoError(err) ginkgo.By("Saturating stateful set " + t.set.Name) diff --git a/test/e2e/upgrades/cassandra.go b/test/e2e/upgrades/cassandra.go index 6d1c04a8942..1c987f5983c 100644 --- a/test/e2e/upgrades/cassandra.go +++ b/test/e2e/upgrades/cassandra.go @@ -17,6 +17,7 @@ limitations under the License. package upgrades import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -150,7 +151,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error { // getServiceIP is a helper method to extract the Ingress IP from the service. 
func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { - svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), svcName, metav1.GetOptions{}) framework.ExpectNoError(err) ingress := svc.Status.LoadBalancer.Ingress if len(ingress) == 0 { diff --git a/test/e2e/upgrades/configmaps.go b/test/e2e/upgrades/configmaps.go index 7a9a4aa1514..112ffd945f1 100644 --- a/test/e2e/upgrades/configmaps.go +++ b/test/e2e/upgrades/configmaps.go @@ -17,6 +17,7 @@ limitations under the License. package upgrades import ( + "context" "fmt" "k8s.io/api/core/v1" @@ -57,7 +58,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a ConfigMap") var err error - if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil { + if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), t.configMap); err != nil { framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err) } diff --git a/test/e2e/upgrades/etcd.go b/test/e2e/upgrades/etcd.go index f26cde0fdec..d5bbb8e4d79 100644 --- a/test/e2e/upgrades/etcd.go +++ b/test/e2e/upgrades/etcd.go @@ -17,6 +17,7 @@ limitations under the License. package upgrades import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -142,7 +143,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error { } func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { - svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), svcName, metav1.GetOptions{}) framework.ExpectNoError(err) ingress := svc.Status.LoadBalancer.Ingress if len(ingress) == 0 { diff --git a/test/e2e/upgrades/kube_proxy_migration.go b/test/e2e/upgrades/kube_proxy_migration.go index 47e6fdefb09..b2d154eaa2e 100644 --- a/test/e2e/upgrades/kube_proxy_migration.go +++ b/test/e2e/upgrades/kube_proxy_migration.go @@ -17,6 +17,7 @@ limitations under the License. package upgrades import ( + "context" "fmt" "time" @@ -230,11 +231,11 @@ func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error { func getKubeProxyStaticPods(c clientset.Interface) (*v1.PodList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{clusterComponentKey: kubeProxyLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - return c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts) + return c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts) } func getKubeProxyDaemonSet(c clientset.Interface) (*appsv1.DaemonSetList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{clusterAddonLabelKey: kubeProxyLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - return c.AppsV1().DaemonSets(metav1.NamespaceSystem).List(listOpts) + return c.AppsV1().DaemonSets(metav1.NamespaceSystem).List(context.TODO(), listOpts) } diff --git a/test/e2e/upgrades/mysql.go b/test/e2e/upgrades/mysql.go index 42ca334838f..0cece723ad1 100644 --- a/test/e2e/upgrades/mysql.go +++ b/test/e2e/upgrades/mysql.go @@ -17,6 +17,7 @@ limitations under the License. 
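Note: the getServiceIP helpers touched above (Cassandra and etcd upgrade tests) share one pattern: fetch the Service, then read the first LoadBalancer ingress entry. A framework-free sketch of that pattern with the updated Get; package and function names are hypothetical:

    package sketch

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // loadBalancerIP returns the first ingress IP reported for a Service's load balancer.
    func loadBalancerIP(ctx context.Context, c clientset.Interface, ns, svcName string) (string, error) {
        svc, err := c.CoreV1().Services(ns).Get(ctx, svcName, metav1.GetOptions{})
        if err != nil {
            return "", err
        }
        if len(svc.Status.LoadBalancer.Ingress) == 0 {
            return "", fmt.Errorf("service %s/%s has no load balancer ingress yet", ns, svcName)
        }
        return svc.Status.LoadBalancer.Ingress[0].IP, nil
    }
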
package upgrades import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -65,7 +66,7 @@ func mysqlKubectlCreate(ns, file string) { } func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string { - svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(ns).Get(context.TODO(), svcName, metav1.GetOptions{}) framework.ExpectNoError(err) ingress := svc.Status.LoadBalancer.Ingress if len(ingress) == 0 { diff --git a/test/e2e/upgrades/secrets.go b/test/e2e/upgrades/secrets.go index bd66763f170..2919b9463d1 100644 --- a/test/e2e/upgrades/secrets.go +++ b/test/e2e/upgrades/secrets.go @@ -17,6 +17,7 @@ limitations under the License. package upgrades import ( + "context" "fmt" "k8s.io/api/core/v1" @@ -55,7 +56,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a secret") var err error - if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil { + if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), t.secret); err != nil { framework.Failf("unable to create test secret %s: %v", t.secret.Name, err) } diff --git a/test/e2e/upgrades/storage/volume_mode.go b/test/e2e/upgrades/storage/volume_mode.go index cb082888d6a..3a253492aa8 100644 --- a/test/e2e/upgrades/storage/volume_mode.go +++ b/test/e2e/upgrades/storage/volume_mode.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "time" @@ -87,10 +88,10 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) { err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{}) + t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(context.TODO(), t.pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{}) + t.pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), t.pvc.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Consuming the PVC before downgrade") @@ -120,7 +121,7 @@ func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) { framework.ExpectNoError(e2epod.DeletePodWithWait(f.ClientSet, t.pod)) ginkgo.By("Deleting the PVC") - framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(t.pvc.Name, nil)) + framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(context.TODO(), t.pvc.Name, nil)) ginkgo.By("Waiting for the PV to be deleted") framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute)) diff --git a/test/e2e/upgrades/sysctl.go b/test/e2e/upgrades/sysctl.go index 3fb4c7180a4..55e0ee68994 100644 --- a/test/e2e/upgrades/sysctl.go +++ b/test/e2e/upgrades/sysctl.go @@ -17,6 +17,7 @@ limitations under the License. 
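Note: the volume mode downgrade hunk above refetches the bound claim and then its PersistentVolume. The same two-step lookup as a small sketch, assuming the context-aware Get signatures used in this diff; package, helper, and parameter names are illustrative:

    package sketch

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // boundPVForClaim refetches a claim and then looks up the PersistentVolume it is bound to.
    func boundPVForClaim(ctx context.Context, c clientset.Interface, ns, claimName string) (*v1.PersistentVolume, error) {
        pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claimName, metav1.GetOptions{})
        if err != nil {
            return nil, err
        }
        return c.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
    }
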
package upgrades import ( + "context" "fmt" "github.com/onsi/ginkgo" @@ -53,13 +54,13 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u switch upgrade { case MasterUpgrade, ClusterUpgrade: ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade") - pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(context.TODO(), t.validPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(pod.Status.Phase, v1.PodRunning) } ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade") - pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(context.TODO(), t.invalidPod.Name, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err) } diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go index 9b96adf1b2a..d90e966b9a6 100644 --- a/test/e2e/windows/density.go +++ b/test/e2e/windows/density.go @@ -17,6 +17,7 @@ limitations under the License. package windows import ( + "context" "fmt" "sort" "sync" @@ -185,12 +186,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() - obj, err := f.ClientSet.CoreV1().Pods(ns).List(options) + obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() - return f.ClientSet.CoreV1().Pods(ns).Watch(options) + return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options) }, }, &v1.Pod{}, @@ -264,7 +265,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { defer ginkgo.GinkgoRecover() defer wg.Done() - err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30)) + err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, metav1.NewDeleteOptions(30)) framework.ExpectNoError(err) err = e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), diff --git a/test/e2e/windows/dns.go b/test/e2e/windows/dns.go index 66140956cff..3a68a1ef5fa 100644 --- a/test/e2e/windows/dns.go +++ b/test/e2e/windows/dns.go @@ -17,6 +17,7 @@ limitations under the License. 
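Note: the Windows density hunk above threads context.TODO() through the ListFunc and WatchFunc closures of a cache.ListWatch. A self-contained sketch of that informer source, mirroring the test's selector handling; the package and constructor names are assumptions:

    package sketch

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/labels"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/watch"
        clientset "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
    )

    // newPodListWatch builds a ListWatch whose closures call the context-aware List and Watch.
    func newPodListWatch(c clientset.Interface, ns, podType string) *cache.ListWatch {
        return &cache.ListWatch{
            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
                obj, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
                return runtime.Object(obj), err
            },
            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
                return c.CoreV1().Pods(ns).Watch(context.TODO(), options)
            },
        }
    }
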
package windows import ( + "context" "regexp" "strings" @@ -49,12 +50,12 @@ var _ = SIGDescribe("DNS", func() { Nameservers: []string{testInjectedIP}, Searches: []string{testSearchPath}, } - testUtilsPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod) + testUtilsPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), testUtilsPod) framework.ExpectNoError(err) framework.Logf("Created pod %v", testUtilsPod) defer func() { framework.Logf("Deleting pod %s...", testUtilsPod.Name) - if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err) } }() diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go index f973825ea84..ed250af805f 100644 --- a/test/e2e/windows/gmsa_full.go +++ b/test/e2e/windows/gmsa_full.go @@ -39,6 +39,7 @@ limitations under the License. package windows import ( + "context" "fmt" "io/ioutil" "os" @@ -155,7 +156,7 @@ func findPreconfiguredGmsaNodes(c clientset.Interface) []v1.Node { nodeOpts := metav1.ListOptions{ LabelSelector: gmsaFullNodeLabel, } - nodes, err := c.CoreV1().Nodes().List(nodeOpts) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), nodeOpts) if err != nil { framework.Failf("Unable to list nodes: %v", err) } @@ -312,10 +313,10 @@ func createRBACRoleForGmsa(f *framework.Framework) (string, func(), error) { } cleanUpFunc := func() { - f.ClientSet.RbacV1().ClusterRoles().Delete(roleName, &metav1.DeleteOptions{}) + f.ClientSet.RbacV1().ClusterRoles().Delete(context.TODO(), roleName, &metav1.DeleteOptions{}) } - _, err := f.ClientSet.RbacV1().ClusterRoles().Create(role) + _, err := f.ClientSet.RbacV1().ClusterRoles().Create(context.TODO(), role) if err != nil { err = errors.Wrapf(err, "unable to create RBAC cluster role %q", roleName) } @@ -332,7 +333,7 @@ func createServiceAccount(f *framework.Framework) string { Namespace: f.Namespace.Name, }, } - if _, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(account); err != nil { + if _, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), account); err != nil { framework.Failf("unable to create service account %q: %v", accountName, err) } return accountName @@ -358,7 +359,7 @@ func bindRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rb Name: rbacRoleName, }, } - f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(binding) + f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding) } // createPodWithGmsa creates a pod using the test GMSA cred spec, and returns its name. diff --git a/test/e2e/windows/kubelet_stats.go b/test/e2e/windows/kubelet_stats.go index e44b9e56107..1fcf333b4f9 100644 --- a/test/e2e/windows/kubelet_stats.go +++ b/test/e2e/windows/kubelet_stats.go @@ -17,6 +17,7 @@ limitations under the License. 
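Note: the GMSA hunks above pair each Create with a cleanup closure that issues the matching Delete. A sketch of that create-then-cleanup idiom under the signatures used at this point in the migration (two-argument Create, pointer DeleteOptions); the helper and package names are hypothetical:

    package sketch

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // createServiceAccountWithCleanup creates a ServiceAccount and returns a function that deletes it.
    func createServiceAccountWithCleanup(c clientset.Interface, ns, name string) (func(), error) {
        sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}}
        if _, err := c.CoreV1().ServiceAccounts(ns).Create(context.TODO(), sa); err != nil {
            return nil, err
        }
        cleanup := func() {
            // Deletion errors are intentionally ignored, as in the test's cleanUpFunc.
            _ = c.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), name, &metav1.DeleteOptions{})
        }
        return cleanup, nil
    }
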
package windows import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -96,7 +97,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() { // findWindowsNode finds a Windows node that is Ready and Schedulable func findWindowsNode(f *framework.Framework) (v1.Node, error) { selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector() - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return v1.Node{}, err diff --git a/test/e2e/windows/memory_limits.go b/test/e2e/windows/memory_limits.go index c982c41a2ae..46df390cc08 100644 --- a/test/e2e/windows/memory_limits.go +++ b/test/e2e/windows/memory_limits.go @@ -17,6 +17,7 @@ limitations under the License. package windows import ( + "context" "crypto/tls" "encoding/json" "fmt" @@ -122,7 +123,7 @@ func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int) f.PodClient().Create(failurePods[0]) gomega.Eventually(func() bool { - eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{}) + eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, e := range eventList.Items { // Look for an event that shows FailedScheduling @@ -181,7 +182,7 @@ func newMemLimitTestPods(numPods int, imageName, podType string, memoryLimit str // getNodeMemory populates a nodeMemory struct with information from the first func getNodeMemory(f *framework.Framework) nodeMemory { selector := labels.Set{"beta.kubernetes.io/os": "windows"}.AsSelector() - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{ + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ LabelSelector: selector.String(), }) framework.ExpectNoError(err) @@ -232,7 +233,7 @@ func getNodeMemory(f *framework.Framework) nodeMemory { // getTotalAllocatableMemory gets the sum of all agent node's allocatable memory func getTotalAllocatableMemory(f *framework.Framework) *resource.Quantity { selector := labels.Set{"beta.kubernetes.io/os": "windows"}.AsSelector() - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{ + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ LabelSelector: selector.String(), }) framework.ExpectNoError(err) diff --git a/test/e2e/windows/security_context.go b/test/e2e/windows/security_context.go index 384b579bf67..ca9166a5e81 100644 --- a/test/e2e/windows/security_context.go +++ b/test/e2e/windows/security_context.go @@ -17,6 +17,7 @@ limitations under the License. 
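Note: findWindowsNode above selects nodes by the kubernetes.io/os label and then needs one that is Ready. A framework-free sketch of that selection with the context-aware List; the package and helper names and the osName parameter are illustrative:

    package sketch

    import (
        "context"
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/labels"
        clientset "k8s.io/client-go/kubernetes"
    )

    // firstReadyNodeWithOS lists nodes by the kubernetes.io/os label and returns the first Ready one.
    func firstReadyNodeWithOS(ctx context.Context, c clientset.Interface, osName string) (*v1.Node, error) {
        selector := labels.Set{"kubernetes.io/os": osName}.AsSelector()
        nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
        if err != nil {
            return nil, err
        }
        for i := range nodes.Items {
            for _, cond := range nodes.Items[i].Status.Conditions {
                if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue {
                    return &nodes.Items[i], nil
                }
            }
        }
        return nil, fmt.Errorf("no ready %s node found", osName)
    }
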
package windows import ( + "context" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" @@ -48,7 +49,7 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext RunAsUserName", func() { framework.Logf("Waiting for pod %s to enter the error state.", podInvalid.Name) framework.ExpectNoError(f.WaitForPodTerminated(podInvalid.Name, "")) - podInvalid, _ = f.PodClient().Get(podInvalid.Name, metav1.GetOptions{}) + podInvalid, _ = f.PodClient().Get(context.TODO(), podInvalid.Name, metav1.GetOptions{}) podTerminatedReason := testutils.TerminatedContainers(podInvalid)[runAsUserNameContainerName] if podTerminatedReason != "ContainerCannotRun" && podTerminatedReason != "StartError" { framework.Failf("The container terminated reason was supposed to be: 'ContainerCannotRun' or 'StartError', not: '%q'", podTerminatedReason) diff --git a/test/e2e_kubeadm/bootstrap_token_test.go b/test/e2e_kubeadm/bootstrap_token_test.go index acd7775cc06..8e4d4aaf12a 100644 --- a/test/e2e_kubeadm/bootstrap_token_test.go +++ b/test/e2e_kubeadm/bootstrap_token_test.go @@ -17,6 +17,7 @@ limitations under the License. package kubeadm import ( + "context" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,7 +51,7 @@ var _ = Describe("bootstrap token", func() { ginkgo.It("should exist and be properly configured", func() { secrets, err := f.ClientSet.CoreV1(). Secrets(kubeSystemNamespace). - List(metav1.ListOptions{}) + List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "error reading Secrets") tokenNum := 0 diff --git a/test/e2e_kubeadm/controlplane_nodes_test.go b/test/e2e_kubeadm/controlplane_nodes_test.go index f525422717e..7edb9b04523 100644 --- a/test/e2e_kubeadm/controlplane_nodes_test.go +++ b/test/e2e_kubeadm/controlplane_nodes_test.go @@ -17,6 +17,7 @@ limitations under the License. package kubeadm import ( + "context" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -61,7 +62,7 @@ var _ = Describe("control-plane node", func() { func getControlPlaneNodes(c clientset.Interface) *corev1.NodeList { selector := labels.Set{controlPlaneTaint: ""}.AsSelector() masters, err := c.CoreV1().Nodes(). - List(metav1.ListOptions{LabelSelector: selector.String()}) + List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) framework.ExpectNoError(err, "error reading control-plane nodes") return masters diff --git a/test/e2e_kubeadm/networking_test.go b/test/e2e_kubeadm/networking_test.go index 956a3c286c6..97bd66e19a5 100644 --- a/test/e2e_kubeadm/networking_test.go +++ b/test/e2e_kubeadm/networking_test.go @@ -17,6 +17,7 @@ limitations under the License. package kubeadm import ( + "context" "net" "strings" @@ -86,7 +87,7 @@ var _ = Describe("networking [setup-networking]", func() { netCC := cc["networking"].(map[interface{}]interface{}) if ps, ok := netCC["podSubnet"]; ok { // Check that the pod CIDR allocated to the node(s) is within the kubeadm-config podCIDR. 
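Note: the kubeadm bootstrap token hunk above lists Secrets in kube-system and counts the bootstrap tokens among them. A condensed sketch of that check with the updated List; the helper is hypothetical, and core/v1's SecretTypeBootstrapToken constant stands in for the test's own filtering logic:

    package sketch

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // countBootstrapTokens lists kube-system Secrets and counts those typed as bootstrap tokens.
    func countBootstrapTokens(ctx context.Context, c clientset.Interface) (int, error) {
        secrets, err := c.CoreV1().Secrets(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{})
        if err != nil {
            return 0, err
        }
        n := 0
        for _, s := range secrets.Items {
            if s.Type == corev1.SecretTypeBootstrapToken {
                n++
            }
        }
        return n, nil
    }
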
- nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "error listing nodes") for _, node := range nodes.Items { if !subnetWithinSubnet(ps.(string), node.Spec.PodCIDR) { @@ -111,7 +112,7 @@ var _ = Describe("networking [setup-networking]", func() { if ss, ok := netCC["serviceSubnet"]; ok { // Get the kubernetes service in the default namespace. // Check that service CIDR allocated is within the serviceSubnet range. - svc, err := f.ClientSet.CoreV1().Services("default").Get("kubernetes", metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{}) framework.ExpectNoError(err, "error getting Service %q from namespace %q", "kubernetes", "default") if !ipWithinSubnet(ss.(string), svc.Spec.ClusterIP) { framework.Failf("failed due to service(%v) cluster-IP %v not inside configured service subnet: %s", svc.Name, svc.Spec.ClusterIP, ss) @@ -134,7 +135,7 @@ var _ = Describe("networking [setup-networking]", func() { if _, ok := cc["networking"]; ok { netCC := cc["networking"].(map[interface{}]interface{}) if ps, ok := netCC["podSubnet"]; ok { - nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "error listing nodes") // Check that the pod CIDRs allocated to the node(s) are within the kubeadm-config podCIDR. var found bool diff --git a/test/e2e_kubeadm/nodes_test.go b/test/e2e_kubeadm/nodes_test.go index 4a252e94762..f3937f5a52a 100644 --- a/test/e2e_kubeadm/nodes_test.go +++ b/test/e2e_kubeadm/nodes_test.go @@ -17,6 +17,7 @@ limitations under the License. package kubeadm import ( + "context" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" @@ -45,7 +46,7 @@ var _ = Describe("nodes", func() { ginkgo.It("should have CRI annotation", func() { nodes, err := f.ClientSet.CoreV1().Nodes(). - List(metav1.ListOptions{}) + List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "error reading nodes") // checks that the nodes have the CRI annotation diff --git a/test/e2e_kubeadm/util.go b/test/e2e_kubeadm/util.go index 6a9fd3ba190..3facc5f65f6 100644 --- a/test/e2e_kubeadm/util.go +++ b/test/e2e_kubeadm/util.go @@ -17,6 +17,7 @@ limitations under the License. package kubeadm import ( + "context" appsv1 "k8s.io/api/apps/v1" authv1 "k8s.io/api/authorization/v1" corev1 "k8s.io/api/core/v1" @@ -35,7 +36,7 @@ import ( func ExpectServiceAccount(c clientset.Interface, namespace, name string) { _, err := c.CoreV1(). ServiceAccounts(namespace). - Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting ServiceAccount %q from namespace %q", name, namespace) } @@ -45,7 +46,7 @@ func ExpectServiceAccount(c clientset.Interface, namespace, name string) { func GetSecret(c clientset.Interface, namespace, name string) *corev1.Secret { r, err := c.CoreV1(). Secrets(namespace). 
- Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting Secret %q from namespace %q", name, namespace) return r } @@ -56,7 +57,7 @@ func GetSecret(c clientset.Interface, namespace, name string) *corev1.Secret { func GetConfigMap(c clientset.Interface, namespace, name string) *corev1.ConfigMap { r, err := c.CoreV1(). ConfigMaps(namespace). - Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting ConfigMap %q from namespace %q", name, namespace) return r } @@ -67,7 +68,7 @@ func GetConfigMap(c clientset.Interface, namespace, name string) *corev1.ConfigM func ExpectService(c clientset.Interface, namespace, name string) { _, err := c.CoreV1(). Services(namespace). - Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting Service %q from namespace %q", name, namespace) } @@ -77,7 +78,7 @@ func ExpectService(c clientset.Interface, namespace, name string) { func GetDeployment(c clientset.Interface, namespace, name string) *appsv1.Deployment { r, err := c.AppsV1(). Deployments(namespace). - Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting Deployment %q from namespace %q", name, namespace) return r } @@ -88,7 +89,7 @@ func GetDeployment(c clientset.Interface, namespace, name string) *appsv1.Deploy func GetDaemonSet(c clientset.Interface, namespace, name string) *appsv1.DaemonSet { r, err := c.AppsV1(). DaemonSets(namespace). - Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting DaemonSet %q from namespace %q", name, namespace) return r } @@ -99,7 +100,7 @@ func GetDaemonSet(c clientset.Interface, namespace, name string) *appsv1.DaemonS func ExpectRole(c clientset.Interface, namespace, name string) { _, err := c.RbacV1(). Roles(namespace). - Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting Role %q from namespace %q", name, namespace) } @@ -107,7 +108,7 @@ func ExpectRole(c clientset.Interface, namespace, name string) { func ExpectRoleBinding(c clientset.Interface, namespace, name string) { _, err := c.RbacV1(). RoleBindings(namespace). - Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting RoleBinding %q from namespace %q", name, namespace) } @@ -115,7 +116,7 @@ func ExpectRoleBinding(c clientset.Interface, namespace, name string) { func ExpectClusterRole(c clientset.Interface, name string) { _, err := c.RbacV1(). ClusterRoles(). - Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting ClusterRole %q", name) } @@ -123,7 +124,7 @@ func ExpectClusterRole(c clientset.Interface, name string) { func ExpectClusterRoleBinding(c clientset.Interface, name string) { _, err := c.RbacV1(). ClusterRoleBindings(). - Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting ClusterRoleBindings %q", name) } @@ -131,7 +132,7 @@ func ExpectClusterRoleBinding(c clientset.Interface, name string) { func ExpectClusterRoleBindingWithSubjectAndRole(c clientset.Interface, name, subjectKind, subject, role string) { binding, err := c.RbacV1(). ClusterRoleBindings(). 
- Get(name, metav1.GetOptions{}) + Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting ClusterRoleBindings %q", name) gomega.Expect(binding.Subjects).To( gomega.ContainElement(subjectMatcher( @@ -170,7 +171,7 @@ func ExpectSubjectHasAccessToResource(c clientset.Interface, subjectKind, subjec framework.Failf("invalid subjectKind %s", subjectKind) } - s, err := c.AuthorizationV1().SubjectAccessReviews().Create(sar) + s, err := c.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar) framework.ExpectNoError(err, "error getting SubjectAccessReview for %s %s to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes) gomega.Expect(s.Status.Allowed).Should(gomega.BeTrue(), "%s %s has no access to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes) diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 9cce7f9c2ac..6813fd186cf 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -152,7 +152,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) } else { // Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor". - w, err := f.PodClient().Watch(metav1.SingleObject(metav1.ObjectMeta{Name: pod.Name})) + w, err := f.PodClient().Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: pod.Name})) framework.ExpectNoError(err) ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout) defer cancel() @@ -171,7 +171,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. }) framework.ExpectNoError(err) } - p, err := f.PodClient().Get(pod.Name, metav1.GetOptions{}) + p, err := f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return p.Status } diff --git a/test/e2e_node/benchmark_util.go b/test/e2e_node/benchmark_util.go index ce3cf4ce849..065a546bab2 100644 --- a/test/e2e_node/benchmark_util.go +++ b/test/e2e_node/benchmark_util.go @@ -19,6 +19,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "io/ioutil" "path" @@ -156,7 +157,7 @@ func getThroughputPerfData(batchLag time.Duration, e2eLags []e2emetrics.PodLaten // name of the node, and the node capacities. func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[string]string { nodeName := framework.TestContext.NodeName - node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) cpu, ok := node.Status.Capacity[v1.ResourceCPU] diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index 051d852cd78..7f3500c8f57 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -17,6 +17,7 @@ limitations under the License. 
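Note: ExpectSubjectHasAccessToResource above ends by creating a SubjectAccessReview and asserting Status.Allowed. A standalone sketch of that authorization check with the context-aware Create; the user, verb, and resource values are placeholders:

    package sketch

    import (
        "context"

        authv1 "k8s.io/api/authorization/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // canListPods submits a SubjectAccessReview and reports whether the user may list pods in ns.
    func canListPods(ctx context.Context, c clientset.Interface, user, ns string) (bool, error) {
        sar := &authv1.SubjectAccessReview{
            Spec: authv1.SubjectAccessReviewSpec{
                User: user,
                ResourceAttributes: &authv1.ResourceAttributes{
                    Namespace: ns,
                    Verb:      "list",
                    Resource:  "pods",
                },
            },
        }
        res, err := c.AuthorizationV1().SubjectAccessReviews().Create(ctx, sar)
        if err != nil {
            return false, err
        }
        return res.Status.Allowed, nil
    }
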
package e2enode import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -86,7 +87,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C f.PodClientNS(kubeapi.NamespaceSystem).CreateSync(criticalPod) // Check that non-critical pods other than the besteffort have been evicted - updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, p := range updatedPodList.Items { if p.Name == nonCriticalBestEffort.Name { @@ -111,7 +112,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C }) func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) // Assuming that there is only one node, because this is a node e2e test. framework.ExpectEqual(len(nodeList.Items), 1) @@ -123,7 +124,7 @@ func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList { } func getNodeName(f *framework.Framework) string { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) // Assuming that there is only one node, because this is a node e2e test. framework.ExpectEqual(len(nodeList.Items), 1) diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index a0875e5f68e..c0cb426a01b 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -19,6 +19,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "math" "sort" @@ -493,12 +494,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() - obj, err := f.ClientSet.CoreV1().Pods(ns).List(options) + obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() - return f.ClientSet.CoreV1().Pods(ns).Watch(options) + return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options) }, }, &v1.Pod{}, diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go index 41e23a8dcd6..9ad110c75df 100644 --- a/test/e2e_node/device_plugin_test.go +++ b/test/e2e_node/device_plugin_test.go @@ -17,6 +17,7 @@ limitations under the License. 
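Note: getNodeCPUAndMemoryCapacity and getNodeName above both assume the node e2e environment has exactly one node. A sketch of that single-node capacity read with the context-aware List; the package and helper names are assumptions:

    package sketch

    import (
        "context"
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // singleNodeCapacity returns the CPU and memory capacity of the only node in the cluster.
    func singleNodeCapacity(ctx context.Context, c clientset.Interface) (v1.ResourceList, error) {
        nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
        if err != nil {
            return nil, err
        }
        if len(nodes.Items) != 1 {
            return nil, fmt.Errorf("expected exactly one node, got %d", len(nodes.Items))
        }
        capacity := nodes.Items[0].Status.Capacity
        return v1.ResourceList{
            v1.ResourceCPU:    capacity[v1.ResourceCPU],
            v1.ResourceMemory: capacity[v1.ResourceMemory],
        }, nil
    }
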
package e2enode import ( + "context" "path/filepath" "time" @@ -124,7 +125,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { framework.Logf("env %v", dp.Spec.Containers[0].Env) dp.Spec.NodeName = framework.TestContext.NodeName ginkgo.By("Create sample device plugin pod") - devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp) + devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp) framework.ExpectNoError(err) ginkgo.By("Waiting for devices to become available on the local node") @@ -138,7 +139,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { // and then use the same here devsLen := int64(2) gomega.Eventually(func() bool { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) return numberOfDevicesCapacity(node, resourceName) == devsLen && numberOfDevicesAllocatable(node, resourceName) == devsLen @@ -171,7 +172,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { framework.ExpectEqual(resourcesForOurPod.Containers[0].Devices[0].ResourceName, resourceName) framework.ExpectEqual(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds), 1) - pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{}) + pod1, err = f.PodClient().Get(context.TODO(), pod1.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ensurePodContainerRestart(f, pod1.Name, pod1.Name) @@ -188,7 +189,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { // Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts. ginkgo.By("Wait for node is ready") gomega.Eventually(func() bool { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) for _, cond := range node.Status.Conditions { if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) { @@ -204,14 +205,14 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gp, } - err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions) + err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions) framework.ExpectNoError(err) waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace) - _, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Get(dp.Name, getOptions) + _, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), dp.Name, getOptions) framework.Logf("Trying to get dp pod after deletion. err must be non-nil. 
err: %v", err) framework.ExpectError(err) - devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp) + devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp) framework.ExpectNoError(err) ensurePodContainerRestart(f, pod1.Name, pod1.Name) @@ -221,7 +222,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { ginkgo.By("Waiting for resource to become available on the local node after re-registration") gomega.Eventually(func() bool { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) return numberOfDevicesCapacity(node, resourceName) == devsLen && numberOfDevicesAllocatable(node, resourceName) == devsLen @@ -236,13 +237,13 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2))) ginkgo.By("By deleting the pods and waiting for container removal") - err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions) + err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions) framework.ExpectNoError(err) waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace) ginkgo.By("Waiting for stub device plugin to become unhealthy on the local node") gomega.Eventually(func() int64 { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) return numberOfDevicesAllocatable(node, resourceName) }, 30*time.Second, framework.Poll).Should(gomega.Equal(int64(0))) @@ -257,24 +258,24 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { framework.ExpectEqual(devIDRestart2, devID2) ginkgo.By("Re-register resources") - devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp) + devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp) framework.ExpectNoError(err) ginkgo.By("Waiting for the resource exported by the stub device plugin to become healthy on the local node") gomega.Eventually(func() int64 { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) return numberOfDevicesAllocatable(node, resourceName) }, 30*time.Second, framework.Poll).Should(gomega.Equal(devsLen)) ginkgo.By("by deleting the pods and waiting for container removal") - err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions) + err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions) framework.ExpectNoError(err) waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace) ginkgo.By("Waiting for stub device plugin to become unavailable on the local node") gomega.Eventually(func() bool { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, 
metav1.GetOptions{}) framework.ExpectNoError(err) return numberOfDevicesCapacity(node, resourceName) <= 0 }, 10*time.Minute, framework.Poll).Should(gomega.BeTrue()) @@ -314,13 +315,13 @@ func makeBusyboxPod(resourceName, cmd string) *v1.Pod { func ensurePodContainerRestart(f *framework.Framework, podName string, contName string) { var initialCount int32 var currentCount int32 - p, err := f.PodClient().Get(podName, metav1.GetOptions{}) + p, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil || len(p.Status.ContainerStatuses) < 1 { framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err) } initialCount = p.Status.ContainerStatuses[0].RestartCount gomega.Eventually(func() bool { - p, err = f.PodClient().Get(podName, metav1.GetOptions{}) + p, err = f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil || len(p.Status.ContainerStatuses) < 1 { return false } diff --git a/test/e2e_node/docker_test.go b/test/e2e_node/docker_test.go index a2e7e7589fe..5fdef6fbefb 100644 --- a/test/e2e_node/docker_test.go +++ b/test/e2e_node/docker_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "strings" "time" @@ -122,7 +123,7 @@ func isContainerRunning(podIP string) bool { // getContainerStartTime returns the start time of the container with the // containerName of the pod having the podName. func getContainerStartTime(f *framework.Framework, podName, containerName string) (time.Time, error) { - pod, err := f.PodClient().Get(podName, metav1.GetOptions{}) + pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return time.Time{}, fmt.Errorf("failed to get pod %q: %v", podName, err) } diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index 5fee470812b..4edf5053887 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -17,6 +17,7 @@ limitations under the License. 
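Note: the device plugin hunks above repeatedly poll the node object until a device resource's capacity or allocatable count reaches the expected value. A sketch of that wait loop using wait.PollImmediate instead of the test's gomega.Eventually, with the context-aware Get; the resource name, interval, and timeout are illustrative:

    package sketch

    import (
        "context"
        "time"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        clientset "k8s.io/client-go/kubernetes"
    )

    // waitForAllocatableResource polls a node until the named resource reports the expected allocatable count.
    func waitForAllocatableResource(c clientset.Interface, nodeName, resourceName string, want int64) error {
        return wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
            node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            q, ok := node.Status.Allocatable[v1.ResourceName(resourceName)]
            if !ok {
                return false, nil
            }
            return q.Value() == want, nil
        })
    }
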
package e2enode import ( + "context" "fmt" "reflect" "strings" @@ -89,13 +90,13 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam } // record before state so we can restore it after the test if beforeNode == nil { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) beforeNode = node } if source := beforeNode.Spec.ConfigSource; source != nil { if source.ConfigMap != nil { - cm, err := f.ClientSet.CoreV1().ConfigMaps(source.ConfigMap.Namespace).Get(source.ConfigMap.Name, metav1.GetOptions{}) + cm, err := f.ClientSet.CoreV1().ConfigMaps(source.ConfigMap.Namespace).Get(context.TODO(), source.ConfigMap.Name, metav1.GetOptions{}) framework.ExpectNoError(err) beforeConfigMap = cm } @@ -141,7 +142,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "correct" configmap off of the configuration from before the test correctKC := beforeKC.DeepCopy() correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-correct", correctKC) - correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap) framework.ExpectNoError(err) // fail to parse, we insert some bogus stuff into the configMap @@ -151,14 +152,14 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam "kubelet": "{0xdeadbeef}", }, } - failParseConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failParseConfigMap) + failParseConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), failParseConfigMap) framework.ExpectNoError(err) // fail to validate, we make a copy of correct and set an invalid KubeAPIQPS on kc before serializing invalidKC := correctKC.DeepCopy() invalidKC.KubeAPIQPS = -1 failValidateConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-fail-validate", invalidKC) - failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failValidateConfigMap) + failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), failValidateConfigMap) framework.ExpectNoError(err) correctSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -306,7 +307,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "lkg" configmap off of the configuration from before the test lkgKC := beforeKC.DeepCopy() lkgConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-intended-lkg", lkgKC) - lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap) + lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap) framework.ExpectNoError(err) // bad config map, we insert some bogus stuff into the configMap @@ -316,7 +317,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam "kubelet": "{0xdeadbeef}", }, } - badConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(badConfigMap) + badConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), badConfigMap) framework.ExpectNoError(err) lkgSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -372,7 +373,7 @@ var _ 
= framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam lkgKC := beforeKC.DeepCopy() combinedConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-combined", lkgKC) combinedConfigMap.Data[badConfigKey] = "{0xdeadbeef}" - combinedConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(combinedConfigMap) + combinedConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), combinedConfigMap) framework.ExpectNoError(err) lkgSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -425,7 +426,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "lkg" configmap off of the configuration from before the test lkgKC := beforeKC.DeepCopy() lkgConfigMap1 := newKubeletConfigMap("dynamic-kubelet-config-test-lkg-1", lkgKC) - lkgConfigMap1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap1) + lkgConfigMap1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap1) framework.ExpectNoError(err) lkgSource1 := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -438,7 +439,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam lkgStatus1.ConfigMap.ResourceVersion = lkgConfigMap1.ResourceVersion lkgConfigMap2 := newKubeletConfigMap("dynamic-kubelet-config-test-lkg-2", lkgKC) - lkgConfigMap2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap2) + lkgConfigMap2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap2) framework.ExpectNoError(err) lkgSource2 := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -490,14 +491,14 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we just create two configmaps with the same config but different names and toggle between them kc1 := beforeKC.DeepCopy() cm1 := newKubeletConfigMap("dynamic-kubelet-config-test-cm1", kc1) - cm1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm1) + cm1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), cm1) framework.ExpectNoError(err) // slightly change the config kc2 := kc1.DeepCopy() kc2.EventRecordQPS = kc1.EventRecordQPS + 1 cm2 := newKubeletConfigMap("dynamic-kubelet-config-test-cm2", kc2) - cm2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm2) + cm2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), cm2) framework.ExpectNoError(err) cm1Source := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{ @@ -546,7 +547,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "correct" configmap off of the configuration from before the test correctKC := beforeKC.DeepCopy() correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-in-place", correctKC) - correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap) framework.ExpectNoError(err) // we reuse the same name, namespace @@ -626,7 +627,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "lkg" configmap off of the configuration from before the test lkgKC := beforeKC.DeepCopy() lkgConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-in-place-lkg", lkgKC) - lkgConfigMap, err = 
f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap) + lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), lkgConfigMap) framework.ExpectNoError(err) // bad config map, we insert some bogus stuff into the configMap @@ -705,7 +706,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "correct" configmap off of the configuration from before the test correctKC := beforeKC.DeepCopy() correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-delete-createe", correctKC) - correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap) framework.ExpectNoError(err) // we reuse the same name, namespace @@ -785,7 +786,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam // we base the "correct" configmap off of the configuration from before the test correctKC := beforeKC.DeepCopy() correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-delete-createe", correctKC) - correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), correctConfigMap) framework.ExpectNoError(err) // ensure node config source is set to the config map we will mutate in-place, @@ -903,7 +904,7 @@ func updateConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { // (with respect to concurrency control) when you omit ResourceVersion. // We know that we won't perform concurrent updates during this test. tc.configMap.ResourceVersion = "" - cm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Update(tc.configMap) + cm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Update(context.TODO(), tc.configMap) if err != nil { return err } @@ -928,14 +929,14 @@ func recreateConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error // deleteConfigMapFunc simply deletes tc.configMap func deleteConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { - return f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Delete(tc.configMap.Name, &metav1.DeleteOptions{}) + return f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Delete(context.TODO(), tc.configMap.Name, &metav1.DeleteOptions{}) } // createConfigMapFunc creates tc.configMap and updates the UID and ResourceVersion on tc.configMap // to match the created configMap func createConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { tc.configMap.ResourceVersion = "" - cm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(tc.configMap) + cm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(context.TODO(), tc.configMap) if err != nil { return err } @@ -953,7 +954,7 @@ func (tc *nodeConfigTestCase) checkNodeConfigSource(f *framework.Framework) { interval = time.Second ) gomega.Eventually(func() error { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("checkNodeConfigSource: case %s: %v", tc.desc, err) } @@ -973,7 +974,7 @@ func (tc *nodeConfigTestCase) checkConfigStatus(f *framework.Framework) { ) errFmt := fmt.Sprintf("checkConfigStatus: 
case %s:", tc.desc) + " %v" gomega.Eventually(func() error { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { return fmt.Errorf(errFmt, err) } @@ -1047,7 +1048,7 @@ func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) { interval = time.Second ) gomega.Eventually(func() error { - events, err := f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{}) + events, err := f.ClientSet.CoreV1().Events("").List(context.TODO(), metav1.ListOptions{}) if err != nil { return fmt.Errorf("checkEvent: case %s: %v", tc.desc, err) } diff --git a/test/e2e_node/e2e_node_suite_test.go b/test/e2e_node/e2e_node_suite_test.go index 3a060fbe5cc..33b381b0f3b 100644 --- a/test/e2e_node/e2e_node_suite_test.go +++ b/test/e2e_node/e2e_node_suite_test.go @@ -22,6 +22,7 @@ package e2enode import ( "bytes" + "context" "encoding/json" "flag" "fmt" @@ -302,7 +303,7 @@ func updateTestContext() error { // getNode gets node object from the apiserver. func getNode(c *clientset.Clientset) (*v1.Node, error) { - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err, "should be able to list nodes.") if nodes == nil { return nil, fmt.Errorf("the node list is nil") diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 66f70a19b66..3a29d8e4b8c 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "path/filepath" "strconv" @@ -302,11 +303,11 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [ initialConfig.EvictionMinimumReclaim = map[string]string{} }) ginkgo.BeforeEach(func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{}) + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{}) framework.ExpectNoError(err) }) specs := []podEvictSpec{ @@ -359,11 +360,11 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser initialConfig.EvictionMinimumReclaim = map[string]string{} }) ginkgo.BeforeEach(func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{}) + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), 
highPriorityClassName, &metav1.DeleteOptions{}) framework.ExpectNoError(err) }) specs := []podEvictSpec{ @@ -412,11 +413,11 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis initialConfig.EvictionMinimumReclaim = map[string]string{} }) ginkgo.BeforeEach(func() { - _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) + _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}) framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true) }) ginkgo.AfterEach(func() { - err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{}) + err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), highPriorityClassName, &metav1.DeleteOptions{}) framework.ExpectNoError(err) }) specs := []podEvictSpec{ @@ -582,7 +583,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe // This function panics (via Expect) if eviction ordering is violated, or if a priority-zero pod fails. func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) error { // Gather current information - updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -654,7 +655,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe "involvedObject.namespace": f.Namespace.Name, "reason": eviction.Reason, }.AsSelector().String() - podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{FieldSelector: selector}) + podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{FieldSelector: selector}) gomega.Expect(err).To(gomega.BeNil(), "Unexpected error getting events during eviction test: %v", err) framework.ExpectEqual(len(podEvictEvents.Items), 1, "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items)) event := podEvictEvents.Items[0] diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index 983651f792b..b9833500a5d 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "strconv" "time" @@ -313,7 +314,7 @@ func getRestartingContainerCommand(path string, containerNum int, restarts int32 } func verifyPodRestartCount(f *framework.Framework, podName string, expectedNumContainers int, expectedRestartCount int32) error { - updatedPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{}) + updatedPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e_node/gpu_device_plugin_test.go b/test/e2e_node/gpu_device_plugin_test.go index 2edb366fa82..f5df2d2dc3d 100644 --- a/test/e2e_node/gpu_device_plugin_test.go +++ b/test/e2e_node/gpu_device_plugin_test.go @@ -17,6 +17,7 @@ limitations under the License. 
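Note: verifyEvictionEvents above builds a field selector over the involved object and the eviction reason before listing Events. The same query as a small sketch with the context-aware List; the helper name and package are hypothetical:

    package sketch

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/fields"
        clientset "k8s.io/client-go/kubernetes"
    )

    // evictionEventsForPod counts Events recorded for a pod with the given reason.
    func evictionEventsForPod(ctx context.Context, c clientset.Interface, ns, podName, reason string) (int, error) {
        selector := fields.Set{
            "involvedObject.kind":      "Pod",
            "involvedObject.name":      podName,
            "involvedObject.namespace": ns,
            "reason":                   reason,
        }.AsSelector().String()
        events, err := c.CoreV1().Events(ns).List(ctx, metav1.ListOptions{FieldSelector: selector})
        if err != nil {
            return 0, err
        }
        return len(events.Items), nil
    }
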
package e2enode import ( + "context" "os/exec" "strconv" "time" @@ -76,7 +77,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi } ginkgo.By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE") - devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(NVIDIADevicePlugin()) + devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), NVIDIADevicePlugin()) framework.ExpectNoError(err) ginkgo.By("Waiting for GPUs to become available on the local node") @@ -90,7 +91,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi }) ginkgo.AfterEach(func() { - l, err := f.PodClient().List(metav1.ListOptions{}) + l, err := f.PodClient().List(context.TODO(), metav1.ListOptions{}) framework.ExpectNoError(err) for _, p := range l.Items { @@ -98,7 +99,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi continue } - f.PodClient().Delete(p.Name, &metav1.DeleteOptions{}) + f.PodClient().Delete(context.TODO(), p.Name, &metav1.DeleteOptions{}) } }) @@ -109,7 +110,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi deviceIDRE := "gpu devices: (nvidia[0-9]+)" devID1 := parseLog(f, p1.Name, p1.Name, deviceIDRE) - p1, err := f.PodClient().Get(p1.Name, metav1.GetOptions{}) + p1, err := f.PodClient().Get(context.TODO(), p1.Name, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("Restarting Kubelet and waiting for the current running pod to restart") @@ -134,10 +135,10 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi framework.ExpectEqual(devID1, devID2) ginkgo.By("Deleting device plugin.") - f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(devicePluginPod.Name, &metav1.DeleteOptions{}) + f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), devicePluginPod.Name, &metav1.DeleteOptions{}) ginkgo.By("Waiting for GPUs to become unavailable on the local node") gomega.Eventually(func() bool { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) return numberOfNVIDIAGPUs(node) <= 0 }, 10*time.Minute, framework.Poll).Should(gomega.BeTrue()) diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go index 032dac36beb..c15d2a2a541 100644 --- a/test/e2e_node/hugepages_test.go +++ b/test/e2e_node/hugepages_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "os" "os/exec" @@ -130,7 +131,7 @@ func isHugePageSupported() bool { // pollResourceAsString polls for a specified resource and capacity from node func pollResourceAsString(f *framework.Framework, resourceName string) string { - node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) amount := amountOfResourceAsString(node, resourceName) framework.Logf("amount of %v: %v", resourceName, amount) diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index 83fc9d4555b..e1392cf6e19 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package e2enode import ( + "context" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" @@ -52,7 +53,7 @@ var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() { framework.ExpectNoError(e2epod.WaitTimeoutForPodNoLongerRunningInNamespace( f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) - runningPod, err := f.PodClient().Get(pod.Name, metav1.GetOptions{}) + runningPod, err := f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) status := runningPod.Status diff --git a/test/e2e_node/log_path_test.go b/test/e2e_node/log_path_test.go index a142d0c89f3..41646f326a5 100644 --- a/test/e2e_node/log_path_test.go +++ b/test/e2e_node/log_path_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2enode import ( + "context" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" @@ -142,7 +143,7 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() { logDir := kubelet.ContainerLogsDir // get containerID from created Pod - createdLogPod, err := podClient.Get(logPodName, metav1.GetOptions{}) + createdLogPod, err := podClient.Get(context.TODO(), logPodName, metav1.GetOptions{}) logContainerID := kubecontainer.ParseContainerID(createdLogPod.Status.ContainerStatuses[0].ContainerID) framework.ExpectNoError(err, "Failed to get pod: %s", logPodName) @@ -159,7 +160,7 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() { logCRIDir := "/var/log/pods" // get podID from created Pod - createdLogPod, err := podClient.Get(logPodName, metav1.GetOptions{}) + createdLogPod, err := podClient.Get(context.TODO(), logPodName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get pod: %s", logPodName) podNs := createdLogPod.Namespace podName := createdLogPod.Name diff --git a/test/e2e_node/mirror_pod_test.go b/test/e2e_node/mirror_pod_test.go index cc0cb27f5b9..52a261b0a4b 100644 --- a/test/e2e_node/mirror_pod_test.go +++ b/test/e2e_node/mirror_pod_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package e2enode import ( + "context" goerrors "errors" "fmt" "os" @@ -67,7 +68,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() { */ ginkgo.It("should be updated when static pod updated [NodeConformance]", func() { ginkgo.By("get mirror pod uid") - pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) uid := pod.UID @@ -82,7 +83,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() { }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) ginkgo.By("check the mirror pod container image is updated") - pod, err = f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(len(pod.Spec.Containers), 1) framework.ExpectEqual(pod.Spec.Containers[0].Image, image) @@ -94,12 +95,12 @@ var _ = framework.KubeDescribe("MirrorPod", func() { */ ginkgo.It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() { ginkgo.By("get mirror pod uid") - pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) uid := pod.UID ginkgo.By("delete the mirror pod with grace period 30s") - err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(30)) + err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, metav1.NewDeleteOptions(30)) framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be recreated") @@ -114,12 +115,12 @@ var _ = framework.KubeDescribe("MirrorPod", func() { */ ginkgo.It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() { ginkgo.By("get mirror pod uid") - pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) uid := pod.UID ginkgo.By("delete the mirror pod with grace period 0s") - err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(ns).Delete(context.TODO(), mirrorPodName, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) ginkgo.By("wait for the mirror pod to be recreated") @@ -176,7 +177,7 @@ func deleteStaticPod(dir, name, namespace string) error { } func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error { - _, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + _, err := cl.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return nil } @@ -184,7 +185,7 @@ func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) err } func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error { - pod, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + pod, err := cl.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) } @@ -195,7 +196,7 @@ func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error } func checkMirrorPodRecreatedAndRunning(cl clientset.Interface, name, namespace string, oUID types.UID) error { - pod, err := 
cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + pod, err := cl.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) } @@ -231,7 +232,7 @@ func validateMirrorPod(cl clientset.Interface, mirrorPod *v1.Pod) error { if len(mirrorPod.OwnerReferences) != 1 { return fmt.Errorf("expected mirror pod %q to have a single owner reference: got %d", mirrorPod.Name, len(mirrorPod.OwnerReferences)) } - node, err := cl.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := cl.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("failed to fetch test node: %v", err) } diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go index ca25e96afe9..45cf79f73df 100644 --- a/test/e2e_node/node_container_manager_test.go +++ b/test/e2e_node/node_container_manager_test.go @@ -19,6 +19,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "io/ioutil" "path/filepath" @@ -191,7 +192,7 @@ func runTest(f *framework.Framework) error { // TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings. // The node may not have updated capacity and allocatable yet, so check that it happens eventually. gomega.Eventually(func() error { - nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 62e1851705c..809c304805f 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -163,7 +163,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete ginkgo.By("Create the test log file") framework.ExpectNoError(err) ginkgo.By("Create config map for the node problem detector") - _, err = c.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{ + _, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: configName}, Data: map[string]string{path.Base(configFile): config}, }) @@ -237,7 +237,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete }, }, }) - pod, err := f.PodClient().Get(name, metav1.GetOptions{}) + pod, err := f.PodClient().Get(context.TODO(), name, metav1.GetOptions{}) framework.ExpectNoError(err) // TODO: remove hardcoded kubelet volume directory path // framework.TestContext.KubeVolumeDir is currently not populated for node e2e @@ -375,13 +375,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete framework.Logf("Node Problem Detector logs:\n %s", log) } ginkgo.By("Delete the node problem detector") - f.PodClient().Delete(name, metav1.NewDeleteOptions(0)) + f.PodClient().Delete(context.TODO(), name, metav1.NewDeleteOptions(0)) ginkgo.By("Wait for the node problem detector to disappear") gomega.Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed()) ginkgo.By("Delete the config map") - c.CoreV1().ConfigMaps(ns).Delete(configName, nil) + c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), configName, nil) ginkgo.By("Clean up the events") - 
gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed()) + gomega.Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), eventListOptions)).To(gomega.Succeed()) ginkgo.By("Clean up the node condition") patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition)) c.CoreV1().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do(context.TODO()) @@ -407,7 +407,7 @@ func injectLog(file string, timestamp time.Time, log string, num int) error { // verifyEvents verifies there are num specific events generated with given reason and message. func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int, reason, message string) error { - events, err := e.List(options) + events, err := e.List(context.TODO(), options) if err != nil { return err } @@ -426,7 +426,7 @@ func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, nu // verifyTotalEvents verifies there are num events in total. func verifyTotalEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int) error { - events, err := e.List(options) + events, err := e.List(context.TODO(), options) if err != nil { return err } @@ -442,7 +442,7 @@ func verifyTotalEvents(e coreclientset.EventInterface, options metav1.ListOption // verifyNodeCondition verifies specific node condition is generated, if reason and message are empty, they will not be checked func verifyNodeCondition(n coreclientset.NodeInterface, condition v1.NodeConditionType, status v1.ConditionStatus, reason, message string) error { - node, err := n.Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := n.Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index ecfa1449282..0595313ad82 100644 --- a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package e2enode import ( + "context" "strings" "k8s.io/api/core/v1" @@ -202,7 +203,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := f.PodClient().Delete(context.TODO(), guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("pod" + podUID) f.PodClient().Create(pod) @@ -247,7 +248,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := f.PodClient().Delete(context.TODO(), bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID) f.PodClient().Create(pod) @@ -292,7 +293,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { }) ginkgo.By("Checking if the pod cgroup was deleted", func() { gp := int64(1) - err := f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) + err := f.PodClient().Delete(context.TODO(), burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) framework.ExpectNoError(err) pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID) f.PodClient().Create(pod) diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go index 89dda692fd8..6ff1d4008de 100644 --- a/test/e2e_node/resource_collector.go +++ b/test/e2e_node/resource_collector.go @@ -20,6 +20,7 @@ package e2enode import ( "bytes" + "context" "fmt" "io/ioutil" "log" @@ -375,7 +376,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { defer ginkgo.GinkgoRecover() defer wg.Done() - err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30)) + err := f.PodClient().Delete(context.TODO(), pod.ObjectMeta.Name, metav1.NewDeleteOptions(30)) framework.ExpectNoError(err) gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go index fb543e77ce4..d3920fb6018 100644 --- a/test/e2e_node/restart_test.go +++ b/test/e2e_node/restart_test.go @@ -19,6 +19,7 @@ limitations under the License. package e2enode import ( + "context" "fmt" "os/exec" "time" @@ -37,7 +38,7 @@ import ( // If the timeout is hit, it returns the list of currently running pods. func waitForPods(f *framework.Framework, podCount int, timeout time.Duration) (runningPods []*v1.Pod) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { - podList, err := f.PodClient().List(metav1.ListOptions{}) + podList, err := f.PodClient().List(context.TODO(), metav1.ListOptions{}) if err != nil { framework.Logf("Failed to list pods on node: %v", err) continue diff --git a/test/e2e_node/startup_probe_test.go b/test/e2e_node/startup_probe_test.go index 6d939d38dc4..a822db422e7 100644 --- a/test/e2e_node/startup_probe_test.go +++ b/test/e2e_node/startup_probe_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package e2enode import ( + "context" "time" v1 "k8s.io/api/core/v1" @@ -168,12 +169,12 @@ var _ = framework.KubeDescribe("StartupProbe [Serial] [Disruptive]", func() { } p := podClient.Create(startupPodSpec(startupProbe, readinessProbe, nil, cmd)) - p, err := podClient.Get(p.Name, metav1.GetOptions{}) + p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) f.WaitForPodReady(p.Name) - p, err = podClient.Get(p.Name, metav1.GetOptions{}) + p, err = podClient.Get(context.TODO(), p.Name, metav1.GetOptions{}) framework.ExpectNoError(err) isReady, err := testutils.PodRunningReady(p) diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index fef0046f46d..d7f844d264a 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -17,6 +17,7 @@ limitations under the License. package e2enode import ( + "context" "encoding/json" "flag" "fmt" @@ -239,7 +240,7 @@ func setNodeConfigSource(f *framework.Framework, source *v1.NodeConfigSource) er nodeclient := f.ClientSet.CoreV1().Nodes() // get the node - node, err := nodeclient.Get(framework.TestContext.NodeName, metav1.GetOptions{}) + node, err := nodeclient.Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { return err } @@ -248,7 +249,7 @@ func setNodeConfigSource(f *framework.Framework, source *v1.NodeConfigSource) er node.Spec.ConfigSource = source // update to the new source - _, err = nodeclient.Update(node) + _, err = nodeclient.Update(context.TODO(), node) if err != nil { return err } @@ -317,7 +318,7 @@ func decodeConfigz(resp *http.Response) (*kubeletconfig.KubeletConfiguration, er // creates a configmap containing kubeCfg in kube-system namespace func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*v1.ConfigMap, error) { cmap := newKubeletConfigMap("testcfg", internalKC) - cmap, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cmap) + cmap, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(context.TODO(), cmap) if err != nil { return nil, err } @@ -340,7 +341,7 @@ func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfigura // listNamespaceEvents lists the events in the given namespace. func listNamespaceEvents(c clientset.Interface, ns string) error { - ls, err := c.CoreV1().Events(ns).List(metav1.ListOptions{}) + ls, err := c.CoreV1().Events(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } diff --git a/test/e2e_node/volume_manager_test.go b/test/e2e_node/volume_manager_test.go index 1a8c25d44da..fcaaffe3f53 100644 --- a/test/e2e_node/volume_manager_test.go +++ b/test/e2e_node/volume_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package e2enode import ( + "context" "time" "k8s.io/api/core/v1" @@ -114,7 +115,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() { }) err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) gp := int64(1) - f.PodClient().Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) + f.PodClient().Delete(context.TODO(), pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) if err == nil { break } diff --git a/test/images/agnhost/nettest/nettest.go b/test/images/agnhost/nettest/nettest.go index dd1e7204f1f..e971203d5d6 100644 --- a/test/images/agnhost/nettest/nettest.go +++ b/test/images/agnhost/nettest/nettest.go @@ -32,6 +32,7 @@ package nettest import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -315,7 +316,7 @@ func contactOthers(state *State) { //getWebserverEndpoints returns the webserver endpoints as a set of String, each in the format like "http://{ip}:{port}" func getWebserverEndpoints(client clientset.Interface) sets.String { - endpoints, err := client.CoreV1().Endpoints(namespace).Get(service, v1.GetOptions{}) + endpoints, err := client.CoreV1().Endpoints(namespace).Get(context.TODO(), service, v1.GetOptions{}) eps := sets.String{} if err != nil { state.Logf("Unable to read the endpoints for %v/%v: %v.", namespace, service, err) diff --git a/test/integration/apimachinery/watch_restart_test.go b/test/integration/apimachinery/watch_restart_test.go index 4c08e5d7c94..c9eda16af41 100644 --- a/test/integration/apimachinery/watch_restart_test.go +++ b/test/integration/apimachinery/watch_restart_test.go @@ -83,7 +83,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) { getListFunc := func(c *kubernetes.Clientset, secret *corev1.Secret) func(options metav1.ListOptions) *corev1.SecretList { return func(options metav1.ListOptions) *corev1.SecretList { options.FieldSelector = fields.OneTermEqualSelector("metadata.name", secret.Name).String() - res, err := c.CoreV1().Secrets(secret.Namespace).List(options) + res, err := c.CoreV1().Secrets(secret.Namespace).List(context.TODO(), options) if err != nil { t.Fatalf("Failed to list Secrets: %v", err) } @@ -94,7 +94,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) { getWatchFunc := func(c *kubernetes.Clientset, secret *corev1.Secret) func(options metav1.ListOptions) (watch.Interface, error) { return func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fields.OneTermEqualSelector("metadata.name", secret.Name).String() - res, err := c.CoreV1().Secrets(secret.Namespace).Watch(options) + res, err := c.CoreV1().Secrets(secret.Namespace).Watch(context.TODO(), options) if err != nil { t.Fatalf("Failed to create a watcher on Secrets: %v", err) } @@ -119,7 +119,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) { counter = counter + 1 patch := fmt.Sprintf(`{"metadata": {"annotations": {"count": "%d"}}}`, counter) - _, err := c.CoreV1().Secrets(secret.Namespace).Patch(secret.Name, types.StrategicMergePatchType, []byte(patch)) + _, err := c.CoreV1().Secrets(secret.Namespace).Patch(context.TODO(), secret.Name, types.StrategicMergePatchType, []byte(patch)) if err != nil { t.Fatalf("Failed to patch secret: %v", err) } @@ -212,7 +212,7 @@ func TestWatchRestartsIfTimeoutNotReached(t *testing.T) { t.Fatalf("Failed to create clientset: %v", err) } - secret, err := c.CoreV1().Secrets(tc.secret.Namespace).Create(tc.secret) + secret, err := c.CoreV1().Secrets(tc.secret.Namespace).Create(context.TODO(), tc.secret) if err != nil { 
t.Fatalf("Failed to create testing secret %s/%s: %v", tc.secret.Namespace, tc.secret.Name, err) } diff --git a/test/integration/apiserver/admissionwebhook/admission_test.go b/test/integration/apiserver/admissionwebhook/admission_test.go index e792fa19698..d1f450aaaff 100644 --- a/test/integration/apiserver/admissionwebhook/admission_test.go +++ b/test/integration/apiserver/admissionwebhook/admission_test.go @@ -459,7 +459,7 @@ func testWebhookAdmission(t *testing.T, watchCache bool) { // create CRDs etcd.CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(server.ClientConfig), false, etcd.GetCustomResourceDefinitionData()...) - if _, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil { + if _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil { t.Fatal(err) } @@ -1048,7 +1048,7 @@ func testPodBindingEviction(c *testContext) { zero := int64(0) forceDelete := &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background} defer func() { - err := c.clientset.CoreV1().Pods(pod.GetNamespace()).Delete(pod.GetName(), forceDelete) + err := c.clientset.CoreV1().Pods(pod.GetNamespace()).Delete(context.TODO(), pod.GetName(), forceDelete) if err != nil && !apierrors.IsNotFound(err) { c.t.Error(err) return @@ -1450,7 +1450,7 @@ func createV1beta1ValidationWebhook(client clientset.Interface, endpoint, conver fail := admissionv1beta1.Fail equivalent := admissionv1beta1.Equivalent // Attaching Admission webhook to API server - _, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&admissionv1beta1.ValidatingWebhookConfiguration{ + _, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: "admission.integration.test"}, Webhooks: []admissionv1beta1.ValidatingWebhook{ { @@ -1486,7 +1486,7 @@ func createV1beta1MutationWebhook(client clientset.Interface, endpoint, converte fail := admissionv1beta1.Fail equivalent := admissionv1beta1.Equivalent // Attaching Mutation webhook to API server - _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionv1beta1.MutatingWebhookConfiguration{ + _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: "mutation.integration.test"}, Webhooks: []admissionv1beta1.MutatingWebhook{ { @@ -1523,7 +1523,7 @@ func createV1ValidationWebhook(client clientset.Interface, endpoint, convertedEn equivalent := admissionv1.Equivalent none := admissionv1.SideEffectClassNone // Attaching Admission webhook to API server - _, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(&admissionv1.ValidatingWebhookConfiguration{ + _, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.TODO(), &admissionv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: "admissionv1.integration.test"}, Webhooks: []admissionv1.ValidatingWebhook{ { @@ -1562,7 +1562,7 @@ func createV1MutationWebhook(client clientset.Interface, endpoint, convertedEndp equivalent := admissionv1.Equivalent none := admissionv1.SideEffectClassNone // Attaching Mutation webhook to API server - _, err := 
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(&admissionv1.MutatingWebhookConfiguration{ + _, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: "mutationv1.integration.test"}, Webhooks: []admissionv1.MutatingWebhook{ { diff --git a/test/integration/apiserver/admissionwebhook/broken_webhook_test.go b/test/integration/apiserver/admissionwebhook/broken_webhook_test.go index 061bd07d590..5b10b5efa11 100644 --- a/test/integration/apiserver/admissionwebhook/broken_webhook_test.go +++ b/test/integration/apiserver/admissionwebhook/broken_webhook_test.go @@ -17,6 +17,7 @@ limitations under the License. package admissionwebhook import ( + "context" "fmt" "testing" "time" @@ -53,13 +54,13 @@ func TestBrokenWebhook(t *testing.T) { } t.Logf("Creating Deployment to ensure apiserver is functional") - _, err = client.AppsV1().Deployments("default").Create(exampleDeployment(generateDeploymentName(0))) + _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(0))) if err != nil { t.Fatalf("Failed to create deployment: %v", err) } t.Logf("Creating Broken Webhook that will block all operations on all objects") - _, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(brokenWebhookConfig(brokenWebhookName)) + _, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(context.TODO(), brokenWebhookConfig(brokenWebhookName)) if err != nil { t.Fatalf("Failed to register broken webhook: %v", err) } @@ -71,7 +72,7 @@ func TestBrokenWebhook(t *testing.T) { // test whether the webhook blocks requests t.Logf("Attempt to create Deployment which should fail due to the webhook") - _, err = client.AppsV1().Deployments("default").Create(exampleDeployment(generateDeploymentName(1))) + _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(1))) if err == nil { t.Fatalf("Expected the broken webhook to cause creating a deployment to fail, but it succeeded.") } @@ -89,13 +90,13 @@ func TestBrokenWebhook(t *testing.T) { // test whether the webhook still blocks requests after restarting t.Logf("Attempt again to create Deployment which should fail due to the webhook") - _, err = client.AppsV1().Deployments("default").Create(exampleDeployment(generateDeploymentName(2))) + _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(2))) if err == nil { t.Fatalf("Expected the broken webhook to cause creating a deployment to fail, but it succeeded.") } t.Logf("Deleting the broken webhook to fix the cluster") - err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(brokenWebhookName, nil) + err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(context.TODO(), brokenWebhookName, nil) if err != nil { t.Fatalf("Failed to delete broken webhook: %v", err) } @@ -105,7 +106,7 @@ func TestBrokenWebhook(t *testing.T) { // test if the deleted webhook no longer blocks requests t.Logf("Creating Deployment to ensure webhook is deleted") - _, err = client.AppsV1().Deployments("default").Create(exampleDeployment(generateDeploymentName(3))) + _, err = client.AppsV1().Deployments("default").Create(context.TODO(), exampleDeployment(generateDeploymentName(3))) if err != nil { t.Fatalf("Failed to create deployment: %v", err) } 
diff --git a/test/integration/apiserver/admissionwebhook/client_auth_test.go b/test/integration/apiserver/admissionwebhook/client_auth_test.go index e753550a310..e3be06ece3e 100644 --- a/test/integration/apiserver/admissionwebhook/client_auth_test.go +++ b/test/integration/apiserver/admissionwebhook/client_auth_test.go @@ -17,6 +17,7 @@ limitations under the License. package admissionwebhook import ( + "context" "crypto/tls" "crypto/x509" "encoding/json" @@ -152,20 +153,20 @@ plugins: t.Fatalf("unexpected error: %v", err) } - _, err = client.CoreV1().Pods("default").Create(clientAuthMarkerFixture) + _, err = client.CoreV1().Pods("default").Create(context.TODO(), clientAuthMarkerFixture) if err != nil { t.Fatal(err) } upCh := recorder.Reset() ns := "load-balance" - _, err = client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { t.Fatal(err) } fail := admissionv1beta1.Fail - mutatingCfg, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionv1beta1.MutatingWebhookConfiguration{ + mutatingCfg, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: "admission.integration.test"}, Webhooks: []admissionv1beta1.MutatingWebhook{{ Name: "admission.integration.test", @@ -185,7 +186,7 @@ plugins: t.Fatal(err) } defer func() { - err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(mutatingCfg.GetName(), &metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), mutatingCfg.GetName(), &metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } @@ -193,7 +194,7 @@ plugins: // wait until new webhook is called if err := wait.PollImmediate(time.Millisecond*5, wait.ForeverTestTimeout, func() (bool, error) { - _, err = client.CoreV1().Pods("default").Patch(clientAuthMarkerFixture.Name, types.JSONPatchType, []byte("[]")) + _, err = client.CoreV1().Pods("default").Patch(context.TODO(), clientAuthMarkerFixture.Name, types.JSONPatchType, []byte("[]")) if t.Failed() { return true, nil } diff --git a/test/integration/apiserver/admissionwebhook/load_balance_test.go b/test/integration/apiserver/admissionwebhook/load_balance_test.go index 3281f6c9e26..ca9d6fe8158 100644 --- a/test/integration/apiserver/admissionwebhook/load_balance_test.go +++ b/test/integration/apiserver/admissionwebhook/load_balance_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package admissionwebhook import ( + "context" "crypto/tls" "crypto/x509" "encoding/json" @@ -101,20 +102,20 @@ func TestWebhookLoadBalance(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - _, err = client.CoreV1().Pods("default").Create(loadBalanceMarkerFixture) + _, err = client.CoreV1().Pods("default").Create(context.TODO(), loadBalanceMarkerFixture) if err != nil { t.Fatal(err) } upCh := recorder.Reset() ns := "load-balance" - _, err = client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { t.Fatal(err) } fail := admissionv1beta1.Fail - mutatingCfg, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionv1beta1.MutatingWebhookConfiguration{ + mutatingCfg, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: "admission.integration.test"}, Webhooks: []admissionv1beta1.MutatingWebhook{{ Name: "admission.integration.test", @@ -134,7 +135,7 @@ func TestWebhookLoadBalance(t *testing.T) { t.Fatal(err) } defer func() { - err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(mutatingCfg.GetName(), &metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), mutatingCfg.GetName(), &metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } @@ -142,7 +143,7 @@ func TestWebhookLoadBalance(t *testing.T) { // wait until new webhook is called the first time if err := wait.PollImmediate(time.Millisecond*5, wait.ForeverTestTimeout, func() (bool, error) { - _, err = client.CoreV1().Pods("default").Patch(loadBalanceMarkerFixture.Name, types.JSONPatchType, []byte("[]")) + _, err = client.CoreV1().Pods("default").Patch(context.TODO(), loadBalanceMarkerFixture.Name, types.JSONPatchType, []byte("[]")) select { case <-upCh: return true, nil @@ -175,7 +176,7 @@ func TestWebhookLoadBalance(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - _, err := client.CoreV1().Pods(ns).Create(pod()) + _, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod()) if err != nil { t.Error(err) } @@ -194,7 +195,7 @@ func TestWebhookLoadBalance(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - _, err := client.CoreV1().Pods(ns).Create(pod()) + _, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod()) if err != nil { t.Error(err) } diff --git a/test/integration/apiserver/admissionwebhook/reinvocation_test.go b/test/integration/apiserver/admissionwebhook/reinvocation_test.go index 9565b609fb1..ce325fe242f 100644 --- a/test/integration/apiserver/admissionwebhook/reinvocation_test.go +++ b/test/integration/apiserver/admissionwebhook/reinvocation_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package admissionwebhook import ( + "context" "crypto/tls" "crypto/x509" "encoding/json" @@ -307,7 +308,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { } for priorityClass, priority := range map[string]int{"low-priority": 1, "high-priority": 10} { - _, err = client.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityClass}, Value: int32(priority)}) + _, err = client.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityClass}, Value: int32(priority)}) if err != nil { t.Fatal(err) } @@ -319,7 +320,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { testCaseID := strconv.Itoa(i) ns := "reinvoke-" + testCaseID nsLabels := map[string]string{"test-case": testCaseID} - _, err = client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns, Labels: nsLabels}}) + _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns, Labels: nsLabels}}) if err != nil { t.Fatal(err) } @@ -327,13 +328,13 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { // Write markers to a separate namespace to avoid cross-talk markerNs := ns + "-markers" markerNsLabels := map[string]string{"test-markers": testCaseID} - _, err = client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: markerNs, Labels: markerNsLabels}}) + _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: markerNs, Labels: markerNsLabels}}) if err != nil { t.Fatal(err) } // Create a maker object to use to check for the webhook configurations to be ready. 
- marker, err := client.CoreV1().Pods(markerNs).Create(newReinvocationMarkerFixture(markerNs)) + marker, err := client.CoreV1().Pods(markerNs).Create(context.TODO(), newReinvocationMarkerFixture(markerNs)) if err != nil { t.Fatal(err) } @@ -377,7 +378,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { AdmissionReviewVersions: []string{"v1beta1"}, }) - cfg, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionv1beta1.MutatingWebhookConfiguration{ + cfg, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("admission.integration.test-%d", i)}, Webhooks: webhooks, }) @@ -385,7 +386,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { t.Fatal(err) } defer func() { - err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(cfg.GetName(), &metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), cfg.GetName(), &metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } @@ -393,7 +394,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { // wait until new webhook is called the first time if err := wait.PollImmediate(time.Millisecond*5, wait.ForeverTestTimeout, func() (bool, error) { - _, err = client.CoreV1().Pods(markerNs).Patch(marker.Name, types.JSONPatchType, []byte("[]")) + _, err = client.CoreV1().Pods(markerNs).Patch(context.TODO(), marker.Name, types.JSONPatchType, []byte("[]")) select { case <-upCh: return true, nil @@ -421,7 +422,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) { if tt.initialPriorityClass != "" { pod.Spec.PriorityClassName = tt.initialPriorityClass } - obj, err := client.CoreV1().Pods(ns).Create(pod) + obj, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod) if tt.expectError { if err == nil { diff --git a/test/integration/apiserver/admissionwebhook/timeout_test.go b/test/integration/apiserver/admissionwebhook/timeout_test.go index 5a2986c0907..62912ef7c9c 100644 --- a/test/integration/apiserver/admissionwebhook/timeout_test.go +++ b/test/integration/apiserver/admissionwebhook/timeout_test.go @@ -175,7 +175,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { t.Fatalf("unexpected error: %v", err) } - _, err = client.CoreV1().Pods("default").Create(timeoutMarkerFixture) + _, err = client.CoreV1().Pods("default").Create(context.TODO(), timeoutMarkerFixture) if err != nil { t.Fatal(err) } @@ -184,7 +184,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { t.Run(tt.name, func(t *testing.T) { upCh := recorder.Reset() ns := fmt.Sprintf("reinvoke-%d", i) - _, err = client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { t.Fatal(err) } @@ -209,7 +209,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { AdmissionReviewVersions: []string{"v1beta1"}, }) } - mutatingCfg, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionv1beta1.MutatingWebhookConfiguration{ + mutatingCfg, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: 
fmt.Sprintf("admission.integration.test-%d", i)}, Webhooks: mutatingWebhooks, }) @@ -217,7 +217,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { t.Fatal(err) } defer func() { - err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(mutatingCfg.GetName(), &metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), mutatingCfg.GetName(), &metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } @@ -243,7 +243,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { AdmissionReviewVersions: []string{"v1beta1"}, }) } - validatingCfg, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&admissionv1beta1.ValidatingWebhookConfiguration{ + validatingCfg, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("admission.integration.test-%d", i)}, Webhooks: validatingWebhooks, }) @@ -251,7 +251,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { t.Fatal(err) } defer func() { - err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(validatingCfg.GetName(), &metav1.DeleteOptions{}) + err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(context.TODO(), validatingCfg.GetName(), &metav1.DeleteOptions{}) if err != nil { t.Fatal(err) } @@ -259,7 +259,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) { // wait until new webhook is called the first time if err := wait.PollImmediate(time.Millisecond*5, wait.ForeverTestTimeout, func() (bool, error) { - _, err = client.CoreV1().Pods("default").Patch(timeoutMarkerFixture.Name, types.JSONPatchType, []byte("[]")) + _, err = client.CoreV1().Pods("default").Patch(context.TODO(), timeoutMarkerFixture.Name, types.JSONPatchType, []byte("[]")) select { case <-upCh: return true, nil diff --git a/test/integration/apiserver/apiserver_test.go b/test/integration/apiserver/apiserver_test.go index 22e64468cd2..737f2a3b8cb 100644 --- a/test/integration/apiserver/apiserver_test.go +++ b/test/integration/apiserver/apiserver_test.go @@ -227,7 +227,7 @@ func Test202StatusCode(t *testing.T) { // 1. Create the resource without any finalizer and then delete it without setting DeleteOptions. // Verify that server returns 200 in this case. - rs, err := rsClient.Create(newRS(ns.Name)) + rs, err := rsClient.Create(context.TODO(), newRS(ns.Name)) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -237,7 +237,7 @@ func Test202StatusCode(t *testing.T) { // Verify that the apiserver still returns 200 since DeleteOptions.OrphanDependents is not set. rs = newRS(ns.Name) rs.ObjectMeta.Finalizers = []string{"kube.io/dummy-finalizer"} - rs, err = rsClient.Create(rs) + rs, err = rsClient.Create(context.TODO(), rs) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -246,7 +246,7 @@ func Test202StatusCode(t *testing.T) { // 3. Create the resource and then delete it with DeleteOptions.OrphanDependents=false. // Verify that the server still returns 200 since the resource is immediately deleted. rs = newRS(ns.Name) - rs, err = rsClient.Create(rs) + rs, err = rsClient.Create(context.TODO(), rs) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -256,7 +256,7 @@ func Test202StatusCode(t *testing.T) { // Verify that the server returns 202 in this case. 
rs = newRS(ns.Name) rs.ObjectMeta.Finalizers = []string{"kube.io/dummy-finalizer"} - rs, err = rsClient.Create(rs) + rs, err = rsClient.Create(context.TODO(), rs) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -293,13 +293,13 @@ func TestListResourceVersion0(t *testing.T) { for i := 0; i < 10; i++ { rs := newRS(ns.Name) rs.Name = fmt.Sprintf("test-%d", i) - if _, err := rsClient.Create(rs); err != nil { + if _, err := rsClient.Create(context.TODO(), rs); err != nil { t.Fatal(err) } } pagerFn := func(opts metav1.ListOptions) (runtime.Object, error) { - return rsClient.List(opts) + return rsClient.List(context.TODO(), opts) } p := pager.New(pager.SimplePageFunc(pagerFn)) @@ -332,7 +332,7 @@ func TestAPIListChunking(t *testing.T) { for i := 0; i < 4; i++ { rs := newRS(ns.Name) rs.Name = fmt.Sprintf("test-%d", i) - if _, err := rsClient.Create(rs); err != nil { + if _, err := rsClient.Create(context.TODO(), rs); err != nil { t.Fatal(err) } } @@ -343,7 +343,7 @@ func TestAPIListChunking(t *testing.T) { PageSize: 1, PageFn: pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { calls++ - list, err := rsClient.List(opts) + list, err := rsClient.List(context.TODO(), opts) if err != nil { return nil, err } @@ -353,7 +353,7 @@ func TestAPIListChunking(t *testing.T) { if calls == 2 { rs := newRS(ns.Name) rs.Name = "test-5" - if _, err := rsClient.Create(rs); err != nil { + if _, err := rsClient.Create(context.TODO(), rs); err != nil { t.Fatal(err) } } @@ -407,11 +407,11 @@ func TestNameInFieldSelector(t *testing.T) { ns := framework.CreateTestingNamespace(fmt.Sprintf("ns%d", i), s, t) defer framework.DeleteTestingNamespace(ns, s, t) - _, err := clientSet.CoreV1().Secrets(ns.Name).Create(makeSecret("foo")) + _, err := clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), makeSecret("foo")) if err != nil { t.Errorf("Couldn't create secret: %v", err) } - _, err = clientSet.CoreV1().Secrets(ns.Name).Create(makeSecret("bar")) + _, err = clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), makeSecret("bar")) if err != nil { t.Errorf("Couldn't create secret: %v", err) } @@ -458,7 +458,7 @@ func TestNameInFieldSelector(t *testing.T) { opts := metav1.ListOptions{ FieldSelector: tc.selector, } - secrets, err := clientSet.CoreV1().Secrets(tc.namespace).List(opts) + secrets, err := clientSet.CoreV1().Secrets(tc.namespace).List(context.TODO(), opts) if err != nil { t.Errorf("%s: Unexpected error: %v", tc.selector, err) } @@ -534,7 +534,7 @@ func TestMetadataClient(t *testing.T) { name: "list, get, patch, and delete via metadata client", want: func(t *testing.T) { ns := "metadata-builtin" - svc, err := clientset.CoreV1().Services(ns).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + svc, err := clientset.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) if err != nil { t.Fatalf("unable to create service: %v", err) } @@ -674,11 +674,11 @@ func TestMetadataClient(t *testing.T) { name: "watch via metadata client", want: func(t *testing.T) { ns := "metadata-watch" - svc, err := clientset.CoreV1().Services(ns).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + svc, 
err := clientset.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(ns).Patch("test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(ns).Patch(context.TODO(), "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to patch cr: %v", err) } @@ -1136,11 +1136,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify columns on services", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - svc, err := clientset.CoreV1().Services(testNamespace).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + svc, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to update service: %v", err) } return svc, "", "services" @@ -1154,11 +1154,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", includeObject: metav1.IncludeNone, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1172,11 +1172,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", includeObject: metav1.IncludeObject, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-3"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-3"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := 
clientset.CoreV1().Services(testNamespace).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1196,11 +1196,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify partial metadata object on config maps", accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"test": "0"}}}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"test": "0"}}}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -1213,11 +1213,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify partial metadata object on config maps in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"test": "0"}}}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"test": "0"}}}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -1392,11 +1392,11 @@ func TestTransform(t *testing.T) { name: "v1 verify columns on services", accept: "application/json;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - svc, err := clientset.CoreV1().Services(testNamespace).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-5"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + svc, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-5"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), 
svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to update service: %v", err) } return svc, "", "services" @@ -1410,11 +1410,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1", includeObject: metav1.IncludeNone, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-6"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-6"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1428,11 +1428,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1", includeObject: metav1.IncludeObject, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-7"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-7"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1452,11 +1452,11 @@ func TestTransform(t *testing.T) { name: "v1 verify partial metadata object on config maps", accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-3", Annotations: map[string]string{"test": "0"}}}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-3", Annotations: map[string]string{"test": "0"}}}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -1469,11 +1469,11 @@ func TestTransform(t *testing.T) { name: "v1 verify partial metadata object on config maps in protobuf", 
accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-4", Annotations: map[string]string{"test": "0"}}}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-4", Annotations: map[string]string{"test": "0"}}}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`)); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" diff --git a/test/integration/apiserver/apply/apply_test.go b/test/integration/apiserver/apply/apply_test.go index e4fcfa4e222..921ef1f7644 100644 --- a/test/integration/apiserver/apply/apply_test.go +++ b/test/integration/apiserver/apply/apply_test.go @@ -753,7 +753,7 @@ func TestApplyRemoveContainerPort(t *testing.T) { t.Fatalf("Failed to remove container port using Apply patch: %v", err) } - deployment, err := client.AppsV1().Deployments("default").Get("deployment", metav1.GetOptions{}) + deployment, err := client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to retrieve object: %v", err) } @@ -961,7 +961,7 @@ func TestApplyConvertsManagedFieldsVersion(t *testing.T) { t.Fatalf("Failed to apply object: %v", err) } - object, err := client.AppsV1().Deployments("default").Get("deployment", metav1.GetOptions{}) + object, err := client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to retrieve object: %v", err) } diff --git a/test/integration/apiserver/certreload/certreload_test.go b/test/integration/apiserver/certreload/certreload_test.go index 97bdcfcdfeb..f35d263203f 100644 --- a/test/integration/apiserver/certreload/certreload_test.go +++ b/test/integration/apiserver/certreload/certreload_test.go @@ -18,6 +18,7 @@ package podlogs import ( "bytes" + "context" "fmt" "io/ioutil" "path" @@ -145,7 +146,7 @@ MnVCuBwfwDXCAiEAw/1TA+CjPq9JC5ek1ifR0FybTURjeQqYkKpve1dveps= func waitForConfigMapCAContent(t *testing.T, kubeClient kubernetes.Interface, key, content string, count int) func() (bool, error) { return func() (bool, error) { - clusterAuthInfo, err := kubeClient.CoreV1().ConfigMaps("kube-system").Get("extension-apiserver-authentication", metav1.GetOptions{}) + clusterAuthInfo, err := kubeClient.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "extension-apiserver-authentication", metav1.GetOptions{}) if apierrors.IsNotFound(err) { return false, nil } diff --git a/test/integration/apiserver/max_json_patch_operations_test.go b/test/integration/apiserver/max_json_patch_operations_test.go index ab9dec8d2e9..1d701e7f012 100644 --- a/test/integration/apiserver/max_json_patch_operations_test.go +++ b/test/integration/apiserver/max_json_patch_operations_test.go @@ -51,7 +51,7 @@ func TestMaxJSONPatchOperations(t *testing.T) { Name: "test", }, } - _, err := clientSet.CoreV1().Secrets("default").Create(secret) + _, err := 
clientSet.CoreV1().Secrets("default").Create(context.TODO(), secret) if err != nil { t.Fatal(err) } diff --git a/test/integration/apiserver/max_request_body_bytes_test.go b/test/integration/apiserver/max_request_body_bytes_test.go index a619556e5e7..7e1c9cedffc 100644 --- a/test/integration/apiserver/max_request_body_bytes_test.go +++ b/test/integration/apiserver/max_request_body_bytes_test.go @@ -58,7 +58,7 @@ func TestMaxResourceSize(t *testing.T) { Name: "test", }, } - _, err := clientSet.CoreV1().Secrets("default").Create(secret) + _, err := clientSet.CoreV1().Secrets("default").Create(context.TODO(), secret) if err != nil { t.Fatal(err) } diff --git a/test/integration/apiserver/patch_test.go b/test/integration/apiserver/patch_test.go index 883eda2d83e..85b334dc5f6 100644 --- a/test/integration/apiserver/patch_test.go +++ b/test/integration/apiserver/patch_test.go @@ -64,7 +64,7 @@ func TestPatchConflicts(t *testing.T) { } // Create the object we're going to conflict on - clientSet.CoreV1().Secrets(ns.Name).Create(secret) + clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), secret) client := clientSet.CoreV1().RESTClient() successes := int32(0) diff --git a/test/integration/apiserver/podlogs/podlogs_test.go b/test/integration/apiserver/podlogs/podlogs_test.go index 989de18a6d1..64bde0824fb 100644 --- a/test/integration/apiserver/podlogs/podlogs_test.go +++ b/test/integration/apiserver/podlogs/podlogs_test.go @@ -82,7 +82,7 @@ func TestInsecurePodLogs(t *testing.T) { t.Fatal(err) } - node, err := clientSet.CoreV1().Nodes().Create(&corev1.Node{ + node, err := clientSet.CoreV1().Nodes().Create(context.TODO(), &corev1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "fake"}, }) if err != nil { @@ -101,19 +101,19 @@ func TestInsecurePodLogs(t *testing.T) { }, }, } - node, err = clientSet.CoreV1().Nodes().UpdateStatus(node) + node, err = clientSet.CoreV1().Nodes().UpdateStatus(context.TODO(), node) if err != nil { t.Fatal(err) } - _, err = clientSet.CoreV1().Namespaces().Create(&corev1.Namespace{ + _, err = clientSet.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "ns"}, }) if err != nil { t.Fatal(err) } - _, err = clientSet.CoreV1().ServiceAccounts("ns").Create(&corev1.ServiceAccount{ + _, err = clientSet.CoreV1().ServiceAccounts("ns").Create(context.TODO(), &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "ns"}, }) if err != nil { @@ -121,7 +121,7 @@ func TestInsecurePodLogs(t *testing.T) { } falseRef := false - pod, err := clientSet.CoreV1().Pods("ns").Create(&corev1.Pod{ + pod, err := clientSet.CoreV1().Pods("ns").Create(context.TODO(), &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "ns"}, Spec: corev1.PodSpec{ Containers: []corev1.Container{ diff --git a/test/integration/auth/accessreview_test.go b/test/integration/auth/accessreview_test.go index 55fc9b2afc6..2be729a5f6e 100644 --- a/test/integration/auth/accessreview_test.go +++ b/test/integration/auth/accessreview_test.go @@ -123,7 +123,7 @@ func TestSubjectAccessReview(t *testing.T) { } for _, test := range tests { - response, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(test.sar) + response, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), test.sar) switch { case err == nil && len(test.expectedError) == 0: @@ -207,7 +207,7 @@ func TestSelfSubjectAccessReview(t *testing.T) { for _, test := range tests { username = test.username - response, err := 
clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(test.sar) + response, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), test.sar) switch { case err == nil && len(test.expectedError) == 0: @@ -325,7 +325,7 @@ func TestLocalSubjectAccessReview(t *testing.T) { } for _, test := range tests { - response, err := clientset.AuthorizationV1().LocalSubjectAccessReviews(test.namespace).Create(test.sar) + response, err := clientset.AuthorizationV1().LocalSubjectAccessReviews(test.namespace).Create(context.TODO(), test.sar) switch { case err == nil && len(test.expectedError) == 0: diff --git a/test/integration/auth/dynamic_client_test.go b/test/integration/auth/dynamic_client_test.go index b5feb8b5688..4a5fc4dd6f5 100644 --- a/test/integration/auth/dynamic_client_test.go +++ b/test/integration/auth/dynamic_client_test.go @@ -17,6 +17,7 @@ limitations under the License. package auth import ( + "context" "io/ioutil" "os" "testing" @@ -106,7 +107,7 @@ func TestDynamicClientBuilder(t *testing.T) { // We want to trigger token rotation here by deleting service account // the dynamic client was using. - if err = dymClient.CoreV1().ServiceAccounts(ns).Delete(saName, nil); err != nil { + if err = dymClient.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), saName, nil); err != nil { t.Fatalf("delete service account %s failed: %v", saName, err) } time.Sleep(time.Second * 10) @@ -117,12 +118,12 @@ func TestDynamicClientBuilder(t *testing.T) { } func testClientBuilder(dymClient clientset.Interface, ns, saName string) error { - _, err := dymClient.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}) + _, err := dymClient.CoreV1().Namespaces().Get(context.TODO(), ns, metav1.GetOptions{}) if err != nil { return err } - _, err = dymClient.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{}) + _, err = dymClient.CoreV1().ServiceAccounts(ns).Get(context.TODO(), saName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/integration/auth/node_test.go b/test/integration/auth/node_test.go index f0ac5042191..50e7deb8d08 100644 --- a/test/integration/auth/node_test.go +++ b/test/integration/auth/node_test.go @@ -96,24 +96,24 @@ func TestNodeAuthorizer(t *testing.T) { } // Create objects - if _, err := superuserClient.CoreV1().Namespaces().Create(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns"}}); err != nil { + if _, err := superuserClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns"}}); err != nil { t.Fatal(err) } - if _, err := superuserClient.CoreV1().Secrets("ns").Create(&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}); err != nil { + if _, err := superuserClient.CoreV1().Secrets("ns").Create(context.TODO(), &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mysecret"}}); err != nil { t.Fatal(err) } - if _, err := superuserClient.CoreV1().Secrets("ns").Create(&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mypvsecret"}}); err != nil { + if _, err := superuserClient.CoreV1().Secrets("ns").Create(context.TODO(), &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mypvsecret"}}); err != nil { t.Fatal(err) } - if _, err := superuserClient.CoreV1().ConfigMaps("ns").Create(&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}); err != nil { + if _, err := superuserClient.CoreV1().ConfigMaps("ns").Create(context.TODO(), &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}); err != nil { t.Fatal(err) } - if _, err := 
superuserClient.CoreV1().ConfigMaps("ns").Create(&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmapconfigsource"}}); err != nil { + if _, err := superuserClient.CoreV1().ConfigMaps("ns").Create(context.TODO(), &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmapconfigsource"}}); err != nil { t.Fatal(err) } pvName := "mypv" - if _, err := superuserClientExternal.StorageV1().VolumeAttachments().Create(&storagev1.VolumeAttachment{ + if _, err := superuserClientExternal.StorageV1().VolumeAttachments().Create(context.TODO(), &storagev1.VolumeAttachment{ ObjectMeta: metav1.ObjectMeta{Name: "myattachment"}, Spec: storagev1.VolumeAttachmentSpec{ Attacher: "foo", @@ -123,7 +123,7 @@ func TestNodeAuthorizer(t *testing.T) { }); err != nil { t.Fatal(err) } - if _, err := superuserClient.CoreV1().PersistentVolumeClaims("ns").Create(&corev1.PersistentVolumeClaim{ + if _, err := superuserClient.CoreV1().PersistentVolumeClaims("ns").Create(context.TODO(), &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{Name: "mypvc"}, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}, @@ -133,7 +133,7 @@ func TestNodeAuthorizer(t *testing.T) { t.Fatal(err) } - if _, err := superuserClient.CoreV1().PersistentVolumes().Create(&corev1.PersistentVolume{ + if _, err := superuserClient.CoreV1().PersistentVolumes().Create(context.TODO(), &corev1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{Name: "mypv"}, Spec: corev1.PersistentVolumeSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}, @@ -147,50 +147,50 @@ func TestNodeAuthorizer(t *testing.T) { getSecret := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().Secrets("ns").Get("mysecret", metav1.GetOptions{}) + _, err := client.CoreV1().Secrets("ns").Get(context.TODO(), "mysecret", metav1.GetOptions{}) return err } } getPVSecret := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().Secrets("ns").Get("mypvsecret", metav1.GetOptions{}) + _, err := client.CoreV1().Secrets("ns").Get(context.TODO(), "mypvsecret", metav1.GetOptions{}) return err } } getConfigMap := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().ConfigMaps("ns").Get("myconfigmap", metav1.GetOptions{}) + _, err := client.CoreV1().ConfigMaps("ns").Get(context.TODO(), "myconfigmap", metav1.GetOptions{}) return err } } getConfigMapConfigSource := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().ConfigMaps("ns").Get("myconfigmapconfigsource", metav1.GetOptions{}) + _, err := client.CoreV1().ConfigMaps("ns").Get(context.TODO(), "myconfigmapconfigsource", metav1.GetOptions{}) return err } } getPVC := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().PersistentVolumeClaims("ns").Get("mypvc", metav1.GetOptions{}) + _, err := client.CoreV1().PersistentVolumeClaims("ns").Get(context.TODO(), "mypvc", metav1.GetOptions{}) return err } } getPV := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().PersistentVolumes().Get("mypv", metav1.GetOptions{}) + _, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), "mypv", metav1.GetOptions{}) return err } } getVolumeAttachment := func(client clientset.Interface) func() error { return func() error { - _, err := client.StorageV1().VolumeAttachments().Get("myattachment", 
metav1.GetOptions{}) + _, err := client.StorageV1().VolumeAttachments().Get(context.TODO(), "myattachment", metav1.GetOptions{}) return err } } createNode2NormalPod := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().Pods("ns").Create(&corev1.Pod{ + _, err := client.CoreV1().Pods("ns").Create(context.TODO(), &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, Spec: corev1.PodSpec{ NodeName: "node2", @@ -208,7 +208,7 @@ func TestNodeAuthorizer(t *testing.T) { updateNode2NormalPodStatus := func(client clientset.Interface) func() error { return func() error { startTime := metav1.NewTime(time.Now()) - _, err := client.CoreV1().Pods("ns").UpdateStatus(&corev1.Pod{ + _, err := client.CoreV1().Pods("ns").UpdateStatus(context.TODO(), &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "node2normalpod"}, Status: corev1.PodStatus{StartTime: &startTime}, }) @@ -218,13 +218,13 @@ func TestNodeAuthorizer(t *testing.T) { deleteNode2NormalPod := func(client clientset.Interface) func() error { return func() error { zero := int64(0) - return client.CoreV1().Pods("ns").Delete("node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + return client.CoreV1().Pods("ns").Delete(context.TODO(), "node2normalpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) } } createNode2MirrorPod := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().Pods("ns").Create(&corev1.Pod{ + _, err := client.CoreV1().Pods("ns").Create(context.TODO(), &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "node2mirrorpod", Annotations: map[string]string{corev1.MirrorPodAnnotationKey: "true"}, @@ -240,19 +240,19 @@ func TestNodeAuthorizer(t *testing.T) { deleteNode2MirrorPod := func(client clientset.Interface) func() error { return func() error { zero := int64(0) - return client.CoreV1().Pods("ns").Delete("node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) + return client.CoreV1().Pods("ns").Delete(context.TODO(), "node2mirrorpod", &metav1.DeleteOptions{GracePeriodSeconds: &zero}) } } createNode2 := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().Nodes().Create(&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}}) + _, err := client.CoreV1().Nodes().Create(context.TODO(), &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}}) return err } } setNode2ConfigSource := func(client clientset.Interface) func() error { return func() error { - node2, err := client.CoreV1().Nodes().Get("node2", metav1.GetOptions{}) + node2, err := client.CoreV1().Nodes().Get(context.TODO(), "node2", metav1.GetOptions{}) if err != nil { return err } @@ -263,24 +263,24 @@ func TestNodeAuthorizer(t *testing.T) { KubeletConfigKey: "kubelet", }, } - _, err = client.CoreV1().Nodes().Update(node2) + _, err = client.CoreV1().Nodes().Update(context.TODO(), node2) return err } } unsetNode2ConfigSource := func(client clientset.Interface) func() error { return func() error { - node2, err := client.CoreV1().Nodes().Get("node2", metav1.GetOptions{}) + node2, err := client.CoreV1().Nodes().Get(context.TODO(), "node2", metav1.GetOptions{}) if err != nil { return err } node2.Spec.ConfigSource = nil - _, err = client.CoreV1().Nodes().Update(node2) + _, err = client.CoreV1().Nodes().Update(context.TODO(), node2) return err } } updateNode2Status := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoreV1().Nodes().UpdateStatus(&corev1.Node{ + _, err := 
client.CoreV1().Nodes().UpdateStatus(context.TODO(), &corev1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "node2"}, Status: corev1.NodeStatus{}, }) @@ -289,7 +289,7 @@ func TestNodeAuthorizer(t *testing.T) { } deleteNode2 := func(client clientset.Interface) func() error { return func() error { - return client.CoreV1().Nodes().Delete("node2", nil) + return client.CoreV1().Nodes().Delete(context.TODO(), "node2", nil) } } createNode2NormalPodEviction := func(client clientset.Interface) func() error { @@ -331,7 +331,7 @@ func TestNodeAuthorizer(t *testing.T) { capacity++ statusString := fmt.Sprintf("{\"status\": {\"capacity\": {\"storage\": \"%dG\"}}}", capacity) patchBytes := []byte(statusString) - _, err := client.CoreV1().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status") + _, err := client.CoreV1().PersistentVolumeClaims("ns").Patch(context.TODO(), "mypvc", types.StrategicMergePatchType, patchBytes, "status") return err } } @@ -339,14 +339,14 @@ func TestNodeAuthorizer(t *testing.T) { updatePVCPhase := func(client clientset.Interface) func() error { return func() error { patchBytes := []byte(`{"status":{"phase": "Bound"}}`) - _, err := client.CoreV1().PersistentVolumeClaims("ns").Patch("mypvc", types.StrategicMergePatchType, patchBytes, "status") + _, err := client.CoreV1().PersistentVolumeClaims("ns").Patch(context.TODO(), "mypvc", types.StrategicMergePatchType, patchBytes, "status") return err } } getNode1Lease := func(client clientset.Interface) func() error { return func() error { - _, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Get("node1", metav1.GetOptions{}) + _, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Get(context.TODO(), "node1", metav1.GetOptions{}) return err } } @@ -363,18 +363,18 @@ func TestNodeAuthorizer(t *testing.T) { RenewTime: &metav1.MicroTime{Time: time.Now()}, }, } - _, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Create(lease) + _, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Create(context.TODO(), lease) return err } } updateNode1Lease := func(client clientset.Interface) func() error { return func() error { - lease, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Get("node1", metav1.GetOptions{}) + lease, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Get(context.TODO(), "node1", metav1.GetOptions{}) if err != nil { return err } lease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()} - _, err = client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Update(lease) + _, err = client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Update(context.TODO(), lease) return err } } @@ -382,19 +382,19 @@ func TestNodeAuthorizer(t *testing.T) { return func() error { node1LeaseDurationSeconds++ bs := []byte(fmt.Sprintf(`{"spec": {"leaseDurationSeconds": %d}}`, node1LeaseDurationSeconds)) - _, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Patch("node1", types.StrategicMergePatchType, bs) + _, err := client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Patch(context.TODO(), "node1", types.StrategicMergePatchType, bs) return err } } deleteNode1Lease := func(client clientset.Interface) func() error { return func() error { - return client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Delete("node1", &metav1.DeleteOptions{}) + return client.CoordinationV1().Leases(corev1.NamespaceNodeLease).Delete(context.TODO(), "node1", &metav1.DeleteOptions{}) } } getNode1CSINode := func(client 
clientset.Interface) func() error { return func() error { - _, err := client.StorageV1().CSINodes().Get("node1", metav1.GetOptions{}) + _, err := client.StorageV1().CSINodes().Get(context.TODO(), "node1", metav1.GetOptions{}) return err } } @@ -414,13 +414,13 @@ func TestNodeAuthorizer(t *testing.T) { }, }, } - _, err := client.StorageV1().CSINodes().Create(nodeInfo) + _, err := client.StorageV1().CSINodes().Create(context.TODO(), nodeInfo) return err } } updateNode1CSINode := func(client clientset.Interface) func() error { return func() error { - nodeInfo, err := client.StorageV1().CSINodes().Get("node1", metav1.GetOptions{}) + nodeInfo, err := client.StorageV1().CSINodes().Get(context.TODO(), "node1", metav1.GetOptions{}) if err != nil { return err } @@ -431,7 +431,7 @@ func TestNodeAuthorizer(t *testing.T) { TopologyKeys: []string{"com.example.csi/rack"}, }, } - _, err = client.StorageV1().CSINodes().Update(nodeInfo) + _, err = client.StorageV1().CSINodes().Update(context.TODO(), nodeInfo) return err } } @@ -439,13 +439,13 @@ func TestNodeAuthorizer(t *testing.T) { return func() error { bs := []byte(fmt.Sprintf(`{"csiDrivers": [ { "driver": "net.example.storage.driver2", "nodeID": "net.example.storage/node1", "topologyKeys": [ "net.example.storage/region" ] } ] }`)) // StrategicMergePatch is unsupported by CRs. Falling back to MergePatch - _, err := client.StorageV1().CSINodes().Patch("node1", types.MergePatchType, bs) + _, err := client.StorageV1().CSINodes().Patch(context.TODO(), "node1", types.MergePatchType, bs) return err } } deleteNode1CSINode := func(client clientset.Interface) func() error { return func() error { - return client.StorageV1().CSINodes().Delete("node1", &metav1.DeleteOptions{}) + return client.StorageV1().CSINodes().Delete(context.TODO(), "node1", &metav1.DeleteOptions{}) } } diff --git a/test/integration/auth/rbac_test.go b/test/integration/auth/rbac_test.go index 13f0ccff6c6..c443ce043cf 100644 --- a/test/integration/auth/rbac_test.go +++ b/test/integration/auth/rbac_test.go @@ -127,25 +127,25 @@ type bootstrapRoles struct { // client should be authenticated as the RBAC super user. 
func (b bootstrapRoles) bootstrap(client clientset.Interface) error { for _, r := range b.clusterRoles { - _, err := client.RbacV1().ClusterRoles().Create(&r) + _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), &r) if err != nil { return fmt.Errorf("failed to make request: %v", err) } } for _, r := range b.roles { - _, err := client.RbacV1().Roles(r.Namespace).Create(&r) + _, err := client.RbacV1().Roles(r.Namespace).Create(context.TODO(), &r) if err != nil { return fmt.Errorf("failed to make request: %v", err) } } for _, r := range b.clusterRoleBindings { - _, err := client.RbacV1().ClusterRoleBindings().Create(&r) + _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &r) if err != nil { return fmt.Errorf("failed to make request: %v", err) } } for _, r := range b.roleBindings { - _, err := client.RbacV1().RoleBindings(r.Namespace).Create(&r) + _, err := client.RbacV1().RoleBindings(r.Namespace).Create(context.TODO(), &r) if err != nil { return fmt.Errorf("failed to make request: %v", err) } @@ -652,7 +652,7 @@ func TestBootstrapping(t *testing.T) { clientset := clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL}) - watcher, err := clientset.RbacV1().ClusterRoles().Watch(metav1.ListOptions{ResourceVersion: "0"}) + watcher, err := clientset.RbacV1().ClusterRoles().Watch(context.TODO(), metav1.ListOptions{ResourceVersion: "0"}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -668,7 +668,7 @@ func TestBootstrapping(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - clusterRoles, err := clientset.RbacV1().ClusterRoles().List(metav1.ListOptions{}) + clusterRoles, err := clientset.RbacV1().ClusterRoles().List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -715,7 +715,7 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { // Modify the default RBAC discovery ClusterRoleBidnings to look more like the defaults that // existed prior to v1.14, but with user modifications. 
t.Logf("Modifying default `system:discovery` ClusterRoleBinding") - discRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get("system:discovery", metav1.GetOptions{}) + discRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:discovery", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:discovery` ClusterRoleBinding: %v", err) } @@ -728,21 +728,21 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { APIGroup: "rbac.authorization.k8s.io", }, } - if discRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(discRoleBinding); err != nil { + if discRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), discRoleBinding); err != nil { t.Fatalf("Failed to update `system:discovery` ClusterRoleBinding: %v", err) } t.Logf("Modifying default `system:basic-user` ClusterRoleBinding") - basicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get("system:basic-user", metav1.GetOptions{}) + basicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:basic-user", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:basic-user` ClusterRoleBinding: %v", err) } basicUserRoleBinding.Annotations["rbac.authorization.kubernetes.io/autoupdate"] = "false" basicUserRoleBinding.Annotations["rbac-discovery-upgrade-test"] = "pass" - if basicUserRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(basicUserRoleBinding); err != nil { + if basicUserRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), basicUserRoleBinding); err != nil { t.Fatalf("Failed to update `system:basic-user` ClusterRoleBinding: %v", err) } t.Logf("Deleting default `system:public-info-viewer` ClusterRoleBinding") - if err = client.RbacV1().ClusterRoleBindings().Delete("system:public-info-viewer", &metav1.DeleteOptions{}); err != nil { + if err = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "system:public-info-viewer", &metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete `system:public-info-viewer` ClusterRoleBinding: %v", err) } @@ -756,21 +756,21 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { client = clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL}) - newDiscRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get("system:discovery", metav1.GetOptions{}) + newDiscRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:discovery", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:discovery` ClusterRoleBinding: %v", err) } if !reflect.DeepEqual(newDiscRoleBinding, discRoleBinding) { t.Errorf("`system:discovery` should have been unmodified. Wanted: %v, got %v", discRoleBinding, newDiscRoleBinding) } - newBasicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get("system:basic-user", metav1.GetOptions{}) + newBasicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:basic-user", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:basic-user` ClusterRoleBinding: %v", err) } if !reflect.DeepEqual(newBasicUserRoleBinding, basicUserRoleBinding) { t.Errorf("`system:basic-user` should have been unmodified. 
Wanted: %v, got %v", basicUserRoleBinding, newBasicUserRoleBinding) } - publicInfoViewerRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get("system:public-info-viewer", metav1.GetOptions{}) + publicInfoViewerRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:public-info-viewer", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:public-info-viewer` ClusterRoleBinding: %v", err) } diff --git a/test/integration/auth/svcaccttoken_test.go b/test/integration/auth/svcaccttoken_test.go index a7ab4fc4068..e090b1e152e 100644 --- a/test/integration/auth/svcaccttoken_test.go +++ b/test/integration/auth/svcaccttoken_test.go @@ -17,6 +17,7 @@ limitations under the License. package auth import ( + "context" "crypto/ecdsa" "encoding/base64" "encoding/json" @@ -88,13 +89,13 @@ func TestServiceAccountTokenCreate(t *testing.T) { serviceaccount.NewValidator(serviceaccountgetter.NewGetterFromClient( gcs, v1listers.NewSecretLister(newIndexer(func(namespace, name string) (interface{}, error) { - return gcs.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) + return gcs.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) })), v1listers.NewServiceAccountLister(newIndexer(func(namespace, name string) (interface{}, error) { - return gcs.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + return gcs.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) })), v1listers.NewPodLister(newIndexer(func(namespace, name string) (interface{}, error) { - return gcs.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + return gcs.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) })), )), ), @@ -161,13 +162,13 @@ func TestServiceAccountTokenCreate(t *testing.T) { }, } - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } sa, delSvcAcct := createDeleteSvcAcct(t, cs, sa) defer delSvcAcct() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) if err != nil { t.Fatalf("err: %v", err) } @@ -199,13 +200,13 @@ func TestServiceAccountTokenCreate(t *testing.T) { }, } - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } sa, del := createDeleteSvcAcct(t, cs, sa) defer del() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { t.Fatalf("expected err creating token bound to nonexistant pod but got: %#v", resp) } pod, delPod := createDeletePod(t, cs, pod) @@ -213,17 +214,17 @@ func TestServiceAccountTokenCreate(t *testing.T) { // right uid treq.Spec.BoundObjectRef.UID = pod.UID - if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil { + if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { t.Fatalf("err: 
%v", err) } // wrong uid treq.Spec.BoundObjectRef.UID = wrongUID - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { t.Fatalf("expected err creating token bound to pod with wrong uid but got: %#v", resp) } // no uid treq.Spec.BoundObjectRef.UID = noUID - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) if err != nil { t.Fatalf("err: %v", err) } @@ -262,13 +263,13 @@ func TestServiceAccountTokenCreate(t *testing.T) { }, } - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } sa, del := createDeleteSvcAcct(t, cs, sa) defer del() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { t.Fatalf("expected err creating token bound to nonexistant secret but got: %#v", resp) } secret, delSecret := createDeleteSecret(t, cs, secret) @@ -276,17 +277,17 @@ func TestServiceAccountTokenCreate(t *testing.T) { // right uid treq.Spec.BoundObjectRef.UID = secret.UID - if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil { + if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { t.Fatalf("err: %v", err) } // wrong uid treq.Spec.BoundObjectRef.UID = wrongUID - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { t.Fatalf("expected err creating token bound to secret with wrong uid but got: %#v", resp) } // no uid treq.Spec.BoundObjectRef.UID = noUID - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) if err != nil { t.Fatalf("err: %v", err) } @@ -320,7 +321,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { _, del = createDeletePod(t, cs, otherpod) defer del() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err == nil { t.Fatalf("expected err but got: %#v", resp) } }) @@ -335,7 +336,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { sa, del := createDeleteSvcAcct(t, cs, sa) defer del() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) if err != nil { t.Fatalf("err: %v", err) } @@ -374,7 +375,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { sa, del := createDeleteSvcAcct(t, cs, sa) defer del() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) if err != nil { t.Fatalf("err: %v", err) } @@ -390,7 +391,7 @@ 
func TestServiceAccountTokenCreate(t *testing.T) { sa, del := createDeleteSvcAcct(t, cs, sa) defer del() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq) if err != nil { t.Fatalf("err: %v", err) } @@ -418,7 +419,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer originalDelPod() treq.Spec.BoundObjectRef.UID = originalPod.UID - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { t.Fatalf("err: %v", err) } @@ -459,7 +460,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer originalDelSecret() treq.Spec.BoundObjectRef.UID = originalSecret.UID - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { t.Fatalf("err: %v", err) } @@ -502,7 +503,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer originalDelSecret() treq.Spec.BoundObjectRef.UID = originalSecret.UID - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { t.Fatalf("err: %v", err) } @@ -546,7 +547,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer originalDelSecret() treq.Spec.BoundObjectRef.UID = originalSecret.UID - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(sa.Name, treq); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq); err != nil { t.Fatalf("err: %v", err) } @@ -571,7 +572,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { func doTokenReview(t *testing.T, cs clientset.Interface, treq *authenticationv1.TokenRequest, expectErr bool) authenticationv1.UserInfo { t.Helper() - trev, err := cs.AuthenticationV1().TokenReviews().Create(&authenticationv1.TokenReview{ + trev, err := cs.AuthenticationV1().TokenReviews().Create(context.TODO(), &authenticationv1.TokenReview{ Spec: authenticationv1.TokenReviewSpec{ Token: treq.Status.Token, }, @@ -642,7 +643,7 @@ func getPayload(t *testing.T, b string) string { func createDeleteSvcAcct(t *testing.T, cs clientset.Interface, sa *v1.ServiceAccount) (*v1.ServiceAccount, func()) { t.Helper() - sa, err := cs.CoreV1().ServiceAccounts(sa.Namespace).Create(sa) + sa, err := cs.CoreV1().ServiceAccounts(sa.Namespace).Create(context.TODO(), sa) if err != nil { t.Fatalf("err: %v", err) } @@ -653,7 +654,7 @@ func createDeleteSvcAcct(t *testing.T, cs clientset.Interface, sa *v1.ServiceAcc return } done = true - if err := cs.CoreV1().ServiceAccounts(sa.Namespace).Delete(sa.Name, nil); err != nil { + if err := cs.CoreV1().ServiceAccounts(sa.Namespace).Delete(context.TODO(), sa.Name, nil); err != nil { t.Fatalf("err: %v", err) } } @@ -661,7 +662,7 @@ func createDeleteSvcAcct(t *testing.T, cs clientset.Interface, sa *v1.ServiceAcc func createDeletePod(t *testing.T, cs clientset.Interface, pod *v1.Pod) (*v1.Pod, func()) { t.Helper() - pod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod) + pod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) if err != nil { t.Fatalf("err: %v", err) } @@ -672,7 +673,7 @@ func createDeletePod(t *testing.T, cs 
clientset.Interface, pod *v1.Pod) (*v1.Pod return } done = true - if err := cs.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { + if err := cs.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, nil); err != nil { t.Fatalf("err: %v", err) } } @@ -680,7 +681,7 @@ func createDeletePod(t *testing.T, cs clientset.Interface, pod *v1.Pod) (*v1.Pod func createDeleteSecret(t *testing.T, cs clientset.Interface, sec *v1.Secret) (*v1.Secret, func()) { t.Helper() - sec, err := cs.CoreV1().Secrets(sec.Namespace).Create(sec) + sec, err := cs.CoreV1().Secrets(sec.Namespace).Create(context.TODO(), sec) if err != nil { t.Fatalf("err: %v", err) } @@ -691,7 +692,7 @@ func createDeleteSecret(t *testing.T, cs clientset.Interface, sec *v1.Secret) (* return } done = true - if err := cs.CoreV1().Secrets(sec.Namespace).Delete(sec.Name, nil); err != nil { + if err := cs.CoreV1().Secrets(sec.Namespace).Delete(context.TODO(), sec.Name, nil); err != nil { t.Fatalf("err: %v", err) } } diff --git a/test/integration/client/client_test.go b/test/integration/client/client_test.go index e75d32b8193..d6b792effaa 100644 --- a/test/integration/client/client_test.go +++ b/test/integration/client/client_test.go @@ -59,7 +59,7 @@ func TestClient(t *testing.T) { t.Errorf("expected %#v, got %#v", e, a) } - pods, err := client.CoreV1().Pods("default").List(metav1.ListOptions{}) + pods, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -82,14 +82,14 @@ func TestClient(t *testing.T) { }, } - got, err := client.CoreV1().Pods("default").Create(pod) + got, err := client.CoreV1().Pods("default").Create(context.TODO(), pod) if err == nil { t.Fatalf("unexpected non-error: %v", got) } // get a created pod pod.Spec.Containers[0].Image = "an-image" - got, err = client.CoreV1().Pods("default").Create(pod) + got, err = client.CoreV1().Pods("default").Create(context.TODO(), pod) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -98,7 +98,7 @@ func TestClient(t *testing.T) { } // pod is shown, but not scheduled - pods, err = client.CoreV1().Pods("default").List(metav1.ListOptions{}) + pods, err = client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -151,7 +151,7 @@ func TestAtomicPut(t *testing.T) { }, } rcs := c.CoreV1().ReplicationControllers("default") - rc, err := rcs.Create(&rcBody) + rc, err := rcs.Create(context.TODO(), &rcBody) if err != nil { t.Fatalf("Failed creating atomicRC: %v", err) } @@ -168,7 +168,7 @@ func TestAtomicPut(t *testing.T) { go func(l, v string) { defer wg.Done() for { - tmpRC, err := rcs.Get(rc.Name, metav1.GetOptions{}) + tmpRC, err := rcs.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Error getting atomicRC: %v", err) continue @@ -180,7 +180,7 @@ func TestAtomicPut(t *testing.T) { tmpRC.Spec.Selector[l] = v tmpRC.Spec.Template.Labels[l] = v } - _, err = rcs.Update(tmpRC) + _, err = rcs.Update(context.TODO(), tmpRC) if err != nil { if apierrors.IsConflict(err) { // This is what we expect. 
@@ -194,7 +194,7 @@ func TestAtomicPut(t *testing.T) { }(label, value) } wg.Wait() - rc, err = rcs.Get(rc.Name, metav1.GetOptions{}) + rc, err = rcs.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed getting atomicRC after writers are complete: %v", err) } @@ -227,7 +227,7 @@ func TestPatch(t *testing.T) { }, } pods := c.CoreV1().Pods("default") - _, err := pods.Create(&podBody) + _, err := pods.Create(context.TODO(), &podBody) if err != nil { t.Fatalf("Failed creating patchpods: %v", err) } @@ -286,7 +286,7 @@ func TestPatch(t *testing.T) { if err != nil { t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err) } - pod, err := pods.Get(name, metav1.GetOptions{}) + pod, err := pods.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed getting patchpod: %v", err) } @@ -299,7 +299,7 @@ func TestPatch(t *testing.T) { if err != nil { t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err) } - pod, err = pods.Get(name, metav1.GetOptions{}) + pod, err = pods.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed getting patchpod: %v", err) } @@ -312,7 +312,7 @@ func TestPatch(t *testing.T) { if err != nil { t.Fatalf("Failed updating patchpod with patch type %s: %v", k, err) } - pod, err = pods.Get(name, metav1.GetOptions{}) + pod, err = pods.Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed getting patchpod: %v", err) } @@ -357,7 +357,7 @@ func TestPatchWithCreateOnUpdate(t *testing.T) { } // Create the endpoint (endpoints set AllowCreateOnUpdate=true) to get a UID and resource version - createdEndpoint, err := c.CoreV1().Endpoints("default").Update(endpointTemplate) + createdEndpoint, err := c.CoreV1().Endpoints("default").Update(context.TODO(), endpointTemplate) if err != nil { t.Fatalf("Failed creating endpoint: %v", err) } @@ -476,7 +476,7 @@ func TestSingleWatch(t *testing.T) { rv1 := "" for i := 0; i < 10; i++ { event := mkEvent(i) - got, err := client.CoreV1().Events("default").Create(event) + got, err := client.CoreV1().Events("default").Create(context.TODO(), event) if err != nil { t.Fatalf("Failed creating event %#q: %v", event, err) } @@ -569,7 +569,7 @@ func TestMultiWatch(t *testing.T) { for i := 0; i < watcherCount; i++ { watchesStarted.Add(1) name := fmt.Sprintf("multi-watch-%v", i) - got, err := client.CoreV1().Pods("default").Create(&v1.Pod{ + got, err := client.CoreV1().Pods("default").Create(context.TODO(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: labels.Set{"watchlabel": name}, @@ -590,7 +590,7 @@ func TestMultiWatch(t *testing.T) { LabelSelector: labels.Set{"watchlabel": name}.AsSelector().String(), ResourceVersion: rv, } - w, err := client.CoreV1().Pods("default").Watch(options) + w, err := client.CoreV1().Pods("default").Watch(context.TODO(), options) if err != nil { panic(fmt.Sprintf("watch error for %v: %v", name, err)) } @@ -639,7 +639,7 @@ func TestMultiWatch(t *testing.T) { if !ok { return } - if _, err := client.CoreV1().Events("default").Create(dummyEvent(i)); err != nil { + if _, err := client.CoreV1().Events("default").Create(context.TODO(), dummyEvent(i)); err != nil { panic(fmt.Sprintf("couldn't make an event: %v", err)) } changeMade <- i @@ -676,7 +676,7 @@ func TestMultiWatch(t *testing.T) { return } name := fmt.Sprintf("unrelated-%v", i) - _, err := client.CoreV1().Pods("default").Create(&v1.Pod{ + _, err := client.CoreV1().Pods("default").Create(context.TODO(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ 
Name: name, }, @@ -710,13 +710,13 @@ func TestMultiWatch(t *testing.T) { for i := 0; i < watcherCount; i++ { go func(i int) { name := fmt.Sprintf("multi-watch-%v", i) - pod, err := client.CoreV1().Pods("default").Get(name, metav1.GetOptions{}) + pod, err := client.CoreV1().Pods("default").Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { panic(fmt.Sprintf("Couldn't get %v: %v", name, err)) } pod.Spec.Containers[0].Image = imageutils.GetPauseImageName() sentTimes <- timePair{time.Now(), name} - if _, err := client.CoreV1().Pods("default").Update(pod); err != nil { + if _, err := client.CoreV1().Pods("default").Update(context.TODO(), pod); err != nil { panic(fmt.Sprintf("Couldn't make %v: %v", name, err)) } }(i) @@ -754,7 +754,7 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s }, }, } - pod, err := c.CoreV1().Pods(namespace).Create(&podBody) + pod, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &podBody) if err != nil { t.Fatalf("Failed creating selflinktest pod: %v", err) } @@ -762,7 +762,7 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err) } - podList, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Errorf("Failed listing pods: %v", err) } diff --git a/test/integration/client/dynamic_client_test.go b/test/integration/client/dynamic_client_test.go index 930829c666d..27076038873 100644 --- a/test/integration/client/dynamic_client_test.go +++ b/test/integration/client/dynamic_client_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "fmt" "reflect" "testing" @@ -64,7 +65,7 @@ func TestDynamicClient(t *testing.T) { }, } - actual, err := client.CoreV1().Pods("default").Create(pod) + actual, err := client.CoreV1().Pods("default").Create(context.TODO(), pod) if err != nil { t.Fatalf("unexpected error when creating pod: %v", err) } @@ -109,7 +110,7 @@ func TestDynamicClient(t *testing.T) { t.Fatalf("unexpected error when deleting pod: %v", err) } - list, err := client.CoreV1().Pods("default").List(metav1.ListOptions{}) + list, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error when listing pods: %v", err) } @@ -149,7 +150,7 @@ func TestDynamicClientWatch(t *testing.T) { rv1 := "" for i := 0; i < 10; i++ { event := mkEvent(i) - got, err := client.CoreV1().Events("default").Create(event) + got, err := client.CoreV1().Events("default").Create(context.TODO(), event) if err != nil { t.Fatalf("Failed creating event %#q: %v", event, err) } diff --git a/test/integration/configmap/configmap_test.go b/test/integration/configmap/configmap_test.go index 64df992e580..e0287112425 100644 --- a/test/integration/configmap/configmap_test.go +++ b/test/integration/configmap/configmap_test.go @@ -19,6 +19,7 @@ package configmap // This file tests use of the configMap API resource. 
import ( + "context" "testing" "k8s.io/api/core/v1" @@ -56,7 +57,7 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) }, } - if _, err := client.CoreV1().ConfigMaps(cfg.Namespace).Create(&cfg); err != nil { + if _, err := client.CoreV1().ConfigMaps(cfg.Namespace).Create(context.TODO(), &cfg); err != nil { t.Errorf("unable to create test configMap: %v", err) } defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name) @@ -111,14 +112,14 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) } pod.ObjectMeta.Name = "uses-configmap" - if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) } func deleteConfigMapOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.CoreV1().ConfigMaps(ns).Delete(name, nil); err != nil { + if err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name, nil); err != nil { t.Errorf("unable to delete ConfigMap %v: %v", name, err) } } diff --git a/test/integration/cronjob/cronjob_test.go b/test/integration/cronjob/cronjob_test.go index 6f5d0116bdd..9bc52315707 100644 --- a/test/integration/cronjob/cronjob_test.go +++ b/test/integration/cronjob/cronjob_test.go @@ -17,6 +17,7 @@ limitations under the License. package cronjob import ( + "context" "fmt" "net/http/httptest" "testing" @@ -88,7 +89,7 @@ func newCronJob(name, namespace, schedule string) *batchv1beta1.CronJob { func cleanupCronJobs(t *testing.T, cjClient clientbatchv1beta1.CronJobInterface, name string) { deletePropagation := metav1.DeletePropagationForeground - err := cjClient.Delete(name, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation}) + err := cjClient.Delete(context.TODO(), name, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation}) if err != nil { t.Errorf("Failed to delete CronJob: %v", err) } @@ -96,7 +97,7 @@ func cleanupCronJobs(t *testing.T, cjClient clientbatchv1beta1.CronJobInterface, func validateJobAndPod(t *testing.T, clientSet clientset.Interface, namespace string) { if err := wait.PollImmediate(1*time.Second, 120*time.Second, func() (bool, error) { - jobs, err := clientSet.BatchV1().Jobs(namespace).List(metav1.ListOptions{}) + jobs, err := clientSet.BatchV1().Jobs(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list jobs: %v", err) } @@ -117,7 +118,7 @@ func validateJobAndPod(t *testing.T, clientSet clientset.Interface, namespace st } } - pods, err := clientSet.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + pods, err := clientSet.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list pods: %v", err) } @@ -162,7 +163,7 @@ func TestCronJobLaunchesPodAndCleansUp(t *testing.T) { go cjc.Run(stopCh) go jc.Run(1, stopCh) - _, err := cjClient.Create(newCronJob(cronJobName, ns.Name, "* * * * ?")) + _, err := cjClient.Create(context.TODO(), newCronJob(cronJobName, ns.Name, "* * * * ?")) if err != nil { t.Fatalf("Failed to create CronJob: %v", err) } diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go index 5c0add82f92..b81a123d6eb 100644 --- a/test/integration/daemonset/daemonset_test.go +++ b/test/integration/daemonset/daemonset_test.go @@ -143,7 +143,7 @@ func newDaemonSet(name, namespace string) *apps.DaemonSet { } func 
cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet) { - ds, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{}) + ds, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Failed to get DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err) return @@ -159,14 +159,14 @@ func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet) // force update to avoid version conflict ds.ResourceVersion = "" - if ds, err = cs.AppsV1().DaemonSets(ds.Namespace).Update(ds); err != nil { + if ds, err = cs.AppsV1().DaemonSets(ds.Namespace).Update(context.TODO(), ds); err != nil { t.Errorf("Failed to update DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err) return } // Wait for the daemon set controller to kill all the daemon pods. if err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) { - updatedDS, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{}) + updatedDS, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -178,7 +178,7 @@ func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet) falseVar := false deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar} - if err := cs.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions); err != nil { + if err := cs.AppsV1().DaemonSets(ds.Namespace).Delete(context.TODO(), ds.Name, deleteOptions); err != nil { t.Errorf("Failed to delete DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err) } } @@ -248,7 +248,7 @@ func newNode(name string, label map[string]string) *v1.Node { func addNodes(nodeClient corev1client.NodeInterface, startIndex, numNodes int, label map[string]string, t *testing.T) { for i := startIndex; i < startIndex+numNodes; i++ { - _, err := nodeClient.Create(newNode(fmt.Sprintf("node-%d", i), label)) + _, err := nodeClient.Create(context.TODO(), newNode(fmt.Sprintf("node-%d", i), label)) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -288,7 +288,7 @@ func validateDaemonSetPodsAndMarkReady( Phase: v1.PodRunning, Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}, } - _, err := podClient.UpdateStatus(podCopy) + _, err := podClient.UpdateStatus(context.TODO(), podCopy) if err != nil { return false, err } @@ -305,7 +305,7 @@ func validateDaemonSetPodsAndMarkReady( // gets unschedulable status. 
func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
+ pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return false, nil
}
@@ -341,7 +341,7 @@ func waitForPodsCreated(podInformer cache.SharedIndexInformer, num int) error {
func waitForDaemonSetAndControllerRevisionCreated(c clientset.Interface, name string, namespace string) error {
return wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
- ds, err := c.AppsV1().DaemonSets(namespace).Get(name, metav1.GetOptions{})
+ ds, err := c.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -349,7 +349,7 @@ func waitForDaemonSetAndControllerRevisionCreated(c clientset.Interface, name st
return false, nil
}
- revs, err := c.AppsV1().ControllerRevisions(namespace).List(metav1.ListOptions{})
+ revs, err := c.AppsV1().ControllerRevisions(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -375,7 +375,7 @@ func hashAndNameForDaemonSet(ds *apps.DaemonSet) (string, string) {
}
func validateDaemonSetCollisionCount(dsClient appstyped.DaemonSetInterface, dsName string, expCount int32, t *testing.T) {
- ds, err := dsClient.Get(dsName, metav1.GetOptions{})
+ ds, err := dsClient.Get(context.TODO(), dsName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to look up DaemonSet: %v", err)
}
@@ -391,7 +391,7 @@ func validateDaemonSetStatus(
expectedNumberReady int32,
t *testing.T) {
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
- ds, err := dsClient.Get(dsName, metav1.GetOptions{})
+ ds, err := dsClient.Get(context.TODO(), dsName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -404,12 +404,12 @@ func validateDaemonSetStatus(
func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string, updateFunc func(*apps.DaemonSet)) *apps.DaemonSet {
var ds *apps.DaemonSet
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- newDS, err := dsClient.Get(dsName, metav1.GetOptions{})
+ newDS, err := dsClient.Get(context.TODO(), dsName, metav1.GetOptions{})
if err != nil {
return err
}
updateFunc(newDS)
- ds, err = dsClient.Update(newDS)
+ ds, err = dsClient.Update(context.TODO(), newDS)
return err
}); err != nil {
t.Fatalf("Failed to update DaemonSet: %v", err)
}
@@ -447,13 +447,13 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
- _, err := dsClient.Create(ds)
+ _, err := dsClient.Create(context.TODO(), ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
defer cleanupDaemonSets(t, clientset, ds)
- _, err = nodeClient.Create(newNode("single-node", nil))
+ _, err = nodeClient.Create(context.TODO(), newNode("single-node", nil))
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
@@ -486,7 +486,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
- _, err := dsClient.Create(ds)
+ _, err := dsClient.Create(context.TODO(), ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
@@ -550,7 +550,7 @@ func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
},
}
- _, err := dsClient.Create(ds)
+ _, err := dsClient.Create(context.TODO(), ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
@@ -591,7 +591,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
- _, err := dsClient.Create(ds)
+ _, err := dsClient.Create(context.TODO(), ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
@@ -602,7 +602,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
node.Status.Conditions = []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionFalse},
}
- _, err = nodeClient.Create(node)
+ _, err = nodeClient.Create(context.TODO(), node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
@@ -639,7 +639,7 @@ func TestInsufficientCapacityNode(t *testing.T) {
ds := newDaemonSet("foo", ns.Name)
ds.Spec.Template.Spec = resourcePodSpec("", "120M", "75m")
ds.Spec.UpdateStrategy = *strategy
- ds, err := dsClient.Create(ds)
+ ds, err := dsClient.Create(context.TODO(), ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
@@ -648,7 +648,7 @@ func TestInsufficientCapacityNode(t *testing.T) {
node := newNode("node-with-limited-memory", nil)
node.Status.Allocatable = allocatableResources("100M", "200m")
- _, err = nodeClient.Create(node)
+ _, err = nodeClient.Create(context.TODO(), node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
@@ -667,7 +667,7 @@ func TestInsufficientCapacityNode(t *testing.T) {
node1 := newNode("node-with-enough-memory", nil)
node1.Status.Allocatable = allocatableResources("200M", "2000m")
- _, err = nodeClient.Create(node1)
+ _, err = nodeClient.Create(context.TODO(), node1)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
@@ -700,7 +700,7 @@ func TestLaunchWithHashCollision(t *testing.T) {
setupScheduler(ctx, t, clientset, informers)
// Create single node
- _, err := nodeClient.Create(newNode("single-node", nil))
+ _, err := nodeClient.Create(context.TODO(), newNode("single-node", nil))
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
@@ -714,7 +714,7 @@ func TestLaunchWithHashCollision(t *testing.T) {
MaxUnavailable: &oneIntString,
},
}
- ds, err := dsClient.Create(orgDs)
+ ds, err := dsClient.Create(context.TODO(), orgDs)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
@@ -725,7 +725,7 @@ func TestLaunchWithHashCollision(t *testing.T) {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
- ds, err = dsClient.Get(ds.Name, metav1.GetOptions{})
+ ds, err = dsClient.Get(context.TODO(), ds.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get DaemonSet: %v", err)
}
@@ -736,7 +736,7 @@ func TestLaunchWithHashCollision(t *testing.T) {
// Look up the ControllerRevision for the DaemonSet
_, name := hashAndNameForDaemonSet(ds)
- revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
+ revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil || revision == nil {
t.Fatalf("Failed to look up ControllerRevision: %v", err)
}
@@ -758,7 +758,7 @@ func TestLaunchWithHashCollision(t *testing.T) {
Data: revision.Data,
Revision: revision.Revision + 1,
}
- _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(newRevision)
+ _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), newRevision)
if err != nil {
t.Fatalf("Failed to create ControllerRevision: %v", err)
}
@@ -811,7 +811,7 @@ func TestTaintedNode(t *testing.T) {
ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
- ds, err := dsClient.Create(ds)
+ ds, err := dsClient.Create(context.TODO(), ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
@@ -820,13 +820,13 @@ func TestTaintedNode(t *testing.T) {
nodeWithTaint := newNode("node-with-taint", nil)
nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}}
- _, err = nodeClient.Create(nodeWithTaint)
+ _, err = nodeClient.Create(context.TODO(), nodeWithTaint)
if err != nil {
t.Fatalf("Failed to create nodeWithTaint: %v", err)
}
nodeWithoutTaint := newNode("node-without-taint", nil)
- _, err = nodeClient.Create(nodeWithoutTaint)
+ _, err = nodeClient.Create(context.TODO(), nodeWithoutTaint)
if err != nil {
t.Fatalf("Failed to create nodeWithoutTaint: %v", err)
}
@@ -835,13 +835,13 @@ func TestTaintedNode(t *testing.T) {
validateDaemonSetStatus(dsClient, ds.Name, 1, t)
// remove taint from nodeWithTaint
- nodeWithTaint, err = nodeClient.Get("node-with-taint", metav1.GetOptions{})
+ nodeWithTaint, err = nodeClient.Get(context.TODO(), "node-with-taint", metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to retrieve nodeWithTaint: %v", err)
}
nodeWithTaintCopy := nodeWithTaint.DeepCopy()
nodeWithTaintCopy.Spec.Taints = []v1.Taint{}
- _, err = nodeClient.Update(nodeWithTaintCopy)
+ _, err = nodeClient.Update(context.TODO(), nodeWithTaintCopy)
if err != nil {
t.Fatalf("Failed to update nodeWithTaint: %v", err)
}
@@ -877,7 +877,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
ds.Spec.Template.Spec.HostNetwork = true
- _, err := dsClient.Create(ds)
+ _, err := dsClient.Create(context.TODO(), ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
@@ -894,7 +894,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
},
}
- _, err = nodeClient.Create(node)
+ _, err = nodeClient.Create(context.TODO(), node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
@@ -912,7 +912,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
},
}
- _, err = nodeClient.Create(nodeNU)
+ _, err = nodeClient.Create(context.TODO(), nodeNU)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
diff --git a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go
index a22ca2506cc..74ef786b7f2 100644
--- a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go
+++ b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package defaulttolerationseconds
import (
+ "context"
"testing"
"k8s.io/api/core/v1"
@@ -56,7 +57,7 @@ func TestAdmission(t *testing.T) {
},
}
- updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(&pod)
+ updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), &pod)
if err != nil {
t.Fatalf("error creating pod: %v", err)
}
diff --git a/test/integration/deployment/deployment_test.go b/test/integration/deployment/deployment_test.go
index b521a9bc530..eeeb4ca0bd4 100644
--- a/test/integration/deployment/deployment_test.go
+++ b/test/integration/deployment/deployment_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package deployment
import (
+ "context"
"fmt"
"strings"
"testing"
@@ -46,7 +47,7 @@ func TestNewDeployment(t *testing.T) {
tester.deployment.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@@ -92,7 +93,7 @@ func TestNewDeployment(t *testing.T) {
if err != nil {
t.Fatalf("failed to parse deployment %s selector: %v", name, err)
}
- pods, err := c.CoreV1().Pods(ns.Name).List(metav1.ListOptions{LabelSelector: selector.String()})
+ pods, err := c.CoreV1().Pods(ns.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
t.Fatalf("failed to list pods of deployment %s: %v", name, err)
}
@@ -136,7 +137,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
// Create a deployment.
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@@ -217,21 +218,21 @@ func TestDeploymentSelectorImmutability(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, int32(20))}
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create apps/v1 deployment %s: %v", tester.deployment.Name, err)
}
// test to ensure apps/v1 selector is immutable
newSelectorLabels := map[string]string{"name_apps_v1beta1": "test_apps_v1beta1"}
- deploymentAppsV1, err := c.AppsV1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
+ deploymentAppsV1, err := c.AppsV1().Deployments(ns.Name).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get apps/v1 deployment %s: %v", name, err)
}
newSelectorLabels = map[string]string{"name_apps_v1": "test_apps_v1"}
deploymentAppsV1.Spec.Selector.MatchLabels = newSelectorLabels
deploymentAppsV1.Spec.Template.Labels = newSelectorLabels
- _, err = c.AppsV1().Deployments(ns.Name).Update(deploymentAppsV1)
+ _, err = c.AppsV1().Deployments(ns.Name).Update(context.TODO(), deploymentAppsV1)
if err == nil {
t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1 deployment %s", deploymentAppsV1.Name)
}
@@ -257,7 +258,7 @@ func TestPausedDeployment(t *testing.T) {
tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@@ -358,7 +359,7 @@ func TestScalePausedDeployment(t *testing.T) {
tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@@ -439,7 +440,7 @@ func TestDeploymentHashCollision(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
}
@@ -473,7 +474,7 @@ func TestDeploymentHashCollision(t *testing.T) {
// Expect deployment collision counter to increment
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
- d, err := c.AppsV1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{})
+ d, err := c.AppsV1().Deployments(ns.Name).Get(context.TODO(), tester.deployment.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
@@ -542,7 +543,7 @@ func TestFailedDeployment(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
tester.deployment.Spec.ProgressDeadlineSeconds = &three
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@@ -600,7 +601,7 @@ func TestOverlappingDeployments(t *testing.T) {
var err error
var rss []*apps.ReplicaSet
for _, tester := range testers {
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
dname := tester.deployment.Name
if err != nil {
t.Fatalf("failed to create deployment %q: %v", dname, err)
@@ -646,7 +647,7 @@ func TestOverlappingDeployments(t *testing.T) {
// Verify replicaset of both deployments has updated number of replicas
for i, tester := range testers {
- rs, err := c.AppsV1().ReplicaSets(ns.Name).Get(rss[i].Name, metav1.GetOptions{})
+ rs, err := c.AppsV1().ReplicaSets(ns.Name).Get(context.TODO(), rss[i].Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset %q: %v", rss[i].Name, err)
}
@@ -676,7 +677,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", name, err)
}
@@ -706,7 +707,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Verify the deployment has minimum available replicas after 2nd rollout
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get deployment %q: %v", name, err)
}
@@ -716,7 +717,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Wait for old replicaset of 1st rollout to have desired replicas
- firstRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(firstRS.Name, metav1.GetOptions{})
+ firstRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(context.TODO(), firstRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset %q: %v", firstRS.Name, err)
}
@@ -757,7 +758,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
rss := []*apps.ReplicaSet{firstRS, secondRS, thirdRS}
for _, curRS := range rss {
- curRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
+ curRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(context.TODO(), curRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err)
}
@@ -783,7 +784,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Verify the deployment has minimum available replicas after 4th rollout
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get deployment %q: %v", name, err)
}
@@ -793,7 +794,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Wait for old replicaset of 3rd rollout to have desired replicas
- thirdRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(thirdRS.Name, metav1.GetOptions{})
+ thirdRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(context.TODO(), thirdRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset %q: %v", thirdRS.Name, err)
}
@@ -834,7 +835,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
rss = []*apps.ReplicaSet{thirdRS, fourthRS, fifthRS}
for _, curRS := range rss {
- curRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
+ curRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(context.TODO(), curRS.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err)
}
@@ -861,7 +862,7 @@ func TestSpecReplicasChange(t *testing.T) {
tester.deployment.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
tester.deployment.Spec.Strategy.RollingUpdate = nil
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@@ -919,7 +920,7 @@ func TestDeploymentAvailableCondition(t *testing.T) {
// progressDeadlineSeconds must be greater than minReadySeconds
tester.deployment.Spec.ProgressDeadlineSeconds = pointer.Int32Ptr(7200)
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@@ -1002,7 +1003,7 @@ func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *apps.R
}
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
- newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
+ newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -1011,7 +1012,7 @@ func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *apps.R
t.Fatalf("failed to wait for controllerRef of the replicaset %q to become nil: %v", rs.Name, err)
}
- newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
+ newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to obtain replicaset %q: %v", rs.Name, err)
}
@@ -1036,7 +1037,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) {
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@@ -1084,11 +1085,11 @@ func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, re
ns := tester.deployment.Namespace
deploymentName := tester.deployment.Name
deploymentClient := tester.c.AppsV1().Deployments(ns)
- deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
+ deployment, err := deploymentClient.Get(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
}
- scale, err := tester.c.AppsV1().Deployments(ns).GetScale(deploymentName, metav1.GetOptions{})
+ scale, err := tester.c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to obtain scale subresource for deployment %q: %v", deploymentName, err)
}
@@ -1097,18 +1098,18 @@ func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, re
}
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- scale, err := tester.c.AppsV1().Deployments(ns).GetScale(deploymentName, metav1.GetOptions{})
+ scale, err := tester.c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
return err
}
scale.Spec.Replicas = replicas
- _, err = tester.c.AppsV1().Deployments(ns).UpdateScale(deploymentName, scale)
+ _, err = tester.c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deploymentName, scale)
return err
}); err != nil {
t.Fatalf("Failed to set .Spec.Replicas of scale subresource for deployment %q: %v", deploymentName, err)
}
- deployment, err = deploymentClient.Get(deploymentName, metav1.GetOptions{})
+ deployment, err = deploymentClient.Get(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err)
}
@@ -1128,7 +1129,7 @@ func TestDeploymentScaleSubresource(t *testing.T) {
replicas := int32(2)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@@ -1172,7 +1173,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
replicas := int32(1)
tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)}
var err error
- tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment)
+ tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment)
if err != nil {
t.Fatalf("failed to create deployment %q: %v", deploymentName, err)
}
@@ -1226,7 +1227,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
// Wait for the controllerRef of the replicaset to become nil
rsClient := tester.c.AppsV1().ReplicaSets(ns.Name)
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
- rs, err = rsClient.Get(rs.Name, metav1.GetOptions{})
+ rs, err = rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -1264,7 +1265,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
// Wait for the deployment to adopt the old replicaset
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
- rs, err := rsClient.Get(rs.Name, metav1.GetOptions{})
+ rs, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go
index dc2b3a13f1c..afbd5e40f3d 100644
--- a/test/integration/deployment/util.go
+++ b/test/integration/deployment/util.go
@@ -17,6 +17,7 @@ limitations under the License.
package deployment
import (
+ "context"
"fmt"
"net/http/httptest"
"sync"
@@ -210,7 +211,7 @@ func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image str
func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
addPodConditionReady(pod, metav1.Now())
- _, err := c.CoreV1().Pods(ns).UpdateStatus(pod)
+ _, err := c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), pod)
return err
}
@@ -256,7 +257,7 @@ func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {
}
func (d *deploymentTester) deploymentComplete() (bool, error) {
- latest, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+ latest, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(context.TODO(), d.deployment.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -330,7 +331,7 @@ func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) er
}
func (d *deploymentTester) getNewReplicaSet() (*apps.ReplicaSet, error) {
- deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+ deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(context.TODO(), d.deployment.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed retrieving deployment %s: %v", d.deployment.Name, err)
}
@@ -394,7 +395,7 @@ func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
if err != nil {
return nil, fmt.Errorf("failed to parse deployment selector: %v", err)
}
- pods, err := d.c.CoreV1().Pods(d.deployment.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
+ pods, err := d.c.CoreV1().Pods(d.deployment.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return nil, fmt.Errorf("failed to list deployment pods, will retry later: %v", err)
}
@@ -446,7 +447,7 @@ func (d *deploymentTester) scaleDeployment(newReplicas int32) error {
// waitForReadyReplicas waits for number of ready replicas to equal number of replicas.
func (d *deploymentTester) waitForReadyReplicas() error {
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
- deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+ deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(context.TODO(), d.deployment.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
}
@@ -484,7 +485,7 @@ func (d *deploymentTester) markUpdatedPodsReadyWithoutComplete() error {
// Verify all replicas fields of DeploymentStatus have desired count.
// Immediately return an error when found a non-matching replicas field.
func (d *deploymentTester) checkDeploymentStatusReplicasFields(replicas, updatedReplicas, readyReplicas, availableReplicas, unavailableReplicas int32) error {
- deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+ deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(context.TODO(), d.deployment.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err)
}
diff --git a/test/integration/disruption/disruption_test.go b/test/integration/disruption/disruption_test.go
index 11cab796dfc..b4ac572ba65 100644
--- a/test/integration/disruption/disruption_test.go
+++ b/test/integration/disruption/disruption_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package disruption
import (
+ "context"
"fmt"
"testing"
"time"
@@ -156,13 +157,13 @@ func TestPDBWithScaleSubresource(t *testing.T) {
},
},
}
- if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(nsName).Create(pdb); err != nil {
+ if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(nsName).Create(context.TODO(), pdb); err != nil {
t.Errorf("Error creating PodDisruptionBudget: %v", err)
}
waitPDBStable(t, clientSet, 4, nsName, pdb.Name)
- newPdb, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(nsName).Get(pdb.Name, metav1.GetOptions{})
+ newPdb, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(nsName).Get(context.TODO(), pdb.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Error getting PodDisruptionBudget: %v", err)
}
@@ -197,18 +198,18 @@ func createPod(t *testing.T, name, namespace, labelValue string, clientSet clien
},
},
}
- _, err := clientSet.CoreV1().Pods(namespace).Create(pod)
+ _, err := clientSet.CoreV1().Pods(namespace).Create(context.TODO(), pod)
if err != nil {
t.Error(err)
}
addPodConditionReady(pod)
- if _, err := clientSet.CoreV1().Pods(namespace).UpdateStatus(pod); err != nil {
+ if _, err := clientSet.CoreV1().Pods(namespace).UpdateStatus(context.TODO(), pod); err != nil {
t.Error(err)
}
}
func createNs(t *testing.T, name string, clientSet clientset.Interface) {
- _, err := clientSet.CoreV1().Namespaces().Create(&v1.Namespace{
+ _, err := clientSet.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
@@ -255,7 +256,7 @@ func newCustomResourceDefinition() *apiextensionsv1beta1.CustomResourceDefinitio
func waitPDBStable(t *testing.T, clientSet clientset.Interface, podNum int32, ns, pdbName string) {
if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
- pdb, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{})
+ pdb, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), pdbName, metav1.GetOptions{})
if err != nil {
return false, err
}
diff --git a/test/integration/dryrun/dryrun_test.go b/test/integration/dryrun/dryrun_test.go
index d3edf307488..1d6acd77218 100644
--- a/test/integration/dryrun/dryrun_test.go
+++ b/test/integration/dryrun/dryrun_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package dryrun
import (
+ "context"
"testing"
v1 "k8s.io/api/core/v1"
@@ -232,7 +233,7 @@ func TestDryRun(t *testing.T) {
// create CRDs so we can make sure that custom resources do not get lost
etcd.CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(s.ClientConfig), false, etcd.GetCustomResourceDefinitionData()...)
- if _, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil {
+ if _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil {
t.Fatal(err)
}
diff --git a/test/integration/etcd/crd_overlap_storage_test.go b/test/integration/etcd/crd_overlap_storage_test.go
index 1966582a90f..a1cdc258b78 100644
--- a/test/integration/etcd/crd_overlap_storage_test.go
+++ b/test/integration/etcd/crd_overlap_storage_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package etcd
import (
+ "context"
"encoding/json"
"strings"
"testing"
@@ -85,7 +86,7 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) {
}
// Verify APIServices can be listed
- apiServices, err := apiServiceClient.APIServices().List(metav1.ListOptions{})
+ apiServices, err := apiServiceClient.APIServices().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
@@ -98,7 +99,7 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) {
}
// Create a CRD defining an overlapping apiregistration.k8s.io apiservices resource with an incompatible schema
- crdCRD, err := crdClient.CustomResourceDefinitions().Create(&apiextensionsv1.CustomResourceDefinition{
+ crdCRD, err := crdClient.CustomResourceDefinitions().Create(context.TODO(), &apiextensionsv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: "apiservices.apiregistration.k8s.io",
Annotations: map[string]string{"api-approved.kubernetes.io": "unapproved, testing only"},
@@ -132,7 +133,7 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) {
// Wait until it is established
if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
- crd, err := crdClient.CustomResourceDefinitions().Get(crdCRD.Name, metav1.GetOptions{})
+ crd, err := crdClient.CustomResourceDefinitions().Get(context.TODO(), crdCRD.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -161,7 +162,7 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) {
}
// Creating v1 succeeds (built-in validation, not CR validation)
- testAPIService, err := apiServiceClient.APIServices().Create(&apiregistrationv1.APIService{
+ testAPIService, err := apiServiceClient.APIServices().Create(context.TODO(), &apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "v1.example.com"},
Spec: apiregistrationv1.APIServiceSpec{
Group: "example.com",
@@ -173,7 +174,7 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- err = apiServiceClient.APIServices().Delete(testAPIService.Name, &metav1.DeleteOptions{})
+ err = apiServiceClient.APIServices().Delete(context.TODO(), testAPIService.Name, &metav1.DeleteOptions{})
if err != nil {
t.Fatal(err)
}
@@ -196,14 +197,14 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) {
}
// Delete the overlapping CRD
- err = crdClient.CustomResourceDefinitions().Delete(crdCRD.Name, &metav1.DeleteOptions{})
+ err = crdClient.CustomResourceDefinitions().Delete(context.TODO(), crdCRD.Name, &metav1.DeleteOptions{})
if err != nil {
t.Fatal(err)
}
// Make sure the CRD deletion succeeds
if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
- crd, err := crdClient.CustomResourceDefinitions().Get(crdCRD.Name, metav1.GetOptions{})
+ crd, err := crdClient.CustomResourceDefinitions().Get(context.TODO(), crdCRD.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil
}
@@ -219,7 +220,7 @@ func TestOverlappingCustomResourceAPIService(t *testing.T) {
// Make sure APIService objects are not removed
time.Sleep(5 * time.Second)
- finalAPIServices, err := apiServiceClient.APIServices().List(metav1.ListOptions{})
+ finalAPIServices, err := apiServiceClient.APIServices().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
@@ -243,7 +244,7 @@ func TestOverlappingCustomResourceCustomResourceDefinition(t *testing.T) {
}
// Verify CustomResourceDefinitions can be listed
- crds, err := crdClient.CustomResourceDefinitions().List(metav1.ListOptions{})
+ crds, err := crdClient.CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
@@ -256,7 +257,7 @@ func TestOverlappingCustomResourceCustomResourceDefinition(t *testing.T) {
}
// Create a CRD defining an overlapping apiregistration.k8s.io apiservices resource with an incompatible schema
- crdCRD, err := crdClient.CustomResourceDefinitions().Create(&apiextensionsv1.CustomResourceDefinition{
+ crdCRD, err := crdClient.CustomResourceDefinitions().Create(context.TODO(), &apiextensionsv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: "customresourcedefinitions.apiextensions.k8s.io",
Annotations: map[string]string{"api-approved.kubernetes.io": "unapproved, testing only"},
@@ -295,7 +296,7 @@ func TestOverlappingCustomResourceCustomResourceDefinition(t *testing.T) {
// Wait until it is established
if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
- crd, err := crdClient.CustomResourceDefinitions().Get(crdCRD.Name, metav1.GetOptions{})
+ crd, err := crdClient.CustomResourceDefinitions().Get(context.TODO(), crdCRD.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -324,7 +325,7 @@ func TestOverlappingCustomResourceCustomResourceDefinition(t *testing.T) {
}
// Updating v1 succeeds (built-in validation, not CR validation)
- _, err = crdClient.CustomResourceDefinitions().Patch(crdCRD.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"updated"}}}`))
+ _, err = crdClient.CustomResourceDefinitions().Patch(context.TODO(), crdCRD.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"updated"}}}`))
if err != nil {
t.Fatal(err)
}
@@ -347,14 +348,14 @@ func TestOverlappingCustomResourceCustomResourceDefinition(t *testing.T) {
}
// Delete the overlapping CRD
- err = crdClient.CustomResourceDefinitions().Delete(crdCRD.Name, &metav1.DeleteOptions{})
+ err = crdClient.CustomResourceDefinitions().Delete(context.TODO(), crdCRD.Name, &metav1.DeleteOptions{})
if err != nil {
t.Fatal(err)
}
// Make sure the CRD deletion succeeds
if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
- crd, err := crdClient.CustomResourceDefinitions().Get(crdCRD.Name, metav1.GetOptions{})
+ crd, err := crdClient.CustomResourceDefinitions().Get(context.TODO(), crdCRD.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil
}
@@ -370,7 +371,7 @@ func TestOverlappingCustomResourceCustomResourceDefinition(t *testing.T) {
// Make sure other CustomResourceDefinition objects are not removed
time.Sleep(5 * time.Second)
- finalCRDs, err := crdClient.CustomResourceDefinitions().List(metav1.ListOptions{})
+ finalCRDs, err := crdClient.CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
diff --git a/test/integration/etcd/etcd_cross_group_test.go b/test/integration/etcd/etcd_cross_group_test.go
index aa99d61ec9a..bd8bdd1211b 100644
--- a/test/integration/etcd/etcd_cross_group_test.go
+++ b/test/integration/etcd/etcd_cross_group_test.go
@@ -44,7 +44,7 @@ func TestCrossGroupStorage(t *testing.T) {
crossGroupResources := map[schema.GroupVersionKind][]Resource{}
- master.Client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})
+ master.Client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})
// Group by persisted GVK
for _, resourceToPersist := range master.Resources {
diff --git a/test/integration/etcd/etcd_storage_path_test.go b/test/integration/etcd/etcd_storage_path_test.go
index 4bc8a6c51e8..a7e7e16f861 100644
--- a/test/integration/etcd/etcd_storage_path_test.go
+++ b/test/integration/etcd/etcd_storage_path_test.go
@@ -57,7 +57,7 @@ func TestEtcdStoragePath(t *testing.T) {
client := &allClient{dynamicClient: master.Dynamic}
- if _, err := master.Client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil {
+ if _, err := master.Client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}); err != nil {
t.Fatal(err)
}
diff --git a/test/integration/etcd/server.go b/test/integration/etcd/server.go
index 2bc49d23f6c..8fbc76b3fb0 100644
--- a/test/integration/etcd/server.go
+++ b/test/integration/etcd/server.go
@@ -311,7 +311,7 @@ func CreateTestCRDs(t *testing.T, client apiextensionsclientset.Interface, skipC
}
func createTestCRD(t *testing.T, client apiextensionsclientset.Interface, skipCrdExistsInDiscovery bool, crd *apiextensionsv1beta1.CustomResourceDefinition) {
- if _, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil {
+ if _, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd); err != nil {
t.Fatalf("Failed to create %s CRD; %v", crd.Name, err)
}
if skipCrdExistsInDiscovery {
@@ -329,7 +329,7 @@ func createTestCRD(t *testing.T, client apiextensionsclientset.Interface, skipCr
func waitForEstablishedCRD(client apiextensionsclientset.Interface, name string) error {
return wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
- crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{})
+ crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
diff --git a/test/integration/events/events_test.go b/test/integration/events/events_test.go
index ecda5e2d976..e6c64106d21 100644
--- a/test/integration/events/events_test.go
+++ b/test/integration/events/events_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package events
import (
+ "context"
"testing"
"time"
@@ -69,7 +70,7 @@ func TestEventCompatibility(t *testing.T) {
newBroadcaster.StartRecordingToSink(stopCh)
newRecorder.Eventf(regarding, related, v1.EventTypeNormal, "memoryPressure", "killed", "memory pressure")
err = wait.PollImmediate(100*time.Millisecond, 20*time.Second, func() (done bool, err error) {
- v1beta1Events, err := client.EventsV1beta1().Events("").List(metav1.ListOptions{})
+ v1beta1Events, err := client.EventsV1beta1().Events("").List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -78,7 +79,7 @@ func TestEventCompatibility(t *testing.T) {
return false, nil
}
- events, err := client.CoreV1().Events("").List(metav1.ListOptions{})
+ events, err := client.CoreV1().Events("").List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go
index da3cd0e89bb..bcf80694bd0 100644
--- a/test/integration/evictions/evictions_test.go
+++ b/test/integration/evictions/evictions_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package evictions
import (
+ "context"
"fmt"
"net/http/httptest"
@@ -81,12 +82,12 @@ func TestConcurrentEvictionRequests(t *testing.T) {
podName := fmt.Sprintf(podNameFormat, i)
pod := newPod(podName)
- if _, err := clientSet.CoreV1().Pods(ns.Name).Create(pod); err != nil {
+ if _, err := clientSet.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
addPodConditionReady(pod)
- if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil {
+ if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod); err != nil {
t.Fatal(err)
}
}
@@ -94,7 +95,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
waitToObservePods(t, informers.Core().V1().Pods().Informer(), numOfEvictions, v1.PodRunning)
pdb := newPDB()
- if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil {
+ if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(context.TODO(), pdb); err != nil {
t.Errorf("Failed to create PodDisruptionBudget: %v", err)
}
@@ -130,7 +131,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
// should not return here otherwise we would leak the pod
}
- _, err = clientSet.CoreV1().Pods(ns.Name).Get(podName, metav1.GetOptions{})
+ _, err = clientSet.CoreV1().Pods(ns.Name).Get(context.TODO(), podName, metav1.GetOptions{})
switch {
case apierrors.IsNotFound(err):
atomic.AddUint32(&numberPodsEvicted, 1)
@@ -144,7 +145,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
}
// delete pod which still exists due to error
- e := clientSet.CoreV1().Pods(ns.Name).Delete(podName, deleteOption)
+ e := clientSet.CoreV1().Pods(ns.Name).Delete(context.TODO(), podName, deleteOption)
if e != nil {
errCh <- e
}
@@ -156,7 +157,7 @@ func TestConcurrentEvictionRequests(t *testing.T) {
close(errCh)
var errList []error
- if err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil {
+ if err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Delete(context.TODO(), pdb.Name, deleteOption); err != nil {
errList = append(errList, fmt.Errorf("Failed to delete PodDisruptionBudget: %v", err))
}
for err := range errCh {
@@ -195,25 +196,25 @@ func TestTerminalPodEviction(t *testing.T) {
GracePeriodSeconds: &gracePeriodSeconds,
}
pod := newPod("test-terminal-pod1")
- if _, err := clientSet.CoreV1().Pods(ns.Name).Create(pod); err != nil {
+ if _, err := clientSet.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
addPodConditionSucceeded(pod)
- if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil {
+ if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod); err != nil {
t.Fatal(err)
}
waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1, v1.PodSucceeded)
pdb := newPDB()
- if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil {
+ if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Create(context.TODO(), pdb); err != nil {
t.Errorf("Failed to create PodDisruptionBudget: %v", err)
}
waitPDBStable(t, clientSet, 1, ns.Name, pdb.Name)
- pdbList, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{})
+ pdbList, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Error while listing pod disruption budget")
}
@@ -235,7 +236,7 @@ func TestTerminalPodEviction(t *testing.T) {
if err != nil {
t.Fatalf("Eviction of pod failed %v", err)
}
- pdbList, err = clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{})
+ pdbList, err = clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Error while listing pod disruption budget")
}
@@ -245,7 +246,7 @@ func TestTerminalPodEviction(t *testing.T) {
t.Fatalf("Expected the pdb generation to be of same value %v but got %v", newPdb.Status.ObservedGeneration, oldPdb.Status.ObservedGeneration)
}
- if err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil {
+ if err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns.Name).Delete(context.TODO(), pdb.Name, deleteOption); err != nil {
t.Fatalf("Failed to delete pod disruption budget")
}
}
@@ -382,7 +383,7 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN
func waitPDBStable(t *testing.T, clientSet clientset.Interface, podNum int32, ns, pdbName string) {
if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
- pdb, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{})
+ pdb, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), pdbName, metav1.GetOptions{})
if err != nil {
return false, err
}
diff --git a/test/integration/examples/apiserver_test.go b/test/integration/examples/apiserver_test.go
index eebbac91e42..bfa8fbdef2f 100644
--- a/test/integration/examples/apiserver_test.go
+++ b/test/integration/examples/apiserver_test.go
@@ -111,7 +111,7 @@ func TestAggregatedAPIServer(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = aggregatorClient.ApiregistrationV1beta1().APIServices().Create(&apiregistrationv1beta1.APIService{
+ _, err = aggregatorClient.ApiregistrationV1beta1().APIServices().Create(context.TODO(), &apiregistrationv1beta1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.wardle.example.com"},
Spec: apiregistrationv1beta1.APIServiceSpec{
Service: &apiregistrationv1beta1.ServiceReference{
diff --git a/test/integration/examples/webhook_test.go b/test/integration/examples/webhook_test.go
index d5ce5db6bc7..a22ed046877 100644
--- a/test/integration/examples/webhook_test.go
+++ b/test/integration/examples/webhook_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package apiserver
import (
+ "context"
"sync/atomic"
"testing"
"time"
@@ -63,7 +64,7 @@ func TestWebhookLoopback(t *testing.T) {
})
fail := admissionv1beta1.Fail
- _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionv1beta1.MutatingWebhookConfiguration{
+ _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: "webhooktest.example.com"},
Webhooks: []admissionv1beta1.MutatingWebhook{{
Name: "webhooktest.example.com",
@@ -82,7 +83,7 @@ func TestWebhookLoopback(t *testing.T) {
}
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) {
- _, err = client.CoreV1().ConfigMaps("default").Create(&v1.ConfigMap{
+ _, err = client.CoreV1().ConfigMaps("default").Create(context.TODO(), &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "webhook-test"},
Data: map[string]string{"invalid key": "value"},
})
diff --git a/test/integration/framework/perf_utils.go b/test/integration/framework/perf_utils.go
index f18a3d262f3..5013f9f76ab 100644
--- a/test/integration/framework/perf_utils.go
+++ b/test/integration/framework/perf_utils.go
@@ -17,6 +17,7 @@ limitations under the License.
package framework
import (
+ "context"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -89,7 +90,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
for i := 0; i < numNodes; i++ {
var err error
for retry := 0; retry < retries; retry++ {
- _, err = p.client.CoreV1().Nodes().Create(baseNode)
+ _, err = p.client.CoreV1().Nodes().Create(context.TODO(), baseNode)
if err == nil || !testutils.IsRetryableAPIError(err) {
break
}
@@ -124,7 +125,7 @@ func (p *IntegrationTestNodePreparer) CleanupNodes() error {
klog.Fatalf("Error listing nodes: %v", err)
}
for i := range nodes.Items {
- if err := p.client.CoreV1().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil {
+ if err := p.client.CoreV1().Nodes().Delete(context.TODO(), nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil {
klog.Errorf("Error while deleting Node: %v", err)
}
}
diff --git a/test/integration/framework/test_server.go b/test/integration/framework/test_server.go
index 865a5f96c3a..8bb5f097345 100644
--- a/test/integration/framework/test_server.go
+++ b/test/integration/framework/test_server.go
@@ -156,10 +156,10 @@ func StartTestServer(t *testing.T, stopCh <-chan struct{}, setup TestServerSetup
return false, nil
}
- if _, err := kubeClient.CoreV1().Namespaces().Get("default", metav1.GetOptions{}); err != nil {
+ if _, err := kubeClient.CoreV1().Namespaces().Get(context.TODO(), "default", metav1.GetOptions{}); err != nil {
return false, nil
}
- if _, err := kubeClient.CoreV1().Namespaces().Get("kube-system", metav1.GetOptions{}); err != nil {
+ if _, err := kubeClient.CoreV1().Namespaces().Get(context.TODO(), "kube-system", metav1.GetOptions{}); err != nil {
return false, nil
}
diff --git a/test/integration/garbagecollector/cluster_scoped_owner_test.go b/test/integration/garbagecollector/cluster_scoped_owner_test.go
index b1a68f812b6..20b8a09c88f 100644
--- a/test/integration/garbagecollector/cluster_scoped_owner_test.go
+++ b/test/integration/garbagecollector/cluster_scoped_owner_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package garbagecollector
import (
+ "context"
"io"
"net/http"
"strings"
@@ -74,7 +75,7 @@ func TestClusterScopedOwners(t *testing.T) {
defer deleteNamespaceOrDie(ns.Name, clientSet, t)
t.Log("Create a pair of objects")
- pv, err := clientSet.CoreV1().PersistentVolumes().Create(&v1.PersistentVolume{
+ pv, err := clientSet.CoreV1().PersistentVolumes().Create(context.TODO(), &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{Name: "pv-valid"},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/foo"}},
@@ -85,7 +86,7 @@ func TestClusterScopedOwners(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(&v1.ConfigMap{
+ if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "cm-valid",
OwnerReferences: []metav1.OwnerReference{{Kind: "PersistentVolume", APIVersion: "v1", Name: pv.Name, UID: pv.UID}},
@@ -95,7 +96,7 @@ func TestClusterScopedOwners(t *testing.T) {
}
t.Log("Create a namespaced object with a missing parent")
- if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(&v1.ConfigMap{
+ if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "cm-missing",
Labels: map[string]string{"missing": "true"},
@@ -106,7 +107,7 @@ func TestClusterScopedOwners(t *testing.T) {
}
t.Log("Create a namespaced object with a missing type parent")
- if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(&v1.ConfigMap{
+ if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Create(context.TODO(), &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "cm-invalid",
OwnerReferences: []metav1.OwnerReference{{Kind: "UnknownType", APIVersion: "unknown.group/v1", Name: "invalid-name", UID: types.UID("invalid-uid")}},
@@ -117,7 +118,7 @@ func TestClusterScopedOwners(t *testing.T) {
// wait for deletable children to go away
if err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) {
- _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-missing", metav1.GetOptions{})
+ _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get(context.TODO(), "cm-missing", metav1.GetOptions{})
switch {
case apierrors.IsNotFound(err):
return true, nil
@@ -136,12 +137,12 @@ func TestClusterScopedOwners(t *testing.T) {
time.Sleep(5 * time.Second)
// ensure children with unverifiable parents don't get reaped
- if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-invalid", metav1.GetOptions{}); err != nil {
+ if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get(context.TODO(), "cm-invalid", metav1.GetOptions{}); err != nil {
t.Fatalf("child with invalid ownerRef is unexpectedly missing: %v", err)
}
// ensure children with present parents don't get reaped
- if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get("cm-valid", metav1.GetOptions{}); err != nil {
+ if _, err := clientSet.CoreV1().ConfigMaps(ns.Name).Get(context.TODO(), "cm-valid", metav1.GetOptions{}); err != nil {
t.Fatalf("child with valid ownerRef is unexpectedly missing: %v", err)
}
}
diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go
index f78060b5ea8..735cad84b4d 100644
--- a/test/integration/garbagecollector/garbage_collector_test.go
+++ b/test/integration/garbagecollector/garbage_collector_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package garbagecollector
import (
+ "context"
"fmt"
"strconv"
"strings"
@@ -292,11 +293,11 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
func createNamespaceOrDie(name string, c clientset.Interface, t *testing.T) *v1.Namespace {
ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
- if _, err := c.CoreV1().Namespaces().Create(ns); err != nil {
+ if _, err := c.CoreV1().Namespaces().Create(context.TODO(), ns); err != nil {
t.Fatalf("failed to create namespace: %v", err)
}
falseVar := false
- _, err := c.CoreV1().ServiceAccounts(ns.Name).Create(&v1.ServiceAccount{
+ _, err := c.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Name: "default"},
AutomountServiceAccountToken: &falseVar,
})
@@ -309,7 +310,7 @@ func createNamespaceOrDie(name string, c clientset.Interface, t *testing.T) *v1.
func deleteNamespaceOrDie(name string, c clientset.Interface, t *testing.T) {
zero := int64(0)
background := metav1.DeletePropagationBackground
- err := c.CoreV1().Namespaces().Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background})
+ err := c.CoreV1().Namespaces().Delete(context.TODO(), name, &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background})
if err != nil {
t.Fatalf("failed to delete namespace %q: %v", name, err)
}
@@ -328,16 +329,16 @@ func TestCascadingDeletion(t *testing.T) {
rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
podClient := clientSet.CoreV1().Pods(ns.Name)
- toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
+ toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name))
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
- remainingRC, err := rcClient.Create(newOwnerRC(remainingRCName, ns.Name))
+ remainingRC, err := rcClient.Create(context.TODO(), newOwnerRC(remainingRCName, ns.Name))
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
- rcs, err := rcClient.List(metav1.ListOptions{})
+ rcs, err := rcClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list replication controllers: %v", err)
}
@@ -347,7 +348,7 @@ func TestCascadingDeletion(t *testing.T) {
// this pod should be cascadingly deleted.
pod := newPod(garbageCollectedPodName, ns.Name, []metav1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
- _, err = podClient.Create(pod)
+ _, err = podClient.Create(context.TODO(), pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
@@ -357,20 +358,20 @@ func TestCascadingDeletion(t *testing.T) {
{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName},
{UID: remainingRC.ObjectMeta.UID, Name: remainingRCName},
})
- _, err = podClient.Create(pod)
+ _, err = podClient.Create(context.TODO(), pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
// this pod shouldn't be cascadingly deleted, because it doesn't have an owner.
pod = newPod(independentPodName, ns.Name, []metav1.OwnerReference{})
- _, err = podClient.Create(pod)
+ _, err = podClient.Create(context.TODO(), pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
// set up watch
- pods, err := podClient.List(metav1.ListOptions{})
+ pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list pods: %v", err)
}
@@ -378,7 +379,7 @@ func TestCascadingDeletion(t *testing.T) {
t.Fatalf("Expect only 3 pods")
}
// delete one of the replication controller
- if err := rcClient.Delete(toBeDeletedRCName, getNonOrphanOptions()); err != nil {
+ if err := rcClient.Delete(context.TODO(), toBeDeletedRCName, getNonOrphanOptions()); err != nil {
t.Fatalf("failed to delete replication controller: %v", err)
}
// sometimes the deletion of the RC takes long time to be observed by
@@ -393,10 +394,10 @@ func TestCascadingDeletion(t *testing.T) {
t.Fatalf("expect pod %s to be garbage collected, got err= %v", garbageCollectedPodName, err)
}
// checks the garbage collect doesn't delete pods it shouldn't delete.
- if _, err := podClient.Get(independentPodName, metav1.GetOptions{}); err != nil {
+ if _, err := podClient.Get(context.TODO(), independentPodName, metav1.GetOptions{}); err != nil {
t.Fatal(err)
}
- if _, err := podClient.Get(oneValidOwnerPodName, metav1.GetOptions{}); err != nil {
+ if _, err := podClient.Get(context.TODO(), oneValidOwnerPodName, metav1.GetOptions{}); err != nil {
t.Fatal(err)
}
}
@@ -415,13 +416,13 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
podClient := clientSet.CoreV1().Pods(ns.Name)
pod := newPod(garbageCollectedPodName, ns.Name, []metav1.OwnerReference{{UID: "doesn't matter", Name: toBeDeletedRCName}})
- _, err := podClient.Create(pod)
+ _, err := podClient.Create(context.TODO(), pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
// set up watch
- pods, err := podClient.List(metav1.ListOptions{})
+ pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list pods: %v", err)
}
@@ -442,7 +443,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
rcName := "test.rc." + nameSuffix
rc := newOwnerRC(rcName, namespace)
rc.ObjectMeta.Finalizers = initialFinalizers
- rc, err := rcClient.Create(rc)
+ rc, err := rcClient.Create(context.TODO(), rc)
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
@@ -452,7 +453,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
for j := 0; j < 3; j++ {
podName := "test.pod." + nameSuffix + "-" + strconv.Itoa(j)
pod := newPod(podName, namespace, []metav1.OwnerReference{{UID: rc.ObjectMeta.UID, Name: rc.ObjectMeta.Name}})
- createdPod, err := podClient.Create(pod)
+ createdPod, err := podClient.Create(context.TODO(), pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
@@ -490,7 +491,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
}
}
// delete the rc
- if err := rcClient.Delete(rc.ObjectMeta.Name, options); err != nil {
+ if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, options); err != nil {
t.Fatalf("failed to delete replication controller: %v", err)
}
}
@@ -498,7 +499,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
podClient := clientSet.CoreV1().Pods(namespace)
- pods, err := podClient.List(metav1.ListOptions{})
+ pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
@@ -507,7 +508,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
ret = false
t.Logf("expect %d pods, got %d pods", podNum, len(pods.Items))
}
- rcs, err := rcClient.List(metav1.ListOptions{})
+ rcs, err := rcClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
}
@@ -561,7 +562,7 @@ func TestStressingCascadingDeletion(t *testing.T) {
// verify the remaining pods all have "orphan" in their names.
podClient := clientSet.CoreV1().Pods(ns.Name)
- pods, err := podClient.List(metav1.ListOptions{})
+ pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
@@ -593,7 +594,7 @@ func TestOrphaning(t *testing.T) {
rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
// create the RC with the orphan finalizer set
toBeDeletedRC := newOwnerRC(toBeDeletedRCName, ns.Name)
- toBeDeletedRC, err := rcClient.Create(toBeDeletedRC)
+ toBeDeletedRC, err := rcClient.Create(context.TODO(), toBeDeletedRC)
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
@@ -604,7 +605,7 @@ func TestOrphaning(t *testing.T) {
for i := 0; i < podsNum; i++ {
podName := garbageCollectedPodName + strconv.Itoa(i)
pod := newPod(podName, ns.Name, []metav1.OwnerReference{{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRCName}})
- createdPod, err := podClient.Create(pod)
+ createdPod, err := podClient.Create(context.TODO(), pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
@@ -626,13 +627,13 @@ func TestOrphaning(t *testing.T) {
t.Fatalf("Failed to observe pods in GC graph for %s: %v", toBeDeletedRC.Name, err)
}
- err = rcClient.Delete(toBeDeletedRCName, getOrphanOptions())
+ err = rcClient.Delete(context.TODO(), toBeDeletedRCName, getOrphanOptions())
if err != nil {
t.Fatalf("Failed to gracefully delete the rc: %v", err)
}
// verify the toBeDeleteRC is deleted
if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
- rcs, err := rcClient.List(metav1.ListOptions{})
+ rcs, err := rcClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -646,7 +647,7 @@ func TestOrphaning(t *testing.T) {
}
// verify pods don't have the ownerPod as an owner anymore
- pods, err := podClient.List(metav1.ListOptions{})
+ pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list pods: %v", err)
}
@@ -672,11 +673,11 @@ func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) {
podClient := clientSet.CoreV1().Pods(ns.Name)
rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
// create the RC with the orphan finalizer set
- toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
+ toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name))
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
- remainingRC, err := rcClient.Create(newOwnerRC(remainingRCName, ns.Name))
+ remainingRC, err := rcClient.Create(context.TODO(), newOwnerRC(remainingRCName, ns.Name))
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
@@ -685,18 +686,18 @@ func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) {
{UID: toBeDeletedRC.ObjectMeta.UID, Name: toBeDeletedRC.Name, BlockOwnerDeletion: &trueVar},
{UID: remainingRC.ObjectMeta.UID, Name: remainingRC.Name},
})
- _, err = podClient.Create(pod)
+ _, err = podClient.Create(context.TODO(), pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
- err = rcClient.Delete(toBeDeletedRCName, getForegroundOptions())
+ err = rcClient.Delete(context.TODO(), toBeDeletedRCName, getForegroundOptions())
if err != nil {
t.Fatalf("Failed to delete the rc: %v", err)
}
// verify the toBeDeleteRC is deleted
if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
- _, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{})
+ _, err := rcClient.Get(context.TODO(), toBeDeletedRC.Name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return true, nil
@@ -709,7 +710,7 @@ func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) {
}
// verify pods don't have the toBeDeleteRC as an owner anymore
- pod, err = podClient.Get("pod", metav1.GetOptions{})
+ pod, err = podClient.Get(context.TODO(), "pod", metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to list pods: %v", err)
}
@@ -732,7 +733,7 @@ func TestNonBlockingOwnerRefDoesNotBlock(t *testing.T) {
podClient := clientSet.CoreV1().Pods(ns.Name)
rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
// create the RC with the orphan finalizer set
- toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
+ toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name))
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
@@ -749,22 +750,22 @@ func TestNonBlockingOwnerRefDoesNotBlock(t *testing.T) {
})
// adding finalizer that no controller handles, so that the pod won't be deleted
pod2.ObjectMeta.Finalizers = []string{"x/y"}
- _, err = podClient.Create(pod1)
+ _, err = podClient.Create(context.TODO(), pod1)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
- _, err = podClient.Create(pod2)
+ _, err = podClient.Create(context.TODO(), pod2)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
- err = rcClient.Delete(toBeDeletedRCName, getForegroundOptions())
+ err = rcClient.Delete(context.TODO(), toBeDeletedRCName, getForegroundOptions())
if err != nil {
t.Fatalf("Failed to delete the rc: %v", err)
}
// verify the toBeDeleteRC is deleted
if err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
- _, err := rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{})
+ _, err := rcClient.Get(context.TODO(), toBeDeletedRC.Name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return true, nil
@@ -777,7 +778,7 @@ func TestNonBlockingOwnerRefDoesNotBlock(t *testing.T) {
}
// verify pods are still there
- pods, err := podClient.List(metav1.ListOptions{})
+ pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list pods: %v", err)
}
@@ -798,14 +799,14 @@ func TestDoubleDeletionWithFinalizer(t *testing.T) {
podClient := clientSet.CoreV1().Pods(ns.Name)
pod := newPod("lucy", ns.Name, nil)
pod.ObjectMeta.Finalizers = []string{"x/y"}
- if _, err := podClient.Create(pod); err != nil {
+ if _, err := podClient.Create(context.TODO(), pod); err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
- if err := podClient.Delete(pod.Name, getForegroundOptions()); err != nil {
+ if err := podClient.Delete(context.TODO(), pod.Name, getForegroundOptions()); err != nil {
t.Fatalf("Failed to delete pod: %v", err)
}
if err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
- returnedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
+ returnedPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -819,11 +820,11 @@ func TestDoubleDeletionWithFinalizer(t *testing.T) {
}
// step 2: deletes the pod one more time and checks if there's only the custom finalizer left
- if err := podClient.Delete(pod.Name, getForegroundOptions()); err != nil {
+ if err := podClient.Delete(context.TODO(), pod.Name, getForegroundOptions()); err != nil {
t.Fatalf("Failed to delete pod: %v", err)
}
if err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
- returnedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})
+ returnedPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -838,11 +839,11 @@ func TestDoubleDeletionWithFinalizer(t *testing.T) {
// step 3: removes the custom finalizer and checks if the pod was removed
patch := []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`)
- if _, err := podClient.Patch(pod.Name, types.JSONPatchType, patch); err != nil {
+ if _, err := podClient.Patch(context.TODO(), pod.Name, types.JSONPatchType, patch); err != nil {
t.Fatalf("Failed to update pod: %v", err)
}
if err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
- _, err := podClient.Get(pod.Name, metav1.GetOptions{})
+ _, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
return apierrors.IsNotFound(err), nil
}); err != nil {
t.Fatalf("Failed waiting for pod %q to be deleted", pod.Name)
@@ -860,7 +861,7 @@ func TestBlockingOwnerRefDoesBlock(t *testing.T) {
podClient := clientSet.CoreV1().Pods(ns.Name)
rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
// create the RC with the orphan finalizer set
- toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name))
+ toBeDeletedRC, err := rcClient.Create(context.TODO(), newOwnerRC(toBeDeletedRCName, ns.Name))
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
}
@@ -870,7 +871,7 @@ func TestBlockingOwnerRefDoesBlock(t *testing.T) {
})
// adding finalizer that no controller handles, so that the pod won't be deleted
pod.ObjectMeta.Finalizers = []string{"x/y"}
- _, err = podClient.Create(pod)
+ _, err = podClient.Create(context.TODO(), pod)
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
@@ -889,19 +890,19 @@ func TestBlockingOwnerRefDoesBlock(t *testing.T) {
t.Fatalf("failed to wait for garbage collector to be synced") } - err = rcClient.Delete(toBeDeletedRCName, getForegroundOptions()) + err = rcClient.Delete(context.TODO(), toBeDeletedRCName, getForegroundOptions()) if err != nil { t.Fatalf("Failed to delete the rc: %v", err) } time.Sleep(15 * time.Second) // verify the toBeDeleteRC is NOT deleted - _, err = rcClient.Get(toBeDeletedRC.Name, metav1.GetOptions{}) + _, err = rcClient.Get(context.TODO(), toBeDeletedRC.Name, metav1.GetOptions{}) if err != nil { t.Errorf("unexpected error: %v", err) } // verify pods are still there - pods, err := podClient.List(metav1.ListOptions{}) + pods, err := podClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list pods: %v", err) } @@ -994,14 +995,14 @@ func TestMixedRelationships(t *testing.T) { // Create a core dependent resource. coreDependent := newConfigMap(ns.Name, names.SimpleNameGenerator.GenerateName("dependent")) link(t, customOwner, coreDependent) - coreDependent, err = configMapClient.Create(coreDependent) + coreDependent, err = configMapClient.Create(context.TODO(), coreDependent) if err != nil { t.Fatalf("failed to create dependent: %v", err) } t.Logf("created core dependent %q", coreDependent.GetName()) // Create a core owner resource. - coreOwner, err := configMapClient.Create(newConfigMap(ns.Name, names.SimpleNameGenerator.GenerateName("owner"))) + coreOwner, err := configMapClient.Create(context.TODO(), newConfigMap(ns.Name, names.SimpleNameGenerator.GenerateName("owner"))) if err != nil { t.Fatalf("failed to create owner: %v", err) } @@ -1044,14 +1045,14 @@ func TestMixedRelationships(t *testing.T) { } // Delete the core owner. - err = configMapClient.Delete(coreOwner.GetName(), &metav1.DeleteOptions{PropagationPolicy: &foreground}) + err = configMapClient.Delete(context.TODO(), coreOwner.GetName(), &metav1.DeleteOptions{PropagationPolicy: &foreground}) if err != nil { t.Fatalf("failed to delete owner resource %q: %v", coreOwner.GetName(), err) } // Ensure the owner is deleted. if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { - _, err := configMapClient.Get(coreOwner.GetName(), metav1.GetOptions{}) + _, err := configMapClient.Get(context.TODO(), coreOwner.GetName(), metav1.GetOptions{}) return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for owner resource %q to be deleted", coreOwner.GetName()) @@ -1107,7 +1108,7 @@ func testCRDDeletion(t *testing.T, ctx *testContext, ns *v1.Namespace, definitio // Create a core dependent resource. dependent := newConfigMap(ns.Name, names.SimpleNameGenerator.GenerateName("dependent")) link(t, owner, dependent) - dependent, err = configMapClient.Create(dependent) + dependent, err = configMapClient.Create(context.TODO(), dependent) if err != nil { t.Fatalf("failed to create dependent: %v", err) } @@ -1130,7 +1131,7 @@ func testCRDDeletion(t *testing.T, ctx *testContext, ns *v1.Namespace, definitio // Ensure the dependent is deleted. 
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) { - _, err := configMapClient.Get(dependent.GetName(), metav1.GetOptions{}) + _, err := configMapClient.Get(context.TODO(), dependent.GetName(), metav1.GetOptions{}) return apierrors.IsNotFound(err), nil }); err != nil { t.Fatalf("failed waiting for dependent %q (owned by %q) to be deleted", dependent.GetName(), owner.GetName()) diff --git a/test/integration/ipamperf/util.go b/test/integration/ipamperf/util.go index 883f6dd0df3..e6818021520 100644 --- a/test/integration/ipamperf/util.go +++ b/test/integration/ipamperf/util.go @@ -17,6 +17,7 @@ limitations under the License. package ipamperf import ( + "context" "time" "k8s.io/api/core/v1" @@ -62,7 +63,7 @@ func deleteNodes(apiURL string, config *Config) { Burst: config.CreateQPS, }) noGrace := int64(0) - if err := clientSet.CoreV1().Nodes().DeleteCollection(&metav1.DeleteOptions{GracePeriodSeconds: &noGrace}, metav1.ListOptions{}); err != nil { + if err := clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}, metav1.ListOptions{}); err != nil { klog.Errorf("Error deleting node: %v", err) } } @@ -78,7 +79,7 @@ func createNodes(apiURL string, config *Config) error { for i := 0; i < config.NumNodes; i++ { var err error for j := 0; j < maxCreateRetries; j++ { - if _, err = clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil && apierrors.IsServerTimeout(err) { + if _, err = clientSet.CoreV1().Nodes().Create(context.TODO(), baseNodeTemplate); err != nil && apierrors.IsServerTimeout(err) { klog.Infof("Server timeout creating nodes, retrying after %v", retryDelay) time.Sleep(retryDelay) continue diff --git a/test/integration/kubelet/watch_manager_test.go b/test/integration/kubelet/watch_manager_test.go index 248c7059fe7..95befe0f683 100644 --- a/test/integration/kubelet/watch_manager_test.go +++ b/test/integration/kubelet/watch_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package kubelet import ( + "context" "fmt" "sync" "testing" @@ -45,15 +46,15 @@ func TestWatchBasedManager(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - if _, err := client.CoreV1().Namespaces().Create((&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})); err != nil { + if _, err := client.CoreV1().Namespaces().Create(context.TODO(), (&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})); err != nil { t.Fatal(err) } listObj := func(namespace string, options metav1.ListOptions) (runtime.Object, error) { - return client.CoreV1().Secrets(namespace).List(options) + return client.CoreV1().Secrets(namespace).List(context.TODO(), options) } watchObj := func(namespace string, options metav1.ListOptions) (watch.Interface, error) { - return client.CoreV1().Secrets(namespace).Watch(options) + return client.CoreV1().Secrets(namespace).Watch(context.TODO(), options) } newObj := func() runtime.Object { return &v1.Secret{} } store := manager.NewObjectCache(listObj, watchObj, newObj, schema.GroupResource{Group: "v1", Resource: "secrets"}) @@ -68,7 +69,7 @@ func TestWatchBasedManager(t *testing.T) { defer wg.Done() for j := 0; j < 100; j++ { name := fmt.Sprintf("s%d", i*100+j) - if _, err := client.CoreV1().Secrets(testNamespace).Create(&v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: name}}); err != nil { + if _, err := client.CoreV1().Secrets(testNamespace).Create(context.TODO(), &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: name}}); err != nil { select { case errCh <- err: default: diff --git a/test/integration/master/audit_dynamic_test.go b/test/integration/master/audit_dynamic_test.go index a730cd7e6d3..ba0ea807153 100644 --- a/test/integration/master/audit_dynamic_test.go +++ b/test/integration/master/audit_dynamic_test.go @@ -17,6 +17,7 @@ limitations under the License. package master import ( + "context" "fmt" "sync" "sync/atomic" @@ -70,7 +71,7 @@ func TestDynamicAudit(t *testing.T) { // test creates a single audit sink, generates audit events, and ensures they arrive at the server success := t.Run("one sink", func(t *testing.T) { - _, err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Create(sinkConfig1) + _, err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Create(context.TODO(), sinkConfig1) require.NoError(t, err, "failed to create audit sink1") t.Log("created audit sink1") @@ -88,7 +89,7 @@ func TestDynamicAudit(t *testing.T) { // test creates a second audit sink, generates audit events, and ensures events arrive in both servers success = t.Run("two sink", func(t *testing.T) { - _, err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Create(sinkConfig2) + _, err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Create(context.TODO(), sinkConfig2) require.NoError(t, err, "failed to create audit sink2") t.Log("created audit sink2") @@ -108,7 +109,7 @@ func TestDynamicAudit(t *testing.T) { // test deletes an audit sink, generates audit events, and ensures they don't arrive in the corresponding server success = t.Run("delete sink", func(t *testing.T) { - err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Delete(sinkConfig2.Name, &metav1.DeleteOptions{}) + err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Delete(context.TODO(), sinkConfig2.Name, &metav1.DeleteOptions{}) require.NoError(t, err, "failed to delete audit sink2") t.Log("deleted audit sink2") @@ -144,7 +145,7 @@ func TestDynamicAudit(t *testing.T) { // The test checks that no events are lost or duplicated during the update. 
t.Run("update sink", func(t *testing.T) { // fetch sink1 config - sink1, err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Get(sinkConfig1.Name, metav1.GetOptions{}) + sink1, err := kubeclient.AuditregistrationV1alpha1().AuditSinks().Get(context.TODO(), sinkConfig1.Name, metav1.GetOptions{}) require.NoError(t, err) // reset event lists @@ -168,7 +169,7 @@ func TestDynamicAudit(t *testing.T) { // update the url sink1.Spec.Webhook.ClientConfig.URL = &testServer2.Server.URL - _, err = kubeclient.AuditregistrationV1alpha1().AuditSinks().Update(sink1) + _, err = kubeclient.AuditregistrationV1alpha1().AuditSinks().Update(context.TODO(), sink1) require.NoError(t, err, "failed to update audit sink1") t.Log("updated audit sink1 to point to server2") @@ -230,7 +231,7 @@ func sinkHealth(t *testing.T, kubeclient kubernetes.Interface, servers ...*utils // simpleOp is a function that simply tries to get a configmap with the given name and returns the // corresponding expected audit event func simpleOp(name string, kubeclient kubernetes.Interface) ([]utils.AuditEvent, error) { - _, err := kubeclient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) + _, err := kubeclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return nil, err } diff --git a/test/integration/master/audit_test.go b/test/integration/master/audit_test.go index 332171a17f1..21532e4d24a 100644 --- a/test/integration/master/audit_test.go +++ b/test/integration/master/audit_test.go @@ -17,6 +17,7 @@ limitations under the License. package master import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -360,13 +361,13 @@ func configMapOperations(t *testing.T, kubeclient kubernetes.Interface) { }, } - _, err := kubeclient.CoreV1().ConfigMaps(namespace).Create(configMap) + _, err := kubeclient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap) expectNoError(t, err, "failed to create audit-configmap") - _, err = kubeclient.CoreV1().ConfigMaps(namespace).Get(configMap.Name, metav1.GetOptions{}) + _, err = kubeclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMap.Name, metav1.GetOptions{}) expectNoError(t, err, "failed to get audit-configmap") - configMapChan, err := kubeclient.CoreV1().ConfigMaps(namespace).Watch(watchOptions) + configMapChan, err := kubeclient.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), watchOptions) expectNoError(t, err, "failed to create watch for config maps") for range configMapChan.ResultChan() { // Block until watchOptions.TimeoutSeconds expires. @@ -374,16 +375,16 @@ func configMapOperations(t *testing.T, kubeclient kubernetes.Interface) { // event at stage ResponseComplete will not be generated. 
} - _, err = kubeclient.CoreV1().ConfigMaps(namespace).Update(configMap) + _, err = kubeclient.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap) expectNoError(t, err, "failed to update audit-configmap") - _, err = kubeclient.CoreV1().ConfigMaps(namespace).Patch(configMap.Name, types.JSONPatchType, patch) + _, err = kubeclient.CoreV1().ConfigMaps(namespace).Patch(context.TODO(), configMap.Name, types.JSONPatchType, patch) expectNoError(t, err, "failed to patch configmap") - _, err = kubeclient.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{}) + _, err = kubeclient.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) expectNoError(t, err, "failed to list config maps") - err = kubeclient.CoreV1().ConfigMaps(namespace).Delete(configMap.Name, &metav1.DeleteOptions{}) + err = kubeclient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configMap.Name, &metav1.DeleteOptions{}) expectNoError(t, err, "failed to delete audit-configmap") } @@ -427,7 +428,7 @@ func admitFunc(review *v1beta1.AdmissionReview) error { func createV1beta1MutationWebhook(client clientset.Interface, endpoint string) error { fail := admissionv1beta1.Fail // Attaching Mutation webhook to API server - _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionv1beta1.MutatingWebhookConfiguration{ + _, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: testWebhookConfigurationName}, Webhooks: []admissionv1beta1.MutatingWebhook{{ Name: testWebhookName, diff --git a/test/integration/master/crd_test.go b/test/integration/master/crd_test.go index 358e0c7f961..ead8badaef4 100644 --- a/test/integration/master/crd_test.go +++ b/test/integration/master/crd_test.go @@ -50,7 +50,7 @@ func TestCRDShadowGroup(t *testing.T) { if err != nil { t.Fatalf("Unexpected error: %v", err) } - if _, err := kubeclient.CoreV1().Namespaces().Create((&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})); err != nil { + if _, err := kubeclient.CoreV1().Namespaces().Create(context.TODO(), (&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})); err != nil { t.Fatal(err) } @@ -60,7 +60,7 @@ func TestCRDShadowGroup(t *testing.T) { } t.Logf("Creating a NetworkPolicy") - nwPolicy, err := kubeclient.NetworkingV1().NetworkPolicies(testNamespace).Create(&networkingv1.NetworkPolicy{ + nwPolicy, err := kubeclient.NetworkingV1().NetworkPolicies(testNamespace).Create(context.TODO(), &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: testNamespace}, Spec: networkingv1.NetworkPolicySpec{ PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, @@ -92,7 +92,7 @@ func TestCRDShadowGroup(t *testing.T) { time.Sleep(2 * time.Second) t.Logf("Checking that we still see the NetworkPolicy") - _, err = kubeclient.NetworkingV1().NetworkPolicies(nwPolicy.Namespace).Get(nwPolicy.Name, metav1.GetOptions{}) + _, err = kubeclient.NetworkingV1().NetworkPolicies(nwPolicy.Namespace).Get(context.TODO(), nwPolicy.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Failed to get NetworkPolocy: %v", err) } @@ -112,7 +112,7 @@ func TestCRD(t *testing.T) { if err != nil { t.Fatalf("Unexpected error: %v", err) } - if _, err := kubeclient.CoreV1().Namespaces().Create((&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})); err != nil { + if _, err := 
kubeclient.CoreV1().Namespaces().Create(context.TODO(), (&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})); err != nil { t.Fatal(err) } @@ -262,14 +262,14 @@ func TestCRDOpenAPI(t *testing.T) { t.Logf("Check that structural schema is published") waitForSpec(structuralCRD, "string") - structuralCRD, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(structuralCRD.Name, metav1.GetOptions{}) + structuralCRD, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), structuralCRD.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } prop := structuralCRD.Spec.Validation.OpenAPIV3Schema.Properties["foo"] prop.Type = "boolean" structuralCRD.Spec.Validation.OpenAPIV3Schema.Properties["foo"] = prop - if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(structuralCRD); err != nil { + if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(context.TODO(), structuralCRD); err != nil { t.Fatal(err) } waitForSpec(structuralCRD, "boolean") diff --git a/test/integration/master/kms_transformation_test.go b/test/integration/master/kms_transformation_test.go index 34cba29b626..13f5dfa739a 100644 --- a/test/integration/master/kms_transformation_test.go +++ b/test/integration/master/kms_transformation_test.go @@ -176,7 +176,7 @@ resources: } // Secrets should be un-enveloped on direct reads from Kube API Server. - s, err := test.restClient.CoreV1().Secrets(testNamespace).Get(testSecret, metav1.GetOptions{}) + s, err := test.restClient.CoreV1().Secrets(testNamespace).Get(context.TODO(), testSecret, metav1.GetOptions{}) if err != nil { t.Fatalf("failed to get Secret from %s, err: %v", testNamespace, err) } diff --git a/test/integration/master/kube_apiserver_test.go b/test/integration/master/kube_apiserver_test.go index 8e3fb5a8a6e..14e59a204d2 100644 --- a/test/integration/master/kube_apiserver_test.go +++ b/test/integration/master/kube_apiserver_test.go @@ -62,7 +62,7 @@ func TestRun(t *testing.T) { // test whether the server is really healthy after /healthz told us so t.Logf("Creating Deployment directly after being healthy") var replicas int32 = 1 - _, err = client.AppsV1().Deployments("default").Create(&appsv1.Deployment{ + _, err = client.AppsV1().Deployments("default").Create(context.TODO(), &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ Kind: "Deployment", APIVersion: "apps/v1", @@ -437,7 +437,7 @@ func testReconcilersMasterLease(t *testing.T, leaseCount int, masterCount int) { t.Logf("error creating client: %v", err) return false, nil } - endpoints, err := client.CoreV1().Endpoints("default").Get("kubernetes", metav1.GetOptions{}) + endpoints, err := client.CoreV1().Endpoints("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{}) if err != nil { t.Logf("error fetching endpoints: %v", err) return false, nil @@ -472,7 +472,7 @@ func testReconcilersMasterLease(t *testing.T, leaseCount int, masterCount int) { t.Logf("create client error: %v", err) return false, nil } - endpoints, err := client.CoreV1().Endpoints("default").Get("kubernetes", metav1.GetOptions{}) + endpoints, err := client.CoreV1().Endpoints("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{}) if err != nil { t.Logf("error fetching endpoints: %v", err) return false, nil diff --git a/test/integration/master/synthetic_master_test.go b/test/integration/master/synthetic_master_test.go index 3538664a684..0d3d78f82e8 100644 --- 
a/test/integration/master/synthetic_master_test.go +++ b/test/integration/master/synthetic_master_test.go @@ -103,7 +103,7 @@ func TestKubernetesService(t *testing.T) { defer closeFn() coreClient := clientset.NewForConfigOrDie(config.GenericConfig.LoopbackClientConfig) err := wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) { - if _, err := coreClient.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}); err != nil && apierrors.IsNotFound(err) { + if _, err := coreClient.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}); err != nil && apierrors.IsNotFound(err) { return false, nil } else if err != nil { return false, err @@ -342,7 +342,7 @@ func TestObjectSizeResponses(t *testing.T) { for _, r := range requests { t.Run(r.size, func(t *testing.T) { - _, err := client.AppsV1().Deployments(metav1.NamespaceDefault).Create(r.deploymentObject) + _, err := client.AppsV1().Deployments(metav1.NamespaceDefault).Create(context.TODO(), r.deploymentObject) if err != nil { if !strings.Contains(err.Error(), r.expectedMessage) { t.Errorf("got: %s;want: %s", err.Error(), r.expectedMessage) @@ -611,7 +611,7 @@ func TestMasterService(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL}) err := wait.Poll(time.Second, time.Minute, func() (bool, error) { - svcList, err := client.CoreV1().Services(metav1.NamespaceDefault).List(metav1.ListOptions{}) + svcList, err := client.CoreV1().Services(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Errorf("unexpected error: %v", err) return false, nil @@ -624,7 +624,7 @@ func TestMasterService(t *testing.T) { } } if found { - ep, err := client.CoreV1().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) + ep, err := client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) if err != nil { return false, nil } @@ -668,7 +668,7 @@ func TestServiceAlloc(t *testing.T) { // Wait until the default "kubernetes" service is created. if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -679,18 +679,18 @@ func TestServiceAlloc(t *testing.T) { // make 5 more services to take up all IPs for i := 0; i < 5; i++ { - if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc(i)); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(i)); err != nil { t.Error(err) } } // Make another service. 
It will fail because we're out of cluster IPs - if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc(8)); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(8)); err != nil { if !strings.Contains(err.Error(), "range is full") { t.Errorf("unexpected error text: %v", err) } } else { - svcs, err := client.CoreV1().Services(metav1.NamespaceAll).List(metav1.ListOptions{}) + svcs, err := client.CoreV1().Services(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected success, and error getting the services: %v", err) } @@ -702,12 +702,12 @@ func TestServiceAlloc(t *testing.T) { } // Delete the first service. - if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil { + if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(context.TODO(), svc(1).ObjectMeta.Name, nil); err != nil { t.Fatalf("got unexpected error: %v", err) } // This time creating the second service should work. - if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(svc(8)); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(8)); err != nil { t.Fatalf("got unexpected error: %v", err) } } @@ -736,8 +736,8 @@ func TestUpdateNodeObjects(t *testing.T) { iterations := 10000 for i := 0; i < nodes*6; i++ { - c.Nodes().Delete(fmt.Sprintf("node-%d", i), nil) - _, err := c.Nodes().Create(&corev1.Node{ + c.Nodes().Delete(context.TODO(), fmt.Sprintf("node-%d", i), nil) + _, err := c.Nodes().Create(context.TODO(), &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("node-%d", i), }, @@ -750,7 +750,7 @@ func TestUpdateNodeObjects(t *testing.T) { for k := 0; k < listers; k++ { go func(lister int) { for i := 0; i < iterations; i++ { - _, err := c.Nodes().List(metav1.ListOptions{}) + _, err := c.Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { fmt.Printf("[list:%d] error after %d: %v\n", lister, i, err) break @@ -762,7 +762,7 @@ func TestUpdateNodeObjects(t *testing.T) { for k := 0; k < watchers; k++ { go func(lister int) { - w, err := c.Nodes().Watch(metav1.ListOptions{}) + w, err := c.Nodes().Watch(context.TODO(), metav1.ListOptions{}) if err != nil { fmt.Printf("[watch:%d] error: %v", lister, err) return @@ -792,14 +792,14 @@ func TestUpdateNodeObjects(t *testing.T) { fmt.Printf("[%d] iteration %d ...\n", node, i) } if i%20 == 0 { - _, err := c.Nodes().List(metav1.ListOptions{}) + _, err := c.Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { fmt.Printf("[%d] error after %d: %v\n", node, i, err) break } } - r, err := c.Nodes().List(metav1.ListOptions{ + r, err := c.Nodes().List(context.TODO(), metav1.ListOptions{ FieldSelector: fmt.Sprintf("metadata.name=node-%d", node), ResourceVersion: "0", }) @@ -812,7 +812,7 @@ func TestUpdateNodeObjects(t *testing.T) { break } - n, err := c.Nodes().Get(fmt.Sprintf("node-%d", node), metav1.GetOptions{}) + n, err := c.Nodes().Get(context.TODO(), fmt.Sprintf("node-%d", node), metav1.GetOptions{}) if err != nil { fmt.Printf("[%d] error after %d: %v\n", node, i, err) break @@ -850,7 +850,7 @@ func TestUpdateNodeObjects(t *testing.T) { lastCount = 0 n.Status.Conditions = nil } - if _, err := c.Nodes().UpdateStatus(n); err != nil { + if _, err := c.Nodes().UpdateStatus(context.TODO(), n); err != nil { if !apierrors.IsConflict(err) { fmt.Printf("[%d] error after %d: %v\n", node, i, err) break diff --git 
a/test/integration/master/transformation_testcase.go b/test/integration/master/transformation_testcase.go index 09278ab56a6..64623c057ac 100644 --- a/test/integration/master/transformation_testcase.go +++ b/test/integration/master/transformation_testcase.go @@ -98,7 +98,7 @@ func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML strin func (e *transformTest) cleanUp() { os.RemoveAll(e.configDir) - e.restClient.CoreV1().Namespaces().Delete(e.ns.Name, metav1.NewDeleteOptions(0)) + e.restClient.CoreV1().Namespaces().Delete(context.TODO(), e.ns.Name, metav1.NewDeleteOptions(0)) e.kubeAPIServer.TearDownFn() } @@ -133,7 +133,7 @@ func (e *transformTest) run(unSealSecretFunc unSealSecret, expectedEnvelopePrefi } // Secrets should be un-enveloped on direct reads from Kube API Server. - s, err := e.restClient.CoreV1().Secrets(testNamespace).Get(testSecret, metav1.GetOptions{}) + s, err := e.restClient.CoreV1().Secrets(testNamespace).Get(context.TODO(), testSecret, metav1.GetOptions{}) if err != nil { e.logger.Errorf("failed to get Secret from %s, err: %v", testNamespace, err) } @@ -205,7 +205,7 @@ func (e *transformTest) createNamespace(name string) (*corev1.Namespace, error) }, } - if _, err := e.restClient.CoreV1().Namespaces().Create(ns); err != nil { + if _, err := e.restClient.CoreV1().Namespaces().Create(context.TODO(), ns); err != nil { return nil, fmt.Errorf("unable to create testing namespace %v", err) } @@ -222,7 +222,7 @@ func (e *transformTest) createSecret(name, namespace string) (*corev1.Secret, er secretKey: []byte(secretVal), }, } - if _, err := e.restClient.CoreV1().Secrets(secret.Namespace).Create(secret); err != nil { + if _, err := e.restClient.CoreV1().Secrets(secret.Namespace).Create(context.TODO(), secret); err != nil { return nil, fmt.Errorf("error while writing secret: %v", err) } diff --git a/test/integration/metrics/metrics_test.go b/test/integration/metrics/metrics_test.go index 28d232274b3..5de14267556 100644 --- a/test/integration/metrics/metrics_test.go +++ b/test/integration/metrics/metrics_test.go @@ -17,6 +17,7 @@ limitations under the License. package metrics import ( + "context" "fmt" "io/ioutil" "net/http" @@ -90,7 +91,7 @@ func TestApiserverMetrics(t *testing.T) { // Make a request to the apiserver to ensure there's at least one data point // for the metrics we're expecting -- otherwise, they won't be exported. client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - if _, err := client.CoreV1().Pods(metav1.NamespaceDefault).List(metav1.ListOptions{}); err != nil { + if _, err := client.CoreV1().Pods(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}); err != nil { t.Fatalf("unexpected error getting pods: %v", err) } diff --git a/test/integration/namespace/ns_conditions_test.go b/test/integration/namespace/ns_conditions_test.go index bba492be6c4..a1d92deb2c4 100644 --- a/test/integration/namespace/ns_conditions_test.go +++ b/test/integration/namespace/ns_conditions_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package namespace import ( + "context" "encoding/json" "testing" "time" @@ -42,7 +43,7 @@ func TestNamespaceCondition(t *testing.T) { closeFn, nsController, informers, kubeClient, dynamicClient := namespaceLifecycleSetup(t) defer closeFn() nsName := "test-namespace-conditions" - _, err := kubeClient.CoreV1().Namespaces().Create(&corev1.Namespace{ + _, err := kubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: nsName, }, @@ -77,12 +78,12 @@ func TestNamespaceCondition(t *testing.T) { t.Fatal(err) } - if err = kubeClient.CoreV1().Namespaces().Delete(nsName, nil); err != nil { + if err = kubeClient.CoreV1().Namespaces().Delete(context.TODO(), nsName, nil); err != nil { t.Fatal(err) } err = wait.PollImmediate(1*time.Second, 60*time.Second, func() (bool, error) { - curr, err := kubeClient.CoreV1().Namespaces().Get(nsName, metav1.GetOptions{}) + curr, err := kubeClient.CoreV1().Namespaces().Get(context.TODO(), nsName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/integration/objectmeta/objectmeta_test.go b/test/integration/objectmeta/objectmeta_test.go index 4e4ee32be39..0384ce991ac 100644 --- a/test/integration/objectmeta/objectmeta_test.go +++ b/test/integration/objectmeta/objectmeta_test.go @@ -17,6 +17,7 @@ limitations under the License. package objectmeta import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -41,12 +42,12 @@ func TestIgnoreClusterName(t *testing.T) { ClusterName: "cluster-name-to-ignore", }, } - nsNew, err := client.CoreV1().Namespaces().Create(&ns) + nsNew, err := client.CoreV1().Namespaces().Create(context.TODO(), &ns) assert.Nil(t, err) assert.Equal(t, ns.Name, nsNew.Name) assert.Empty(t, nsNew.ClusterName) - nsNew, err = client.CoreV1().Namespaces().Update(&ns) + nsNew, err = client.CoreV1().Namespaces().Update(context.TODO(), &ns) assert.Nil(t, err) assert.Equal(t, ns.Name, nsNew.Name) assert.Empty(t, nsNew.ClusterName) diff --git a/test/integration/pods/pods_test.go b/test/integration/pods/pods_test.go index d0eab68715b..02ed3dbbab7 100644 --- a/test/integration/pods/pods_test.go +++ b/test/integration/pods/pods_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package pods import ( + "context" "fmt" "strings" "testing" @@ -135,13 +136,13 @@ func TestPodUpdateActiveDeadlineSeconds(t *testing.T) { pod.Spec.ActiveDeadlineSeconds = tc.original pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i) - if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { t.Errorf("Failed to create pod: %v", err) } pod.Spec.ActiveDeadlineSeconds = tc.update - _, err := client.CoreV1().Pods(ns.Name).Update(pod) + _, err := client.CoreV1().Pods(ns.Name).Update(context.TODO(), pod) if tc.valid && err != nil { t.Errorf("%v: failed to update pod: %v", tc.name, err) } else if !tc.valid && err == nil { @@ -179,7 +180,7 @@ func TestPodReadOnlyFilesystem(t *testing.T) { }, } - if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { t.Errorf("Failed to create pod: %v", err) } @@ -223,7 +224,7 @@ func TestPodCreateEphemeralContainers(t *testing.T) { }, } - if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err == nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err == nil { t.Errorf("Unexpected allowed creation of pod with ephemeral containers") integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) } else if !strings.HasSuffix(err.Error(), "spec.ephemeralContainers: Forbidden: cannot be set on create") { @@ -234,7 +235,7 @@ func TestPodCreateEphemeralContainers(t *testing.T) { // setUpEphemeralContainers creates a pod that has Ephemeral Containers. This is a two step // process because Ephemeral Containers are not allowed during pod creation. func setUpEphemeralContainers(podsClient typedv1.PodInterface, pod *v1.Pod, containers []v1.EphemeralContainer) error { - if _, err := podsClient.Create(pod); err != nil { + if _, err := podsClient.Create(context.TODO(), pod); err != nil { return fmt.Errorf("failed to create pod: %v", err) } @@ -243,17 +244,17 @@ func setUpEphemeralContainers(podsClient typedv1.PodInterface, pod *v1.Pod, cont } pod.Spec.EphemeralContainers = containers - if _, err := podsClient.Update(pod); err == nil { + if _, err := podsClient.Update(context.TODO(), pod); err == nil { return fmt.Errorf("unexpected allowed direct update of ephemeral containers during set up: %v", err) } - ec, err := podsClient.GetEphemeralContainers(pod.Name, metav1.GetOptions{}) + ec, err := podsClient.GetEphemeralContainers(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("unable to get ephemeral containers for test case set up: %v", err) } ec.EphemeralContainers = containers - if _, err = podsClient.UpdateEphemeralContainers(pod.Name, ec); err != nil { + if _, err = podsClient.UpdateEphemeralContainers(context.TODO(), pod.Name, ec); err != nil { return fmt.Errorf("failed to update ephemeral containers for test case set up: %v", err) } @@ -456,7 +457,7 @@ func TestPodPatchEphemeralContainers(t *testing.T) { t.Errorf("%v: %v", tc.name, err) } - if _, err := client.CoreV1().Pods(ns.Name).Patch(pod.Name, tc.patchType, tc.patchBody, "ephemeralcontainers"); tc.valid && err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Patch(context.TODO(), pod.Name, tc.patchType, tc.patchBody, "ephemeralcontainers"); tc.valid && err != nil { t.Errorf("%v: failed to update ephemeral containers: %v", tc.name, err) } else if !tc.valid && err == nil { t.Errorf("%v: unexpected allowed update to ephemeral containers", 
tc.name) @@ -646,13 +647,13 @@ func TestPodUpdateEphemeralContainers(t *testing.T) { t.Errorf("%v: %v", tc.name, err) } - ec, err := client.CoreV1().Pods(ns.Name).GetEphemeralContainers(pod.Name, metav1.GetOptions{}) + ec, err := client.CoreV1().Pods(ns.Name).GetEphemeralContainers(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { t.Errorf("%v: unable to get ephemeral containers: %v", tc.name, err) } ec.EphemeralContainers = tc.update - if _, err := client.CoreV1().Pods(ns.Name).UpdateEphemeralContainers(pod.Name, ec); tc.valid && err != nil { + if _, err := client.CoreV1().Pods(ns.Name).UpdateEphemeralContainers(context.TODO(), pod.Name, ec); tc.valid && err != nil { t.Errorf("%v: failed to update ephemeral containers: %v", tc.name, err) } else if !tc.valid && err == nil { t.Errorf("%v: unexpected allowed update to ephemeral containers", tc.name) diff --git a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index 891b9dc9d49..af29287a119 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -151,12 +151,12 @@ func TestQuota(t *testing.T) { } func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) { - w, err := clientset.CoreV1().ResourceQuotas(quota.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: quota.Name})) + w, err := clientset.CoreV1().ResourceQuotas(quota.Namespace).Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: quota.Name})) if err != nil { t.Fatalf("unexpected error: %v", err) } - if _, err := clientset.CoreV1().ResourceQuotas(quota.Namespace).Create(quota); err != nil { + if _, err := clientset.CoreV1().ResourceQuotas(quota.Namespace).Create(context.TODO(), quota); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -210,12 +210,12 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) { }, } - w, err := clientset.CoreV1().ReplicationControllers(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: rc.Name})) + w, err := clientset.CoreV1().ReplicationControllers(namespace).Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: rc.Name})) if err != nil { t.Fatalf("unexpected error: %v", err) } - if _, err := clientset.CoreV1().ReplicationControllers(namespace).Create(rc); err != nil { + if _, err := clientset.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), rc); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -239,7 +239,7 @@ func scale(t *testing.T, namespace string, clientset *clientset.Clientset) { return false, nil }) if err != nil { - pods, _ := clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()}) + pods, _ := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()}) t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items)) } } @@ -339,7 +339,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { }, }, } - if _, err := clientset.CoreV1().Pods(ns.Name).Create(pod); err == nil { + if _, err := clientset.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err == nil { t.Fatalf("expected error for insufficient quota") } @@ -362,7 +362,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { // attempt to create a new pod once the quota is propagated err = wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { // retry until we 
succeed (to allow time for all changes to propagate) - if _, err := clientset.CoreV1().Pods(ns.Name).Create(pod); err == nil { + if _, err := clientset.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err == nil { return true, nil } return false, nil diff --git a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go index fd3f7ebf9d9..05a631de274 100644 --- a/test/integration/replicaset/replicaset_test.go +++ b/test/integration/replicaset/replicaset_test.go @@ -17,6 +17,7 @@ limitations under the License. package replicaset import ( + "context" "fmt" "net/http/httptest" "reflect" @@ -173,14 +174,14 @@ func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*apps.Repl var createdRSs []*apps.ReplicaSet var createdPods []*v1.Pod for _, rs := range rss { - createdRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Create(rs) + createdRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Create(context.TODO(), rs) if err != nil { t.Fatalf("Failed to create replica set %s: %v", rs.Name, err) } createdRSs = append(createdRSs, createdRS) } for _, pod := range pods { - createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(pod) + createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) if err != nil { t.Fatalf("Failed to create pod %s: %v", pod.Name, err) } @@ -209,12 +210,12 @@ func scaleRS(t *testing.T, c clientset.Interface, rs *apps.ReplicaSet, replicas func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, updateFunc func(*v1.Pod)) *v1.Pod { var pod *v1.Pod if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newPod, err := podClient.Get(podName, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return err } updateFunc(newPod) - pod, err = podClient.Update(newPod) + pod, err = podClient.Update(context.TODO(), newPod) return err }); err != nil { t.Fatalf("Failed to update pod %s: %v", podName, err) @@ -224,12 +225,12 @@ func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, upd func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, pod *v1.Pod, updateStatusFunc func(*v1.Pod)) { if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return err } updateStatusFunc(newPod) - _, err = podClient.UpdateStatus(newPod) + _, err = podClient.UpdateStatus(context.TODO(), newPod) return err }); err != nil { t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err) @@ -239,7 +240,7 @@ func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, pod *v1.Pod, func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]string) *v1.PodList { podSelector := labels.Set(labelMap).AsSelector() options := metav1.ListOptions{LabelSelector: podSelector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) if err != nil { t.Fatalf("Failed obtaining a list of pods that match the pod labels %v: %v", labelMap, err) } @@ -249,12 +250,12 @@ func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]s func updateRS(t *testing.T, rsClient appsclient.ReplicaSetInterface, rsName string, updateFunc func(*apps.ReplicaSet)) *apps.ReplicaSet { var rs *apps.ReplicaSet if err := retry.RetryOnConflict(retry.DefaultBackoff, func() 
error { - newRS, err := rsClient.Get(rsName, metav1.GetOptions{}) + newRS, err := rsClient.Get(context.TODO(), rsName, metav1.GetOptions{}) if err != nil { return err } updateFunc(newRS) - rs, err = rsClient.Update(newRS) + rs, err = rsClient.Update(context.TODO(), newRS) return err }); err != nil { t.Fatalf("Failed to update rs %s: %v", rsName, err) @@ -271,7 +272,7 @@ func testPodControllerRefPatch(t *testing.T, c clientset.Interface, pod *v1.Pod, }) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -280,7 +281,7 @@ func testPodControllerRefPatch(t *testing.T, c clientset.Interface, pod *v1.Pod, t.Fatalf("Failed to verify ControllerRef for the pod %s is not nil: %v", pod.Name, err) } - newPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to obtain pod %s: %v", pod.Name, err) } @@ -318,7 +319,7 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 } pod.Status.Conditions = append(pod.Status.Conditions, *condition) } - _, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) + _, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod) if err != nil { // When status fails to be updated, we continue to next pod continue @@ -335,11 +336,11 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rs *apps.ReplicaSet, replicas int32) { ns := rs.Namespace rsClient := c.AppsV1().ReplicaSets(ns) - newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{}) + newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to obtain rs %s: %v", rs.Name, err) } - scale, err := c.AppsV1().ReplicaSets(ns).GetScale(rs.Name, metav1.GetOptions{}) + scale, err := c.AppsV1().ReplicaSets(ns).GetScale(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to obtain scale subresource for rs %s: %v", rs.Name, err) } @@ -348,18 +349,18 @@ func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rs *a } if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - scale, err := c.AppsV1().ReplicaSets(ns).GetScale(rs.Name, metav1.GetOptions{}) + scale, err := c.AppsV1().ReplicaSets(ns).GetScale(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { return err } scale.Spec.Replicas = replicas - _, err = c.AppsV1().ReplicaSets(ns).UpdateScale(rs.Name, scale) + _, err = c.AppsV1().ReplicaSets(ns).UpdateScale(context.TODO(), rs.Name, scale) return err }); err != nil { t.Fatalf("Failed to set .Spec.Replicas of scale subresource for rs %s: %v", rs.Name, err) } - newRS, err = rsClient.Get(rs.Name, metav1.GetOptions{}) + newRS, err = rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to obtain rs %s: %v", rs.Name, err) } @@ -428,14 +429,14 @@ func TestAdoption(t *testing.T) { rsClient := clientSet.AppsV1().ReplicaSets(ns.Name) podClient := clientSet.CoreV1().Pods(ns.Name) const rsName = "rs" - rs, err := rsClient.Create(newRS(rsName, ns.Name, 1)) + rs, err := rsClient.Create(context.TODO(), newRS(rsName, ns.Name, 1)) if err != nil { t.Fatalf("Failed to create replica set: %v", err) } podName := 
fmt.Sprintf("pod%d", i) pod := newMatchingPod(podName, ns.Name) pod.OwnerReferences = tc.existingOwnerReferences(rs) - _, err = podClient.Create(pod) + _, err = podClient.Create(context.TODO(), pod) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -443,7 +444,7 @@ func TestAdoption(t *testing.T) { stopCh := runControllerAndInformers(t, rm, informers, 1) defer close(stopCh) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + updatedPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -472,14 +473,14 @@ func TestRSSelectorImmutability(t *testing.T) { createRSsPods(t, clientSet, []*apps.ReplicaSet{rs}, []*v1.Pod{}) // test to ensure apps/v1 selector is immutable - rsV1, err := clientSet.AppsV1().ReplicaSets(ns.Name).Get(rs.Name, metav1.GetOptions{}) + rsV1, err := clientSet.AppsV1().ReplicaSets(ns.Name).Get(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("failed to get apps/v1 replicaset %s: %v", rs.Name, err) } newSelectorLabels := map[string]string{"changed_name_apps_v1": "changed_test_apps_v1"} rsV1.Spec.Selector.MatchLabels = newSelectorLabels rsV1.Spec.Template.Labels = newSelectorLabels - _, err = clientSet.AppsV1().ReplicaSets(ns.Name).Update(rsV1) + _, err = clientSet.AppsV1().ReplicaSets(ns.Name).Update(context.TODO(), rsV1) if err == nil { t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1 replicaset %s", rsV1.Name) } @@ -522,7 +523,7 @@ func TestSpecReplicasChange(t *testing.T) { } if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{}) + newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -558,7 +559,7 @@ func TestDeletingAndFailedPods(t *testing.T) { updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) { pod.Finalizers = []string{"fake.example.com/blockDeletion"} }) - if err := c.CoreV1().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, &metav1.DeleteOptions{}); err != nil { t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err) } @@ -622,7 +623,7 @@ func TestOverlappingRSs(t *testing.T) { // Expect both RSs have .status.replicas = .spec.replicas for i := 0; i < 2; i++ { - newRS, err := c.AppsV1().ReplicaSets(ns.Name).Get(fmt.Sprintf("rs-%d", i+1), metav1.GetOptions{}) + newRS, err := c.AppsV1().ReplicaSets(ns.Name).Get(context.TODO(), fmt.Sprintf("rs-%d", i+1), metav1.GetOptions{}) if err != nil { t.Fatalf("failed to obtain rs rs-%d: %v", i+1, err) } @@ -662,7 +663,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { pod.Labels = newLabelMap }) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -677,7 +678,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { pod.Labels = labelMap() }) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { // If the pod is not found, it means the RS picks the pod for 
deletion (it is extra) // Verify there is only one pod in namespace and it has ControllerRef to the RS @@ -782,7 +783,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) { rsClient := c.AppsV1().ReplicaSets(ns.Name) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{}) + newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -877,7 +878,7 @@ func TestFullyLabeledReplicas(t *testing.T) { // Verify only one pod is fully labeled if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{}) + newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -910,14 +911,14 @@ func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) { } rsClient := c.AppsV1().ReplicaSets(ns.Name) - err := rsClient.Delete(rs.Name, nil) + err := rsClient.Delete(context.TODO(), rs.Name, nil) if err != nil { t.Fatalf("Failed to delete rs: %v", err) } // Verify no new finalizer has been added if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{}) + newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -943,5 +944,5 @@ func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) { rs.Finalizers = finalizers }) - rsClient.Delete(rs.Name, nil) + rsClient.Delete(context.TODO(), rs.Name, nil) } diff --git a/test/integration/replicationcontroller/replicationcontroller_test.go b/test/integration/replicationcontroller/replicationcontroller_test.go index 9988b3f7f58..f28e6244110 100644 --- a/test/integration/replicationcontroller/replicationcontroller_test.go +++ b/test/integration/replicationcontroller/replicationcontroller_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package replicationcontroller import ( + "context" "fmt" "net/http/httptest" "reflect" @@ -155,14 +156,14 @@ func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.Replic var createdRCs []*v1.ReplicationController var createdPods []*v1.Pod for _, rc := range rcs { - createdRC, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(rc) + createdRC, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(context.TODO(), rc) if err != nil { t.Fatalf("Failed to create replication controller %s: %v", rc.Name, err) } createdRCs = append(createdRCs, createdRC) } for _, pod := range pods { - createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(pod) + createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) if err != nil { t.Fatalf("Failed to create pod %s: %v", pod.Name, err) } @@ -176,7 +177,7 @@ func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.Replic func waitRCStable(t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) { rcClient := clientSet.CoreV1().ReplicationControllers(rc.Namespace) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newRC, err := rcClient.Get(rc.Name, metav1.GetOptions{}) + newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -198,12 +199,12 @@ func scaleRC(t *testing.T, c clientset.Interface, rc *v1.ReplicationController, func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, updateFunc func(*v1.Pod)) *v1.Pod { var pod *v1.Pod if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newPod, err := podClient.Get(podName, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return err } updateFunc(newPod) - pod, err = podClient.Update(newPod) + pod, err = podClient.Update(context.TODO(), newPod) return err }); err != nil { t.Fatalf("Failed to update pod %s: %v", podName, err) @@ -213,12 +214,12 @@ func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, upd func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, pod *v1.Pod, updateStatusFunc func(*v1.Pod)) { if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return err } updateStatusFunc(newPod) - _, err = podClient.UpdateStatus(newPod) + _, err = podClient.UpdateStatus(context.TODO(), newPod) return err }); err != nil { t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err) @@ -228,7 +229,7 @@ func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, pod *v1.Pod, func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]string) *v1.PodList { podSelector := labels.Set(labelMap).AsSelector() options := metav1.ListOptions{LabelSelector: podSelector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) if err != nil { t.Fatalf("Failed obtaining a list of pods that match the pod labels %v: %v", labelMap, err) } @@ -238,12 +239,12 @@ func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]s func updateRC(t *testing.T, rcClient typedv1.ReplicationControllerInterface, rcName string, updateFunc func(*v1.ReplicationController)) *v1.ReplicationController { var rc *v1.ReplicationController if err := 
retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newRC, err := rcClient.Get(rcName, metav1.GetOptions{}) + newRC, err := rcClient.Get(context.TODO(), rcName, metav1.GetOptions{}) if err != nil { return err } updateFunc(newRC) - rc, err = rcClient.Update(newRC) + rc, err = rcClient.Update(context.TODO(), newRC) return err }); err != nil { t.Fatalf("Failed to update rc %s: %v", rcName, err) @@ -260,7 +261,7 @@ func testPodControllerRefPatch(t *testing.T, c clientset.Interface, pod *v1.Pod, }) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -269,7 +270,7 @@ func testPodControllerRefPatch(t *testing.T, c clientset.Interface, pod *v1.Pod, t.Fatalf("Failed to verify ControllerRef for the pod %s is not nil: %v", pod.Name, err) } - newPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to obtain pod %s: %v", pod.Name, err) } @@ -307,7 +308,7 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 } pod.Status.Conditions = append(pod.Status.Conditions, *condition) } - _, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) + _, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod) if err != nil { // When status fails to be updated, we continue to next pod continue @@ -324,11 +325,11 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rc *v1.ReplicationController, replicas int32) { ns := rc.Namespace rcClient := c.CoreV1().ReplicationControllers(ns) - newRC, err := rcClient.Get(rc.Name, metav1.GetOptions{}) + newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to obtain rc %s: %v", rc.Name, err) } - scale, err := c.CoreV1().ReplicationControllers(ns).GetScale(rc.Name, metav1.GetOptions{}) + scale, err := c.CoreV1().ReplicationControllers(ns).GetScale(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to obtain scale subresource for rc %s: %v", rc.Name, err) } @@ -337,18 +338,18 @@ func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rc *v } if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - scale, err := c.CoreV1().ReplicationControllers(ns).GetScale(rc.Name, metav1.GetOptions{}) + scale, err := c.CoreV1().ReplicationControllers(ns).GetScale(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { return err } scale.Spec.Replicas = replicas - _, err = c.CoreV1().ReplicationControllers(ns).UpdateScale(rc.Name, scale) + _, err = c.CoreV1().ReplicationControllers(ns).UpdateScale(context.TODO(), rc.Name, scale) return err }); err != nil { t.Fatalf("Failed to set .Spec.Replicas of scale subresource for rc %s: %v", rc.Name, err) } - newRC, err = rcClient.Get(rc.Name, metav1.GetOptions{}) + newRC, err = rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to obtain rc %s: %v", rc.Name, err) } @@ -417,14 +418,14 @@ func TestAdoption(t *testing.T) { rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) podClient := clientSet.CoreV1().Pods(ns.Name) const rcName = "rc" - rc, err := rcClient.Create(newRC(rcName, ns.Name, 1)) + rc, err := 
rcClient.Create(context.TODO(), newRC(rcName, ns.Name, 1)) if err != nil { t.Fatalf("Failed to create replication controllers: %v", err) } podName := fmt.Sprintf("pod%d", i) pod := newMatchingPod(podName, ns.Name) pod.OwnerReferences = tc.existingOwnerReferences(rc) - _, err = podClient.Create(pod) + _, err = podClient.Create(context.TODO(), pod) if err != nil { t.Fatalf("Failed to create Pod: %v", err) } @@ -432,7 +433,7 @@ func TestAdoption(t *testing.T) { stopCh := runControllerAndInformers(t, rm, informers, 1) defer close(stopCh) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + updatedPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -483,7 +484,7 @@ func TestSpecReplicasChange(t *testing.T) { } if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newRC, err := rcClient.Get(rc.Name, metav1.GetOptions{}) + newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -519,7 +520,7 @@ func TestDeletingAndFailedPods(t *testing.T) { updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) { pod.Finalizers = []string{"fake.example.com/blockDeletion"} }) - if err := c.CoreV1().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, &metav1.DeleteOptions{}); err != nil { t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err) } @@ -583,7 +584,7 @@ func TestOverlappingRCs(t *testing.T) { // Expect both RCs have .status.replicas = .spec.replicas for i := 0; i < 2; i++ { - newRC, err := c.CoreV1().ReplicationControllers(ns.Name).Get(fmt.Sprintf("rc-%d", i+1), metav1.GetOptions{}) + newRC, err := c.CoreV1().ReplicationControllers(ns.Name).Get(context.TODO(), fmt.Sprintf("rc-%d", i+1), metav1.GetOptions{}) if err != nil { t.Fatalf("failed to obtain rc rc-%d: %v", i+1, err) } @@ -623,7 +624,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { pod.Labels = newLabelMap }) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -638,7 +639,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { pod.Labels = labelMap() }) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { // If the pod is not found, it means the RC picks the pod for deletion (it is extra) // Verify there is only one pod in namespace and it has ControllerRef to the RC @@ -743,7 +744,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) { rcClient := c.CoreV1().ReplicationControllers(ns.Name) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newRC, err := rcClient.Get(rc.Name, metav1.GetOptions{}) + newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -838,7 +839,7 @@ func TestFullyLabeledReplicas(t *testing.T) { // Verify only one pod is fully labeled if err := wait.PollImmediate(interval, timeout, func() (bool, error) { - newRC, err := rcClient.Get(rc.Name, metav1.GetOptions{}) + newRC, err := rcClient.Get(context.TODO(), rc.Name, 
metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/integration/scale/scale_test.go b/test/integration/scale/scale_test.go index 93e6ed8553f..362c5be4001 100644 --- a/test/integration/scale/scale_test.go +++ b/test/integration/scale/scale_test.go @@ -121,16 +121,16 @@ func TestScaleSubresources(t *testing.T) { } // Create objects required to exercise scale subresources - if _, err := clientSet.CoreV1().ReplicationControllers("default").Create(rcStub); err != nil { + if _, err := clientSet.CoreV1().ReplicationControllers("default").Create(context.TODO(), rcStub); err != nil { t.Fatal(err) } - if _, err := clientSet.AppsV1().ReplicaSets("default").Create(rsStub); err != nil { + if _, err := clientSet.AppsV1().ReplicaSets("default").Create(context.TODO(), rsStub); err != nil { t.Fatal(err) } - if _, err := clientSet.AppsV1().Deployments("default").Create(deploymentStub); err != nil { + if _, err := clientSet.AppsV1().Deployments("default").Create(context.TODO(), deploymentStub); err != nil { t.Fatal(err) } - if _, err := clientSet.AppsV1().StatefulSets("default").Create(ssStub); err != nil { + if _, err := clientSet.AppsV1().StatefulSets("default").Create(context.TODO(), ssStub); err != nil { t.Fatal(err) } diff --git a/test/integration/scheduler/extender_test.go b/test/integration/scheduler/extender_test.go index 9102d88c4c6..67e56c2533c 100644 --- a/test/integration/scheduler/extender_test.go +++ b/test/integration/scheduler/extender_test.go @@ -19,6 +19,7 @@ package scheduler // This file tests scheduler extender. import ( + "context" "encoding/json" "fmt" "net/http" @@ -357,7 +358,7 @@ func TestSchedulerExtender(t *testing.T) { func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes). 
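testScalingUsingScaleSubresource above (and TestScaleSubresources in scale_test.go) drive replica counts through the scale subresource inside a conflict-retry loop. A minimal sketch of that pattern, assuming a hypothetical name (scaleRCViaSubresource) and the GetScale/UpdateScale signatures as they appear in this patch (context first, remaining parameters unchanged):

package sketch // hypothetical helper, not part of this patch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// scaleRCViaSubresource sets spec.replicas through the scale subresource,
// retrying on conflicts the same way the tests above do.
func scaleRCViaSubresource(c clientset.Interface, ns, name string, replicas int32) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// Re-read the scale object on every attempt so a conflict retry
		// picks up the latest resourceVersion.
		scale, err := c.CoreV1().ReplicationControllers(ns).GetScale(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		scale.Spec.Replicas = replicas
		// UpdateScale takes (ctx, name, scale) at this point in the migration.
		_, err = c.CoreV1().ReplicationControllers(ns).UpdateScale(context.TODO(), name, scale)
		return err
	})
}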
- defer cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + defer cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) goodCondition := v1.NodeCondition{ Type: v1.NodeReady, @@ -377,7 +378,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) for ii := 0; ii < 5; ii++ { node.Name = fmt.Sprintf("machine%d", ii+1) - if _, err := cs.CoreV1().Nodes().Create(node); err != nil { + if _, err := cs.CoreV1().Nodes().Create(context.TODO(), node); err != nil { t.Fatalf("Failed to create nodes: %v", err) } } @@ -399,7 +400,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) }, } - myPod, err := cs.CoreV1().Pods(ns.Name).Create(pod) + myPod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), pod) if err != nil { t.Fatalf("Failed to create pod: %v", err) } @@ -409,17 +410,17 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) t.Fatalf("Failed to schedule pod: %v", err) } - myPod, err = cs.CoreV1().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{}) + myPod, err = cs.CoreV1().Pods(ns.Name).Get(context.TODO(), myPod.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get pod: %v", err) } else if myPod.Spec.NodeName != "machine2" { t.Fatalf("Failed to schedule using extender, expected machine2, got %v", myPod.Spec.NodeName) } var gracePeriod int64 - if err := cs.CoreV1().Pods(ns.Name).Delete(myPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil { + if err := cs.CoreV1().Pods(ns.Name).Delete(context.TODO(), myPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil { t.Fatalf("Failed to delete pod: %v", err) } - _, err = cs.CoreV1().Pods(ns.Name).Get(myPod.Name, metav1.GetOptions{}) + _, err = cs.CoreV1().Pods(ns.Name).Get(context.TODO(), myPod.Name, metav1.GetOptions{}) if err == nil { t.Fatalf("Failed to delete pod: %v", err) } diff --git a/test/integration/scheduler/framework_test.go b/test/integration/scheduler/framework_test.go index c3562c576d6..6253425b7da 100644 --- a/test/integration/scheduler/framework_test.go +++ b/test/integration/scheduler/framework_test.go @@ -951,7 +951,7 @@ func TestBindPlugin(t *testing.T) { t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err) continue } - pod, err = testCtx.clientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err = testCtx.clientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { t.Errorf("can't get pod: %v", err) } diff --git a/test/integration/scheduler/predicates_test.go b/test/integration/scheduler/predicates_test.go index ec474d59ede..f6d647bd5aa 100644 --- a/test/integration/scheduler/predicates_test.go +++ b/test/integration/scheduler/predicates_test.go @@ -17,6 +17,7 @@ limitations under the License. 
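DoTestPodScheduling above finishes by force-deleting the test pod with a zero grace period and confirming it is gone. A condensed sketch of that cleanup step, with a hypothetical helper name (deletePodImmediately) and the Delete/Get signatures as they appear in this patch:

package sketch // hypothetical helper, not part of this patch

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// deletePodImmediately removes a pod with a zero grace period and verifies it
// is no longer retrievable, as the extender test does during cleanup.
func deletePodImmediately(cs clientset.Interface, pod *v1.Pod) error {
	var gracePeriod int64
	opts := &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}
	// Delete takes (ctx, name, *DeleteOptions) at this point in the migration.
	if err := cs.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, opts); err != nil {
		return fmt.Errorf("failed to delete pod %s/%s: %v", pod.Namespace, pod.Name, err)
	}
	if _, err := cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err == nil {
		return fmt.Errorf("pod %s/%s still exists after delete", pod.Namespace, pod.Name)
	}
	return nil
}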
package scheduler import ( + "context" "fmt" "testing" "time" @@ -823,7 +824,7 @@ func TestInterPodAffinity(t *testing.T) { } else { nsName = testCtx.ns.Name } - createdPod, err := cs.CoreV1().Pods(nsName).Create(pod) + createdPod, err := cs.CoreV1().Pods(nsName).Create(context.TODO(), pod) if err != nil { t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test) } @@ -832,7 +833,7 @@ func TestInterPodAffinity(t *testing.T) { t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test) } } - testPod, err := cs.CoreV1().Pods(testCtx.ns.Name).Create(test.pod) + testPod, err := cs.CoreV1().Pods(testCtx.ns.Name).Create(context.TODO(), test.pod) if err != nil { if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) { t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test) @@ -848,7 +849,7 @@ func TestInterPodAffinity(t *testing.T) { t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits) } - err = cs.CoreV1().Pods(testCtx.ns.Name).Delete(test.pod.Name, metav1.NewDeleteOptions(0)) + err = cs.CoreV1().Pods(testCtx.ns.Name).Delete(context.TODO(), test.pod.Name, metav1.NewDeleteOptions(0)) if err != nil { t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test) } @@ -863,7 +864,7 @@ func TestInterPodAffinity(t *testing.T) { } else { nsName = testCtx.ns.Name } - err = cs.CoreV1().Pods(nsName).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = cs.CoreV1().Pods(nsName).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0)) if err != nil { t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test) } @@ -1004,7 +1005,7 @@ func TestEvenPodsSpreadPredicate(t *testing.T) { allPods := append(tt.existingPods, tt.incomingPod) defer cleanupPods(cs, t, allPods) for _, pod := range tt.existingPods { - createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod) + createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) if err != nil { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } @@ -1013,7 +1014,7 @@ func TestEvenPodsSpreadPredicate(t *testing.T) { t.Errorf("Test Failed: error while waiting for pod during test: %v", err) } } - testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(tt.incomingPod) + testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod) if err != nil && !apierrors.IsInvalid(err) { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go index 9b9028b3bbd..6b266c3237b 100644 --- a/test/integration/scheduler/preemption_test.go +++ b/test/integration/scheduler/preemption_test.go @@ -52,7 +52,7 @@ var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(30 func waitForNominatedNodeNameWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error { if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) { - pod, err := cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + pod, err := cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -817,7 +817,7 @@ func TestPreemptionRaces(t *testing.T) { klog.Info("Check unschedulable pods still exists and were never scheduled...") for _, p := range additionalPods { - pod, err := cs.CoreV1().Pods(p.Namespace).Get(p.Name, 
metav1.GetOptions{}) + pod, err := cs.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Error in getting Pod %v/%v info: %v", p.Namespace, p.Name, err) } @@ -921,7 +921,7 @@ func TestNominatedNodeCleanUp(t *testing.T) { } // And the nominated node name of the medium priority pod is cleared. if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { - pod, err := cs.CoreV1().Pods(medPriPod.Namespace).Get(medPriPod.Name, metav1.GetOptions{}) + pod, err := cs.CoreV1().Pods(medPriPod.Namespace).Get(context.TODO(), medPriPod.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Error getting the medium priority pod info: %v", err) } @@ -1172,7 +1172,7 @@ func TestPDBInPreemption(t *testing.T) { } // Add pod condition ready so that PDB is updated. addPodConditionReady(p) - if _, err := testCtx.clientSet.CoreV1().Pods(testCtx.ns.Name).UpdateStatus(p); err != nil { + if _, err := testCtx.clientSet.CoreV1().Pods(testCtx.ns.Name).UpdateStatus(context.TODO(), p); err != nil { t.Fatal(err) } } @@ -1183,7 +1183,7 @@ func TestPDBInPreemption(t *testing.T) { // Create PDBs. for _, pdb := range test.pdbs { - _, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).Create(pdb) + _, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).Create(context.TODO(), pdb) if err != nil { t.Fatalf("Failed to create PDB: %v", err) } @@ -1220,7 +1220,7 @@ func TestPDBInPreemption(t *testing.T) { // Cleanup pods = append(pods, preemptor) cleanupPods(cs, t, pods) - cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(nil, metav1.ListOptions{}) - cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) } } diff --git a/test/integration/scheduler/priorities_test.go b/test/integration/scheduler/priorities_test.go index 455bc3187b5..a09710695f5 100644 --- a/test/integration/scheduler/priorities_test.go +++ b/test/integration/scheduler/priorities_test.go @@ -17,6 +17,7 @@ limitations under the License. package scheduler import ( + "context" "fmt" "strings" "testing" @@ -333,7 +334,7 @@ func TestEvenPodsSpreadPriority(t *testing.T) { allPods := append(tt.existingPods, tt.incomingPod) defer cleanupPods(cs, t, allPods) for _, pod := range tt.existingPods { - createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod) + createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) if err != nil { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } @@ -342,7 +343,7 @@ func TestEvenPodsSpreadPriority(t *testing.T) { t.Errorf("Test Failed: error while waiting for pod during test: %v", err) } } - testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(tt.incomingPod) + testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod) if err != nil && !apierrors.IsInvalid(err) { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index 0eace3381c2..33a5982b716 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -19,6 +19,7 @@ package scheduler // This file tests the scheduler. 
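The preemption tests above detect preemption by polling the pod and inspecting .status.nominatedNodeName. A minimal sketch of that wait, assuming the hypothetical name waitForNominatedNodeName and the context-aware Get used throughout this patch:

package sketch // hypothetical helper, not part of this patch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForNominatedNodeName polls until the scheduler records a nominated node
// on the pod, which is how the preemption tests confirm a preemption happened.
func waitForNominatedNodeName(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
	return wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
		p, err := cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return len(p.Status.NominatedNodeName) > 0, nil
	})
}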
import ( + "context" "fmt" "testing" "time" @@ -60,7 +61,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) { defer framework.DeleteTestingNamespace(ns, s, t) clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + defer clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) informerFactory := informers.NewSharedInformerFactory(clientSet, 0) for i, test := range []struct { @@ -250,7 +251,7 @@ priorities: [] } policyConfigMap.APIVersion = "v1" - clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap) + clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &policyConfigMap) eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: clientSet.EventsV1beta1().Events("")}) stopCh := make(chan struct{}) @@ -296,7 +297,7 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) { defer framework.DeleteTestingNamespace(ns, s, t) clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + defer clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) informerFactory := informers.NewSharedInformerFactory(clientSet, 0) @@ -335,7 +336,7 @@ func TestUnschedulableNodes(t *testing.T) { nodeLister := testCtx.informerFactory.Core().V1().Nodes().Lister() // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (Nodes). 
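Several of these scheduler tests defer a blanket Node cleanup for exactly the reason the NOTE above gives: Nodes are not namespaced, so leftovers leak into other tests. A minimal sketch of that cleanup, assuming a hypothetical helper name (cleanupAllNodes) and the DeleteCollection signature as it appears in this patch:

package sketch // hypothetical helper, not part of this patch

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// cleanupAllNodes deletes every Node; tests that create non-namespaced objects
// defer this so they do not interfere with other tests.
func cleanupAllNodes(t *testing.T, cs clientset.Interface) {
	// DeleteCollection takes (ctx, *DeleteOptions, ListOptions) at this point
	// in the migration; nil selects the default delete options.
	if err := cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}); err != nil {
		t.Errorf("failed to delete nodes: %v", err)
	}
}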
- defer testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + defer testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) goodCondition := v1.NodeCondition{ Type: v1.NodeReady, @@ -374,7 +375,7 @@ func TestUnschedulableNodes(t *testing.T) { { makeUnSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) { n.Spec.Unschedulable = true - if _, err := c.CoreV1().Nodes().Update(n); err != nil { + if _, err := c.CoreV1().Nodes().Update(context.TODO(), n); err != nil { t.Fatalf("Failed to update node with unschedulable=true: %v", err) } err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool { @@ -390,7 +391,7 @@ func TestUnschedulableNodes(t *testing.T) { }, makeSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) { n.Spec.Unschedulable = false - if _, err := c.CoreV1().Nodes().Update(n); err != nil { + if _, err := c.CoreV1().Nodes().Update(context.TODO(), n); err != nil { t.Fatalf("Failed to update node with unschedulable=false: %v", err) } err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool { @@ -404,7 +405,7 @@ func TestUnschedulableNodes(t *testing.T) { } for i, mod := range nodeModifications { - unSchedNode, err := testCtx.clientSet.CoreV1().Nodes().Create(node) + unSchedNode, err := testCtx.clientSet.CoreV1().Nodes().Create(context.TODO(), node) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -431,7 +432,7 @@ func TestUnschedulableNodes(t *testing.T) { } // Apply the schedulable modification to the node, and wait for the reflection - schedNode, err := testCtx.clientSet.CoreV1().Nodes().Get(unSchedNode.Name, metav1.GetOptions{}) + schedNode, err := testCtx.clientSet.CoreV1().Nodes().Get(context.TODO(), unSchedNode.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get node: %v", err) } @@ -447,7 +448,7 @@ func TestUnschedulableNodes(t *testing.T) { if err := deletePod(testCtx.clientSet, myPod.Name, myPod.Namespace); err != nil { t.Errorf("Failed to delete pod: %v", err) } - err = testCtx.clientSet.CoreV1().Nodes().Delete(schedNode.Name, nil) + err = testCtx.clientSet.CoreV1().Nodes().Delete(context.TODO(), schedNode.Name, nil) if err != nil { t.Errorf("Failed to delete node: %v", err) } @@ -487,7 +488,7 @@ func TestMultiScheduler(t *testing.T) { }, }, } - testCtx.clientSet.CoreV1().Nodes().Create(node) + testCtx.clientSet.CoreV1().Nodes().Create(context.TODO(), node) // 3. 
create 3 pods for testing t.Logf("create 3 pods for testing") @@ -638,7 +639,7 @@ func TestAllocatable(t *testing.T) { }, } - if _, err := testCtx.clientSet.CoreV1().Nodes().UpdateStatus(allocNode); err != nil { + if _, err := testCtx.clientSet.CoreV1().Nodes().UpdateStatus(context.TODO(), allocNode); err != nil { t.Fatalf("Failed to update node with Status.Allocatable: %v", err) } @@ -749,7 +750,7 @@ func TestSchedulerInformers(t *testing.T) { // Cleanup pods = append(pods, unschedulable) cleanupPods(cs, t, pods) - cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(nil, metav1.ListOptions{}) - cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) } } diff --git a/test/integration/scheduler/taint_test.go b/test/integration/scheduler/taint_test.go index 1c877fc771b..879cc7eb923 100644 --- a/test/integration/scheduler/taint_test.go +++ b/test/integration/scheduler/taint_test.go @@ -19,6 +19,7 @@ package scheduler // This file tests the Taint feature. import ( + "context" "errors" "fmt" "testing" @@ -530,11 +531,11 @@ func TestTaintNodeByCondition(t *testing.T) { }, } - if _, err := cs.CoreV1().Nodes().Create(node); err != nil { + if _, err := cs.CoreV1().Nodes().Create(context.TODO(), node); err != nil { t.Errorf("Failed to create node, err: %v", err) } if err := waitForNodeTaints(cs, node, test.expectedTaints); err != nil { - node, err = cs.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + node, err = cs.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Failed to get node <%s>", node.Name) } @@ -548,7 +549,7 @@ func TestTaintNodeByCondition(t *testing.T) { pod.Name = fmt.Sprintf("%s-%d", pod.Name, i) pod.Spec.Tolerations = p.tolerations - createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod) + createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) if err != nil { t.Fatalf("Failed to create pod %s/%s, error: %v", pod.Namespace, pod.Name, err) @@ -668,7 +669,7 @@ func TestTaintBasedEvictions(t *testing.T) { defer cleanupTest(t, testCtx) cs := testCtx.clientSet informers := testCtx.informerFactory - _, err := cs.CoreV1().Namespaces().Create(testCtx.ns) + _, err := cs.CoreV1().Namespaces().Create(context.TODO(), testCtx.ns) if err != nil { t.Errorf("Failed to create namespace %+v", err) } @@ -730,7 +731,7 @@ func TestTaintBasedEvictions(t *testing.T) { }, }, }) - if _, err := cs.CoreV1().Nodes().Create(nodes[i]); err != nil { + if _, err := cs.CoreV1().Nodes().Create(context.TODO(), nodes[i]); err != nil { t.Errorf("Failed to create node, err: %v", err) } } @@ -742,7 +743,7 @@ func TestTaintBasedEvictions(t *testing.T) { test.pod.Spec.Tolerations[0].TolerationSeconds = &tolerationSeconds[i] } - test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Create(test.pod) + test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Create(context.TODO(), test.pod) if err != nil { t.Fatalf("Test Failed: error: %v, while creating pod", err) } @@ -751,11 +752,11 @@ func TestTaintBasedEvictions(t *testing.T) { t.Errorf("Failed to schedule pod %s/%s on the node, err: %v", test.pod.Namespace, test.pod.Name, err) } - test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Get(test.pod.Name, metav1.GetOptions{}) + test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Get(context.TODO(), test.pod.Name, metav1.GetOptions{}) 
if err != nil { t.Fatalf("Test Failed: error: %v, while creating pod", err) } - neededNode, err = cs.CoreV1().Nodes().Get(test.pod.Spec.NodeName, metav1.GetOptions{}) + neededNode, err = cs.CoreV1().Nodes().Get(context.TODO(), test.pod.Spec.NodeName, metav1.GetOptions{}) if err != nil { t.Fatalf("Error while getting node associated with pod %v with err %v", test.pod.Name, err) } @@ -827,7 +828,7 @@ func TestTaintBasedEvictions(t *testing.T) { return false, nil }) if err != nil { - pod, _ := cs.CoreV1().Pods(testCtx.ns.Name).Get(test.pod.Name, metav1.GetOptions{}) + pod, _ := cs.CoreV1().Pods(testCtx.ns.Name).Get(context.TODO(), test.pod.Name, metav1.GetOptions{}) t.Fatalf("Error: %v, Expected test pod to be %s but it's %v", err, test.waitForPodCondition, pod) } cleanupPods(cs, t, []*v1.Pod{test.pod}) diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index 82cfdb1e1f2..6460e4e0860 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -83,7 +83,7 @@ func createAlgorithmSourceFromPolicy(policy *schedulerapi.Policy, clientSet clie Data: map[string]string{schedulerapi.SchedulerPolicyConfigMapKey: policyString}, } policyConfigMap.APIVersion = "v1" - clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&policyConfigMap) + clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &policyConfigMap) return schedulerapi.SchedulerAlgorithmSource{ Policy: &schedulerapi.SchedulerPolicySource{ @@ -264,7 +264,7 @@ func cleanupTest(t *testing.T, testCtx *testContext) { // Kill the scheduler. testCtx.cancelFn() // Cleanup nodes. - testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) framework.DeleteTestingNamespace(testCtx.ns, testCtx.httpServer, t) testCtx.closeFn() } @@ -302,7 +302,7 @@ func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key stri // nodeHasLabels returns a function that checks if a node has all the given labels. func nodeHasLabels(cs clientset.Interface, nodeName string, labels map[string]string) wait.ConditionFunc { return func() (bool, error) { - node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { // This could be a connection error so we want to retry. return false, nil @@ -344,17 +344,17 @@ func initNode(name string, res *v1.ResourceList, images []v1.ContainerImage) *v1 // createNode creates a node with the given resource list. func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) { - return cs.CoreV1().Nodes().Create(initNode(name, res, nil)) + return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, nil)) } // createNodeWithImages creates a node with the given resource list and images. func createNodeWithImages(cs clientset.Interface, name string, res *v1.ResourceList, images []v1.ContainerImage) (*v1.Node, error) { - return cs.CoreV1().Nodes().Create(initNode(name, res, images)) + return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, images)) } // updateNodeStatus updates the status of node. 
func updateNodeStatus(cs clientset.Interface, node *v1.Node) error { - _, err := cs.CoreV1().Nodes().UpdateStatus(node) + _, err := cs.CoreV1().Nodes().UpdateStatus(context.TODO(), node) return err } @@ -377,7 +377,7 @@ func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, nu // the taints. func nodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionFunc { return func() (bool, error) { - node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return false, err } @@ -398,13 +398,13 @@ func nodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wai } func addTaintToNode(cs clientset.Interface, nodeName string, taint v1.Taint) error { - node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return err } copy := node.DeepCopy() copy.Spec.Taints = append(copy.Spec.Taints, taint) - _, err = cs.CoreV1().Nodes().Update(copy) + _, err = cs.CoreV1().Nodes().Update(context.TODO(), copy) return err } @@ -416,8 +416,7 @@ func waitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) // cleanupNodes deletes all nodes. func cleanupNodes(cs clientset.Interface, t *testing.T) { - err := cs.CoreV1().Nodes().DeleteCollection( - metav1.NewDeleteOptions(0), metav1.ListOptions{}) + err := cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), metav1.ListOptions{}) if err != nil { t.Errorf("error while deleting all nodes: %v", err) } @@ -471,7 +470,7 @@ func initPausePod(cs clientset.Interface, conf *pausePodConfig) *v1.Pod { // createPausePod creates a pod with "Pause" image and the given config and // return its pointer and error status. func createPausePod(cs clientset.Interface, p *v1.Pod) (*v1.Pod, error) { - return cs.CoreV1().Pods(p.Namespace).Create(p) + return cs.CoreV1().Pods(p.Namespace).Create(context.TODO(), p) } // createPausePodWithResource creates a pod with "Pause" image and the given @@ -500,14 +499,14 @@ func createPausePodWithResource(cs clientset.Interface, podName string, // runPausePod creates a pod with "Pause" image and the given config and waits // until it is scheduled. It returns its pointer and error status. func runPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) { - pod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod) + pod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) if err != nil { return nil, fmt.Errorf("Error creating pause pod: %v", err) } if err = waitForPodToSchedule(cs, pod); err != nil { return pod, fmt.Errorf("Pod %v/%v didn't schedule successfully. Error: %v", pod.Namespace, pod.Name, err) } - if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil { + if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil { return pod, fmt.Errorf("Error getting pod %v/%v info: %v", pod.Namespace, pod.Name, err) } return pod, nil @@ -537,14 +536,14 @@ func initPodWithContainers(cs clientset.Interface, conf *podWithContainersConfig // runPodWithContainers creates a pod with given config and containers and waits // until it is scheduled. It returns its pointer and error status. 
func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) { - pod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod) + pod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) if err != nil { return nil, fmt.Errorf("Error creating pod-with-containers: %v", err) } if err = waitForPodToSchedule(cs, pod); err != nil { return pod, fmt.Errorf("Pod %v didn't schedule successfully. Error: %v", pod.Name, err) } - if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil { + if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil { return pod, fmt.Errorf("Error getting pod %v info: %v", pod.Name, err) } return pod, nil @@ -553,7 +552,7 @@ func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) // podDeleted returns true if a pod is not found in the given namespace. func podDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil } @@ -567,7 +566,7 @@ func podDeleted(c clientset.Interface, podNamespace, podName string) wait.Condit // podIsGettingEvicted returns true if the pod's deletion timestamp is set. func podIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return false, err } @@ -581,7 +580,7 @@ func podIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wa // podScheduled returns true if a node is assigned to the given pod. func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { // This could be a connection error so we want to retry. return false, nil @@ -596,7 +595,7 @@ func podScheduled(c clientset.Interface, podNamespace, podName string) wait.Cond // podScheduledIn returns true if a given pod is placed onto one of the expected nodes. func podScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNames []string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { // This could be a connection error so we want to retry. return false, nil @@ -617,7 +616,7 @@ func podScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNam // gets unschedulable status. func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { // This could be a connection error so we want to retry. return false, nil @@ -633,7 +632,7 @@ func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait. 
// records such reasons in case of error. func podSchedulingError(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { // This could be a connection error so we want to retry. return false, nil @@ -672,7 +671,7 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error { // the expected values. func waitForPDBsStable(testCtx *testContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error { return wait.Poll(time.Second, 60*time.Second, func() (bool, error) { - pdbList, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).List(metav1.ListOptions{}) + pdbList, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -708,7 +707,7 @@ func waitCachedPodsStable(testCtx *testContext, pods []*v1.Pod) error { return false, nil } for _, p := range pods { - actualPod, err1 := testCtx.clientSet.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{}) + actualPod, err1 := testCtx.clientSet.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{}) if err1 != nil { return false, err1 } @@ -723,17 +722,17 @@ func waitCachedPodsStable(testCtx *testContext, pods []*v1.Pod) error { // deletePod deletes the given pod in the given namespace. func deletePod(cs clientset.Interface, podName string, nsName string) error { - return cs.CoreV1().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0)) + return cs.CoreV1().Pods(nsName).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0)) } func getPod(cs clientset.Interface, podName string, podNamespace string) (*v1.Pod, error) { - return cs.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + return cs.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) } // cleanupPods deletes the given pods and waits for them to be actually deleted. func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) { for _, p := range pods { - err := cs.CoreV1().Pods(p.Namespace).Delete(p.Name, metav1.NewDeleteOptions(0)) + err := cs.CoreV1().Pods(p.Namespace).Delete(context.TODO(), p.Name, metav1.NewDeleteOptions(0)) if err != nil && !apierrors.IsNotFound(err) { t.Errorf("error while deleting pod %v/%v: %v", p.Namespace, p.Name, err) } @@ -749,7 +748,7 @@ func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) { // noPodsInNamespace returns true if no pods in the given namespace. func noPodsInNamespace(c clientset.Interface, podNamespace string) wait.ConditionFunc { return func() (bool, error) { - pods, err := c.CoreV1().Pods(podNamespace).List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods(podNamespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return false, err } @@ -761,7 +760,7 @@ func noPodsInNamespace(c clientset.Interface, podNamespace string) wait.Conditio // cleanupPodsInNamespace deletes the pods in the given namespace and waits for them to // be actually deleted. 
func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) { - if err := cs.CoreV1().Pods(ns).DeleteCollection(nil, metav1.ListOptions{}); err != nil { + if err := cs.CoreV1().Pods(ns).DeleteCollection(context.TODO(), nil, metav1.ListOptions{}); err != nil { t.Errorf("error while listing pod in namespace %v: %v", ns, err) return } diff --git a/test/integration/scheduler_perf/scheduler_test.go b/test/integration/scheduler_perf/scheduler_test.go index 88bb2984955..9d9f44f59ab 100644 --- a/test/integration/scheduler_perf/scheduler_test.go +++ b/test/integration/scheduler_perf/scheduler_test.go @@ -243,11 +243,11 @@ func (na nodeAffinity) mutatePodTemplate(pod *v1.Pod) { // generateNodes generates nodes to be used for scheduling. func (inputConfig *schedulerPerfConfig) generateNodes(config *testConfig) { for i := 0; i < inputConfig.NodeCount; i++ { - config.clientset.CoreV1().Nodes().Create(config.mutatedNodeTemplate) + config.clientset.CoreV1().Nodes().Create(context.TODO(), config.mutatedNodeTemplate) } for i := 0; i < config.numNodes-inputConfig.NodeCount; i++ { - config.clientset.CoreV1().Nodes().Create(baseNodeTemplate) + config.clientset.CoreV1().Nodes().Create(context.TODO(), baseNodeTemplate) } } diff --git a/test/integration/secrets/secrets_test.go b/test/integration/secrets/secrets_test.go index d15991af3a2..0a87df95ecd 100644 --- a/test/integration/secrets/secrets_test.go +++ b/test/integration/secrets/secrets_test.go @@ -19,6 +19,7 @@ package secrets // This file tests use of the secrets API resource. import ( + "context" "testing" "k8s.io/api/core/v1" @@ -31,7 +32,7 @@ import ( ) func deleteSecretOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.CoreV1().Secrets(ns).Delete(name, nil); err != nil { + if err := c.CoreV1().Secrets(ns).Delete(context.TODO(), name, nil); err != nil { t.Errorf("unable to delete secret %v: %v", name, err) } } @@ -62,7 +63,7 @@ func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) { }, } - if _, err := client.CoreV1().Secrets(s.Namespace).Create(&s); err != nil { + if _, err := client.CoreV1().Secrets(s.Namespace).Create(context.TODO(), &s); err != nil { t.Errorf("unable to create test secret: %v", err) } defer deleteSecretOrErrorf(t, client, s.Namespace, s.Name) @@ -102,14 +103,14 @@ func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) { // Create a pod to consume secret. pod.ObjectMeta.Name = "uses-secret" - if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) // Create a pod that consumes non-existent secret. 
pod.ObjectMeta.Name = "uses-non-existent-secret" - if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go index 139ff88ef71..3818e87a7f3 100644 --- a/test/integration/serviceaccount/service_account_test.go +++ b/test/integration/serviceaccount/service_account_test.go @@ -71,7 +71,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { ns := "test-service-account-creation" // Create namespace - _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { t.Fatalf("could not create namespace: %v", err) } @@ -83,7 +83,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { } // Delete service account - err = c.CoreV1().ServiceAccounts(ns).Delete(defaultUser.Name, nil) + err = c.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), defaultUser.Name, nil) if err != nil { t.Fatalf("Could not delete default serviceaccount: %v", err) } @@ -109,13 +109,13 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { name := "my-service-account" // Create namespace - _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { t.Fatalf("could not create namespace: %v", err) } // Create service account - _, err = c.CoreV1().ServiceAccounts(ns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name}}) + _, err = c.CoreV1().ServiceAccounts(ns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name}}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -127,7 +127,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Delete token - err = c.CoreV1().Secrets(ns).Delete(token1Name, nil) + err = c.CoreV1().Secrets(ns).Delete(context.TODO(), token1Name, nil) if err != nil { t.Fatalf("Could not delete token: %v", err) } @@ -145,12 +145,12 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Trigger creation of a new referenced token - serviceAccount, err := c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + serviceAccount, err := c.CoreV1().ServiceAccounts(ns).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } serviceAccount.Secrets = []v1.ObjectReference{} - _, err = c.CoreV1().ServiceAccounts(ns).Update(serviceAccount) + _, err = c.CoreV1().ServiceAccounts(ns).Update(context.TODO(), serviceAccount) if err != nil { t.Fatal(err) } @@ -168,7 +168,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Delete service account - err = c.CoreV1().ServiceAccounts(ns).Delete(name, nil) + err = c.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), name, nil) if err != nil { t.Fatal(err) } @@ -177,7 +177,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name) err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) { // Get all secrets in the namespace - secrets, err := c.CoreV1().Secrets(ns).List(metav1.ListOptions{}) + secrets, err := 
c.CoreV1().Secrets(ns).List(context.TODO(), metav1.ListOptions{}) // Retrieval errors should fail if err != nil { return false, err @@ -206,7 +206,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { ns := "auto-mount-ns" // Create "my" namespace - _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } @@ -260,7 +260,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { } expectedContainer2VolumeMounts := protoPod.Spec.Containers[1].VolumeMounts - createdPod, err := c.CoreV1().Pods(ns).Create(&protoPod) + createdPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), &protoPod) if err != nil { t.Fatal(err) } @@ -289,19 +289,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { otherns := "other-ns" // Create "my" namespace - _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}) + _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "other" namespace - _, err = c.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}) + _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "ro" user in myns - _, err = c.CoreV1().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}) + _, err = c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -314,13 +314,13 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { roClient := clientset.NewForConfigOrDie(&roClientConfig) doServiceAccountAPIRequests(t, roClient, myns, true, true, false) doServiceAccountAPIRequests(t, roClient, otherns, true, false, false) - err = c.CoreV1().Secrets(myns).Delete(roTokenName, nil) + err = c.CoreV1().Secrets(myns).Delete(context.TODO(), roTokenName, nil) if err != nil { t.Fatalf("could not delete token: %v", err) } // wait for delete to be observed and reacted to via watch wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { - sa, err := c.CoreV1().ServiceAccounts(myns).Get(readOnlyServiceAccountName, metav1.GetOptions{}) + sa, err := c.CoreV1().ServiceAccounts(myns).Get(context.TODO(), readOnlyServiceAccountName, metav1.GetOptions{}) if err != nil { return false, err } @@ -334,7 +334,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { doServiceAccountAPIRequests(t, roClient, myns, false, false, false) // Create "rw" user in myns - _, err = c.CoreV1().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}) + _, err = c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -489,13 +489,13 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie 
func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*v1.ServiceAccount, error) { if !shouldWait { - return c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + return c.CoreV1().ServiceAccounts(ns).Get(context.TODO(), name, metav1.GetOptions{}) } var user *v1.ServiceAccount var err error err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) { - user, err = c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + user, err = c.CoreV1().ServiceAccounts(ns).Get(context.TODO(), name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return false, nil } @@ -512,7 +512,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st token := "" findToken := func() (bool, error) { - user, err := c.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + user, err := c.CoreV1().ServiceAccounts(ns).Get(context.TODO(), name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return false, nil } @@ -521,7 +521,7 @@ func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name st } for _, ref := range user.Secrets { - secret, err := c.CoreV1().Secrets(ns).Get(ref.Name, metav1.GetOptions{}) + secret, err := c.CoreV1().Secrets(ns).Get(context.TODO(), ref.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { continue } @@ -571,17 +571,17 @@ func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string readOps := []testOperation{ func() error { - _, err := c.CoreV1().Secrets(ns).List(metav1.ListOptions{}) + _, err := c.CoreV1().Secrets(ns).List(context.TODO(), metav1.ListOptions{}) return err }, func() error { - _, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + _, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) return err }, } writeOps := []testOperation{ - func() error { _, err := c.CoreV1().Secrets(ns).Create(testSecret); return err }, - func() error { return c.CoreV1().Secrets(ns).Delete(testSecret.Name, nil) }, + func() error { _, err := c.CoreV1().Secrets(ns).Create(context.TODO(), testSecret); return err }, + func() error { return c.CoreV1().Secrets(ns).Delete(context.TODO(), testSecret.Name, nil) }, } for _, op := range readOps { diff --git a/test/integration/statefulset/statefulset_test.go b/test/integration/statefulset/statefulset_test.go index d25832e9dd4..ddb0f733da5 100644 --- a/test/integration/statefulset/statefulset_test.go +++ b/test/integration/statefulset/statefulset_test.go @@ -17,6 +17,7 @@ limitations under the License. 
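getReferencedServiceAccountToken above walks the ServiceAccount's .secrets references and reads the token out of the matching secret. A condensed sketch of that lookup (hypothetical name referencedToken), using the context-aware Get calls introduced by this patch:

package sketch // hypothetical helper, not part of this patch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// referencedToken returns the first service-account token referenced by the
// ServiceAccount's .secrets list, or "" if no token has been populated yet.
func referencedToken(c clientset.Interface, ns, name string) (string, error) {
	sa, err := c.CoreV1().ServiceAccounts(ns).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	for _, ref := range sa.Secrets {
		secret, err := c.CoreV1().Secrets(ns).Get(context.TODO(), ref.Name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			continue // the referenced secret may not exist yet
		}
		if err != nil {
			return "", err
		}
		if secret.Type == v1.SecretTypeServiceAccountToken {
			return string(secret.Data[v1.ServiceAccountTokenKey]), nil
		}
	}
	return "", nil
}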
package statefulset import ( + "context" "fmt" "testing" @@ -140,7 +141,7 @@ func TestSpecReplicasChange(t *testing.T) { } if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - newSTS, err := stsClient.Get(sts.Name, metav1.GetOptions{}) + newSTS, err := stsClient.Get(context.TODO(), sts.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -177,7 +178,7 @@ func TestDeletingAndFailedPods(t *testing.T) { updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) { pod.Finalizers = []string{"fake.example.com/blockDeletion"} }) - if err := c.CoreV1().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, &metav1.DeleteOptions{}); err != nil { t.Fatalf("error deleting pod %s: %v", deletingPod.Name, err) } diff --git a/test/integration/statefulset/util.go b/test/integration/statefulset/util.go index 3481351a1f8..0dc964ff3f6 100644 --- a/test/integration/statefulset/util.go +++ b/test/integration/statefulset/util.go @@ -17,6 +17,7 @@ limitations under the License. package statefulset import ( + "context" "fmt" "net/http/httptest" "testing" @@ -189,7 +190,7 @@ func runControllerAndInformers(sc *statefulset.StatefulSetController, informers } func createHeadlessService(t *testing.T, clientSet clientset.Interface, headlessService *v1.Service) { - _, err := clientSet.CoreV1().Services(headlessService.Namespace).Create(headlessService) + _, err := clientSet.CoreV1().Services(headlessService.Namespace).Create(context.TODO(), headlessService) if err != nil { t.Fatalf("failed creating headless service: %v", err) } @@ -199,14 +200,14 @@ func createSTSsPods(t *testing.T, clientSet clientset.Interface, stss []*appsv1. var createdSTSs []*appsv1.StatefulSet var createdPods []*v1.Pod for _, sts := range stss { - createdSTS, err := clientSet.AppsV1().StatefulSets(sts.Namespace).Create(sts) + createdSTS, err := clientSet.AppsV1().StatefulSets(sts.Namespace).Create(context.TODO(), sts) if err != nil { t.Fatalf("failed to create sts %s: %v", sts.Name, err) } createdSTSs = append(createdSTSs, createdSTS) } for _, pod := range pods { - createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(pod) + createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod) if err != nil { t.Fatalf("failed to create pod %s: %v", pod.Name, err) } @@ -221,7 +222,7 @@ func waitSTSStable(t *testing.T, clientSet clientset.Interface, sts *appsv1.Stat stsClient := clientSet.AppsV1().StatefulSets(sts.Namespace) desiredGeneration := sts.Generation if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - newSTS, err := stsClient.Get(sts.Name, metav1.GetOptions{}) + newSTS, err := stsClient.Get(context.TODO(), sts.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -234,12 +235,12 @@ func waitSTSStable(t *testing.T, clientSet clientset.Interface, sts *appsv1.Stat func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, updateFunc func(*v1.Pod)) *v1.Pod { var pod *v1.Pod if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newPod, err := podClient.Get(podName, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return err } updateFunc(newPod) - pod, err = podClient.Update(newPod) + pod, err = podClient.Update(context.TODO(), newPod) return err }); err != nil { t.Fatalf("failed to update pod %s: %v", podName, err) @@ -250,12 +251,12 @@ 
func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, upd func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, podName string, updateStatusFunc func(*v1.Pod)) *v1.Pod { var pod *v1.Pod if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newPod, err := podClient.Get(podName, metav1.GetOptions{}) + newPod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return err } updateStatusFunc(newPod) - pod, err = podClient.UpdateStatus(newPod) + pod, err = podClient.UpdateStatus(context.TODO(), newPod) return err }); err != nil { t.Fatalf("failed to update status of pod %s: %v", podName, err) @@ -266,7 +267,7 @@ func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, podName strin func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]string) *v1.PodList { podSelector := labels.Set(labelMap).AsSelector() options := metav1.ListOptions{LabelSelector: podSelector.String()} - pods, err := podClient.List(options) + pods, err := podClient.List(context.TODO(), options) if err != nil { t.Fatalf("failed obtaining a list of pods that match the pod labels %v: %v", labelMap, err) } @@ -279,12 +280,12 @@ func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]s func updateSTS(t *testing.T, stsClient typedappsv1.StatefulSetInterface, stsName string, updateFunc func(*appsv1.StatefulSet)) *appsv1.StatefulSet { var sts *appsv1.StatefulSet if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newSTS, err := stsClient.Get(stsName, metav1.GetOptions{}) + newSTS, err := stsClient.Get(context.TODO(), stsName, metav1.GetOptions{}) if err != nil { return err } updateFunc(newSTS) - sts, err = stsClient.Update(newSTS) + sts, err = stsClient.Update(context.TODO(), newSTS) return err }); err != nil { t.Fatalf("failed to update sts %s: %v", stsName, err) @@ -296,12 +297,12 @@ func updateSTS(t *testing.T, stsClient typedappsv1.StatefulSetInterface, stsName func scaleSTS(t *testing.T, c clientset.Interface, sts *appsv1.StatefulSet, replicas int32) { stsClient := c.AppsV1().StatefulSets(sts.Namespace) if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newSTS, err := stsClient.Get(sts.Name, metav1.GetOptions{}) + newSTS, err := stsClient.Get(context.TODO(), sts.Name, metav1.GetOptions{}) if err != nil { return err } *newSTS.Spec.Replicas = replicas - sts, err = stsClient.Update(newSTS) + sts, err = stsClient.Update(context.TODO(), newSTS) return err }); err != nil { t.Fatalf("failed to update .Spec.Replicas to %d for sts %s: %v", replicas, sts.Name, err) diff --git a/test/integration/storageclasses/storage_classes_test.go b/test/integration/storageclasses/storage_classes_test.go index c97b61e3d6e..988cd69df7d 100644 --- a/test/integration/storageclasses/storage_classes_test.go +++ b/test/integration/storageclasses/storage_classes_test.go @@ -19,6 +19,7 @@ package storageclasses // This file contains tests for the storage classes API resource. 
import ( + "context" "testing" "k8s.io/api/core/v1" @@ -59,7 +60,7 @@ func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Names Provisioner: provisionerPluginName, } - if _, err := client.StorageV1().StorageClasses().Create(&s); err != nil { + if _, err := client.StorageV1().StorageClasses().Create(context.TODO(), &s); err != nil { t.Errorf("unable to create test storage class: %v", err) } defer deleteStorageClassOrErrorf(t, client, s.Namespace, s.Name) @@ -79,20 +80,20 @@ func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Names } pvc.ObjectMeta.Name = "uses-storageclass" - if _, err := client.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc); err != nil { + if _, err := client.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc); err != nil { t.Errorf("Failed to create pvc: %v", err) } defer deletePersistentVolumeClaimOrErrorf(t, client, ns.Name, pvc.Name) } func deleteStorageClassOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.StorageV1().StorageClasses().Delete(name, nil); err != nil { + if err := c.StorageV1().StorageClasses().Delete(context.TODO(), name, nil); err != nil { t.Errorf("unable to delete storage class %v: %v", name, err) } } func deletePersistentVolumeClaimOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(name, nil); err != nil { + if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), name, nil); err != nil { t.Errorf("unable to delete persistent volume claim %v: %v", name, err) } } diff --git a/test/integration/ttlcontroller/ttlcontroller_test.go b/test/integration/ttlcontroller/ttlcontroller_test.go index b011a438238..e35e6917b0c 100644 --- a/test/integration/ttlcontroller/ttlcontroller_test.go +++ b/test/integration/ttlcontroller/ttlcontroller_test.go @@ -17,6 +17,7 @@ limitations under the License. package ttlcontroller import ( + "context" "fmt" "net/http/httptest" "strconv" @@ -59,7 +60,7 @@ func createNodes(t *testing.T, client *clientset.Clientset, startIndex, endIndex Name: fmt.Sprintf("node-%d", idx), }, } - if _, err := client.CoreV1().Nodes().Create(node); err != nil { + if _, err := client.CoreV1().Nodes().Create(context.TODO(), node); err != nil { t.Fatalf("Failed to create node: %v", err) } }(i) @@ -74,7 +75,7 @@ func deleteNodes(t *testing.T, client *clientset.Clientset, startIndex, endIndex go func(idx int) { defer wg.Done() name := fmt.Sprintf("node-%d", idx) - if err := client.CoreV1().Nodes().Delete(name, &metav1.DeleteOptions{}); err != nil { + if err := client.CoreV1().Nodes().Delete(context.TODO(), name, &metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete node: %v", err) } }(i) diff --git a/test/integration/utils.go b/test/integration/utils.go index e1f6e0092f1..e34d8687a61 100644 --- a/test/integration/utils.go +++ b/test/integration/utils.go @@ -17,6 +17,7 @@ limitations under the License. package integration import ( + "context" "testing" "time" @@ -34,7 +35,7 @@ import ( // DeletePodOrErrorf deletes a pod or fails with a call to t.Errorf. func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.CoreV1().Pods(ns).Delete(name, nil); err != nil { + if err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, nil); err != nil { t.Errorf("unable to delete pod %v: %v", name, err) } } @@ -58,7 +59,7 @@ var ( // WaitForPodToDisappear polls the API server if the pod has been deleted. 
func WaitForPodToDisappear(podClient coreclient.PodInterface, podName string, interval, timeout time.Duration) error { return wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := podClient.Get(podName, metav1.GetOptions{}) + _, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{}) if err == nil { return false, nil } diff --git a/test/integration/volume/attach_detach_test.go b/test/integration/volume/attach_detach_test.go index f63802b21e7..710f36d5689 100644 --- a/test/integration/volume/attach_detach_test.go +++ b/test/integration/volume/attach_detach_test.go @@ -17,6 +17,7 @@ limitations under the License. package volume import ( + "context" "fmt" "net/http/httptest" "testing" @@ -162,14 +163,14 @@ func TestPodDeletionWithDswp(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node); err != nil { t.Fatalf("Failed to created node : %v", err) } stopCh := make(chan struct{}) go informers.Core().V1().Nodes().Informer().Run(stopCh) - if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -240,13 +241,13 @@ func TestPodUpdateWithWithADC(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -276,7 +277,7 @@ func TestPodUpdateWithWithADC(t *testing.T) { pod.Status.Phase = v1.PodSucceeded - if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod); err != nil { t.Errorf("Failed to update pod : %v", err) } @@ -309,13 +310,13 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node); err != nil { t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -345,7 +346,7 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { pod.Status.Phase = v1.PodSucceeded - if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), pod); err != nil { t.Errorf("Failed to update pod : %v", err) } @@ -488,13 +489,13 @@ func TestPodAddedByDswp(t *testing.T) { pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) - if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node); err != nil { 
t.Fatalf("Failed to created node : %v", err) } go informers.Core().V1().Nodes().Informer().Run(podStopCh) - if _, err := testClient.CoreV1().Pods(ns.Name).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -564,7 +565,7 @@ func TestPVCBoundWithADC(t *testing.T) { }, }, } - if _, err := testClient.CoreV1().Nodes().Create(node); err != nil { + if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node); err != nil { t.Fatalf("Failed to created node : %v", err) } @@ -572,10 +573,10 @@ func TestPVCBoundWithADC(t *testing.T) { pvcs := []*v1.PersistentVolumeClaim{} for i := 0; i < 3; i++ { pod, pvc := fakePodWithPVC(fmt.Sprintf("fakepod-pvcnotbound-%d", i), fmt.Sprintf("fakepvc-%d", i), namespaceName) - if _, err := testClient.CoreV1().Pods(pod.Namespace).Create(pod); err != nil { + if _, err := testClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod); err != nil { t.Errorf("Failed to create pod : %v", err) } - if _, err := testClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc); err != nil { + if _, err := testClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc); err != nil { t.Errorf("Failed to create pvc : %v", err) } pvcs = append(pvcs, pvc) @@ -583,7 +584,7 @@ func TestPVCBoundWithADC(t *testing.T) { // pod with no pvc podNew := fakePodWithVol(namespaceName) podNew.SetName("fakepod") - if _, err := testClient.CoreV1().Pods(podNew.Namespace).Create(podNew); err != nil { + if _, err := testClient.CoreV1().Pods(podNew.Namespace).Create(context.TODO(), podNew); err != nil { t.Errorf("Failed to create pod : %v", err) } @@ -624,7 +625,7 @@ func createPVForPVC(t *testing.T, testClient *clientset.Clientset, pvc *v1.Persi StorageClassName: *pvc.Spec.StorageClassName, }, } - if _, err := testClient.CoreV1().PersistentVolumes().Create(pv); err != nil { + if _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { t.Errorf("Failed to create pv : %v", err) } } diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index 84995edc0ae..9614ff2ec23 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -17,6 +17,7 @@ limitations under the License. package volume import ( + "context" "fmt" "math/rand" "net/http/httptest" @@ -116,7 +117,7 @@ func TestPersistentVolumeRecycler(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -127,13 +128,13 @@ func TestPersistentVolumeRecycler(t *testing.T) { pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRecycle) pvc := createPVC("fake-pvc-recycler", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "") - _, err := testClient.CoreV1().PersistentVolumes().Create(pv) + _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv) if err != nil { t.Errorf("Failed to create PersistentVolume: %v", err) } klog.V(2).Infof("TestPersistentVolumeRecycler pvc created") - _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) if err != nil { t.Errorf("Failed to create PersistentVolumeClaim: %v", err) } @@ -146,7 +147,7 @@ func TestPersistentVolumeRecycler(t *testing.T) { klog.V(2).Infof("TestPersistentVolumeRecycler pvc bound") // deleting a claim releases the volume, after which it can be recycled - if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil { + if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, nil); err != nil { t.Errorf("error deleting claim %s", pvc.Name) } klog.V(2).Infof("TestPersistentVolumeRecycler pvc deleted") @@ -171,7 +172,7 @@ func TestPersistentVolumeDeleter(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -182,12 +183,12 @@ func TestPersistentVolumeDeleter(t *testing.T) { pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimDelete) pvc := createPVC("fake-pvc-deleter", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "") - _, err := testClient.CoreV1().PersistentVolumes().Create(pv) + _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv) if err != nil { t.Errorf("Failed to create PersistentVolume: %v", err) } klog.V(2).Infof("TestPersistentVolumeDeleter pv created") - _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) if err != nil { t.Errorf("Failed to create PersistentVolumeClaim: %v", err) } @@ -198,7 +199,7 @@ func TestPersistentVolumeDeleter(t *testing.T) { klog.V(2).Infof("TestPersistentVolumeDeleter pvc bound") // deleting a claim releases the volume, after which it can be recycled - if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil { + if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, nil); err != nil { t.Errorf("error deleting claim %s", pvc.Name) } klog.V(2).Infof("TestPersistentVolumeDeleter pvc deleted") @@ -231,7 +232,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -247,7 +248,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { counter++ newPvc := pvc.DeepCopy() newPvc.ObjectMeta = metav1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)} - claim, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(newPvc) + claim, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), newPvc) if err != nil { t.Fatalf("Error creating newPvc: %v", err) } @@ -264,7 +265,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { pv.Spec.ClaimRef = claimRef pv.Spec.ClaimRef.UID = "" - pv, err = testClient.CoreV1().PersistentVolumes().Create(pv) + pv, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv) if err != nil { t.Fatalf("Unexpected error creating pv: %v", err) } @@ -275,7 +276,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound) klog.V(2).Infof("TestPersistentVolumeBindRace pvc bound") - pv, err = testClient.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) + pv, err = testClient.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pv: %v", err) } @@ -301,7 +302,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -321,11 +322,11 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { pvTrue.ObjectMeta.SetLabels(map[string]string{"foo": "true"}) pvFalse.ObjectMeta.SetLabels(map[string]string{"foo": "false"}) - _, err = testClient.CoreV1().PersistentVolumes().Create(pvTrue) + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvTrue) if err != nil { t.Fatalf("Failed to create PersistentVolume: %v", err) } - _, err = testClient.CoreV1().PersistentVolumes().Create(pvFalse) + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvFalse) if err != nil { t.Fatalf("Failed to create PersistentVolume: %v", err) } @@ -337,7 +338,7 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { }, } - _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) if err != nil { t.Fatalf("Failed to create PersistentVolumeClaim: %v", err) } @@ -348,14 +349,14 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound) t.Log("claim bound") - pv, err := testClient.CoreV1().PersistentVolumes().Get("pv-false", metav1.GetOptions{}) + pv, err := testClient.CoreV1().PersistentVolumes().Get(context.TODO(), "pv-false", metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pv: %v", err) } if pv.Spec.ClaimRef != nil { t.Fatalf("False PV shouldn't be bound") } - pv, err = testClient.CoreV1().PersistentVolumes().Get("pv-true", metav1.GetOptions{}) + pv, err = testClient.CoreV1().PersistentVolumes().Get(context.TODO(), "pv-true", metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pv: %v", err) } @@ -382,7 +383,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -402,11 +403,11 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { pvTrue.ObjectMeta.SetLabels(map[string]string{"foo": "valA", "bar": ""}) pvFalse.ObjectMeta.SetLabels(map[string]string{"foo": "valB", "baz": ""}) - _, err = testClient.CoreV1().PersistentVolumes().Create(pvTrue) + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvTrue) if err != nil { t.Fatalf("Failed to create PersistentVolume: %v", err) } - _, err = testClient.CoreV1().PersistentVolumes().Create(pvFalse) + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvFalse) if err != nil { t.Fatalf("Failed to create PersistentVolume: %v", err) } @@ -437,7 +438,7 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { }, } - _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) if err != nil { t.Fatalf("Failed to create PersistentVolumeClaim: %v", err) } @@ -448,14 +449,14 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound) t.Log("claim bound") - pv, err := testClient.CoreV1().PersistentVolumes().Get("pv-false", metav1.GetOptions{}) + pv, err := testClient.CoreV1().PersistentVolumes().Get(context.TODO(), "pv-false", metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pv: %v", err) } if pv.Spec.ClaimRef != nil { t.Fatalf("False PV shouldn't be bound") } - pv, err = testClient.CoreV1().PersistentVolumes().Get("pv-true", metav1.GetOptions{}) + pv, err = testClient.CoreV1().PersistentVolumes().Get(context.TODO(), "pv-true", metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pv: %v", err) } @@ -482,7 +483,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). 
- defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -500,7 +501,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { pvc := createPVC("pvc-2", ns.Name, strconv.Itoa(maxPVs/2)+"G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "") for i := 0; i < maxPVs; i++ { - _, err := testClient.CoreV1().PersistentVolumes().Create(pvs[i]) + _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvs[i]) if err != nil { t.Errorf("Failed to create PersistentVolume %d: %v", i, err) } @@ -508,7 +509,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { } t.Log("volumes created") - _, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) + _, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) if err != nil { t.Errorf("Failed to create PersistentVolumeClaim: %v", err) } @@ -523,7 +524,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { // only one PV is bound bound := 0 for i := 0; i < maxPVs; i++ { - pv, err := testClient.CoreV1().PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{}) + pv, err := testClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvs[i].Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pv: %v", err) } @@ -548,7 +549,7 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { } // deleting a claim releases the volume - if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil { + if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, nil); err != nil { t.Errorf("error deleting claim %s", pvc.Name) } t.Log("claim deleted") @@ -572,7 +573,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) controllerStopCh := make(chan struct{}) informers.Start(controllerStopCh) @@ -597,7 +598,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // with >3000 volumes. go func() { for i := 0; i < objCount; i++ { - _, _ = testClient.CoreV1().PersistentVolumes().Create(pvs[i]) + _, _ = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvs[i]) } }() // Wait for them to get Available @@ -618,7 +619,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // Modify PV i := rand.Intn(objCount) name := "pv-" + strconv.Itoa(i) - pv, err := testClient.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{}) + pv, err := testClient.CoreV1().PersistentVolumes().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { // Silently ignore error, the PV may have be already deleted // or not exists yet. @@ -630,7 +631,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { } else { pv.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int()) } - _, err = testClient.CoreV1().PersistentVolumes().Update(pv) + _, err = testClient.CoreV1().PersistentVolumes().Update(context.TODO(), pv) if err != nil { // Silently ignore error, the PV may have been updated by // the controller. 
@@ -642,7 +643,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // Modify PVC i := rand.Intn(objCount) name := "pvc-" + strconv.Itoa(i) - pvc, err := testClient.CoreV1().PersistentVolumeClaims(metav1.NamespaceDefault).Get(name, metav1.GetOptions{}) + pvc, err := testClient.CoreV1().PersistentVolumeClaims(metav1.NamespaceDefault).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { // Silently ignore error, the PVC may have be already // deleted or not exists yet. @@ -654,7 +655,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { } else { pvc.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int()) } - _, err = testClient.CoreV1().PersistentVolumeClaims(metav1.NamespaceDefault).Update(pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(metav1.NamespaceDefault).Update(context.TODO(), pvc) if err != nil { // Silently ignore error, the PVC may have been updated by // the controller. @@ -676,7 +677,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // Create the claims, again in a separate goroutine. go func() { for i := 0; i < objCount; i++ { - _, _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvcs[i]) + _, _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvcs[i]) } }() @@ -696,7 +697,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // check that everything is bound to something for i := 0; i < objCount; i++ { - pv, err := testClient.CoreV1().PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{}) + pv, err := testClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvs[i].Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pv: %v", err) } @@ -705,7 +706,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { } klog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name) - pvc, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{}) + pvc, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Get(context.TODO(), pvcs[i].Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pvc: %v", err) } @@ -745,13 +746,13 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { pvc := createPVC(pvcName, ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "") pvc.Annotations = map[string]string{"annBindCompleted": ""} pvc.Spec.VolumeName = pvName - newPVC, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) + newPVC, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) if err != nil { t.Fatalf("Cannot create claim %q: %v", pvc.Name, err) } // Save Bound status as a separate transaction newPVC.Status.Phase = v1.ClaimBound - newPVC, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).UpdateStatus(newPVC) + newPVC, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).UpdateStatus(context.TODO(), newPVC) if err != nil { t.Fatalf("Cannot update claim status %q: %v", pvc.Name, err) } @@ -769,13 +770,13 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { return } pv.Spec.ClaimRef = claimRef - newPV, err := testClient.CoreV1().PersistentVolumes().Create(pv) + newPV, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pv) if err != nil { t.Fatalf("Cannot create volume %q: %v", pv.Name, err) } // Save Bound status as a separate transaction newPV.Status.Phase = v1.VolumeBound - newPV, err = testClient.CoreV1().PersistentVolumes().UpdateStatus(newPV) + newPV, err = 
testClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), newPV) if err != nil { t.Fatalf("Cannot update volume status %q: %v", pv.Name, err) } @@ -826,7 +827,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { // check that everything is bound to something for i := 0; i < objCount; i++ { - pv, err := testClient.CoreV1().PersistentVolumes().Get(pvs[i].Name, metav1.GetOptions{}) + pv, err := testClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvs[i].Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pv: %v", err) } @@ -835,7 +836,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { } klog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name) - pvc, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{}) + pvc, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Get(context.TODO(), pvcs[i].Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pvc: %v", err) } @@ -861,8 +862,8 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes and StorageClasses). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) - defer testClient.StorageV1().StorageClasses().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) + defer testClient.StorageV1().StorageClasses().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) storageClass := storage.StorageClass{ TypeMeta: metav1.TypeMeta{ @@ -873,7 +874,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { }, Provisioner: provisionerPluginName, } - testClient.StorageV1().StorageClasses().Create(&storageClass) + testClient.StorageV1().StorageClasses().Create(context.TODO(), &storageClass) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -892,7 +893,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // early. It gets stuck with >3000 claims. 
go func() { for i := 0; i < objCount; i++ { - _, _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvcs[i]) + _, _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvcs[i]) } }() @@ -904,7 +905,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { klog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: claims are bound") // check that we have enough bound PVs - pvList, err := testClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{}) + pvList, err := testClient.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list volumes: %s", err) } @@ -921,13 +922,13 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // Delete the claims for i := 0; i < objCount; i++ { - _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvcs[i].Name, nil) + _ = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvcs[i].Name, nil) } // Wait for the PVs to get deleted by listing remaining volumes // (delete events were unreliable) for { - volumes, err := testClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{}) + volumes, err := testClient.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to list volumes: %v", err) } @@ -956,7 +957,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { // NOTE: This test cannot run in parallel, because it is creating and deleting // non-namespaced objects (PersistenceVolumes). - defer testClient.CoreV1().PersistentVolumes().DeleteCollection(nil, metav1.ListOptions{}) + defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) stopCh := make(chan struct{}) informers.Start(stopCh) @@ -971,17 +972,17 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { pvc := createPVC("pvc-rwm", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, "") - _, err := testClient.CoreV1().PersistentVolumes().Create(pvRwm) + _, err := testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvRwm) if err != nil { t.Errorf("Failed to create PersistentVolume: %v", err) } - _, err = testClient.CoreV1().PersistentVolumes().Create(pvRwo) + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), pvRwo) if err != nil { t.Errorf("Failed to create PersistentVolume: %v", err) } t.Log("volumes created") - _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), pvc) if err != nil { t.Errorf("Failed to create PersistentVolumeClaim: %v", err) } @@ -994,14 +995,14 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { t.Log("claim bound") // only RWM PV is bound - pv, err := testClient.CoreV1().PersistentVolumes().Get("pv-rwo", metav1.GetOptions{}) + pv, err := testClient.CoreV1().PersistentVolumes().Get(context.TODO(), "pv-rwo", metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pv: %v", err) } if pv.Spec.ClaimRef != nil { t.Fatalf("ReadWriteOnce PV shouldn't be bound") } - pv, err = testClient.CoreV1().PersistentVolumes().Get("pv-rwm", metav1.GetOptions{}) + pv, err = testClient.CoreV1().PersistentVolumes().Get(context.TODO(), "pv-rwm", metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error getting pv: %v", err) } @@ -1013,7 +1014,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { } // deleting a claim releases the volume - 
if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil { + if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(context.TODO(), pvc.Name, nil); err != nil { t.Errorf("error deleting claim %s", pvc.Name) } t.Log("claim deleted") @@ -1024,7 +1025,7 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase v1.PersistentVolumePhase) { // Check if the volume is already in requested phase - volume, err := client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + volume, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) if err == nil && volume.Status.Phase == phase { return } @@ -1045,7 +1046,7 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase v1.PersistentVolumeClaimPhase) { // Check if the claim is already in requested phase - claim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + claim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), claimName, metav1.GetOptions{}) if err == nil && claim.Status.Phase == phase { return } @@ -1141,11 +1142,11 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio t.Fatalf("Failed to construct PersistentVolumes: %v", err) } - watchPV, err := testClient.CoreV1().PersistentVolumes().Watch(metav1.ListOptions{}) + watchPV, err := testClient.CoreV1().PersistentVolumes().Watch(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to watch PersistentVolumes: %v", err) } - watchPVC, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Watch(metav1.ListOptions{}) + watchPVC, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Watch(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to watch PersistentVolumeClaims: %v", err) } diff --git a/test/integration/volumescheduling/util.go b/test/integration/volumescheduling/util.go index 882ee1daaf9..729efaa417a 100644 --- a/test/integration/volumescheduling/util.go +++ b/test/integration/volumescheduling/util.go @@ -153,7 +153,7 @@ func cleanupTest(t *testing.T, testCtx *testContext) { // Kill the scheduler. testCtx.cancelFn() // Cleanup nodes. - testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) framework.DeleteTestingNamespace(testCtx.ns, testCtx.httpServer, t) testCtx.closeFn() } @@ -185,7 +185,7 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error { // podScheduled returns true if a node is assigned to the given pod. func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { // This could be a connection error so we want to retry. return false, nil @@ -201,7 +201,7 @@ func podScheduled(c clientset.Interface, podNamespace, podName string) wait.Cond // gets unschedulable status. 
func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { // This could be a connection error so we want to retry. return false, nil diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go index 37af80a8cc4..fe53e168447 100644 --- a/test/integration/volumescheduling/volume_binding_test.go +++ b/test/integration/volumescheduling/volume_binding_test.go @@ -195,7 +195,7 @@ func TestVolumeBinding(t *testing.T) { classes[classImmediate] = makeStorageClass(fmt.Sprintf("immediate-%v", suffix), &modeImmediate) classes[classWait] = makeStorageClass(fmt.Sprintf("wait-%v", suffix), &modeWait) for _, sc := range classes { - if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil { + if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } } @@ -203,14 +203,14 @@ func TestVolumeBinding(t *testing.T) { // Create PVs for _, pvConfig := range test.pvs { pv := makePV(pvConfig.name, classes[pvConfig.scName].Name, pvConfig.preboundPVC, config.ns, pvConfig.node) - if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } for _, pvConfig := range test.unboundPvs { pv := makePV(pvConfig.name, classes[pvConfig.scName].Name, pvConfig.preboundPVC, config.ns, pvConfig.node) - if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } @@ -232,19 +232,19 @@ func TestVolumeBinding(t *testing.T) { // Create PVCs for _, pvcConfig := range test.pvcs { pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV) - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } for _, pvcConfig := range test.unboundPvcs { pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV) - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } // Create Pod - if _, err := config.client.CoreV1().Pods(config.ns).Create(test.pod); err != nil { + if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), test.pod); err != nil { t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err) } if test.shouldFail { @@ -297,7 +297,7 @@ func TestVolumeBindingRescheduling(t *testing.T) { }, trigger: func(config *testConfig) { sc := makeDynamicProvisionerStorageClass(storageClassName, &modeWait, nil) - if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil { + 
if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } }, @@ -310,12 +310,12 @@ func TestVolumeBindingRescheduling(t *testing.T) { }, trigger: func(config *testConfig) { sc := makeStorageClass(storageClassName, &modeWait) - if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil { + if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } // Create pv for this class to mock static provisioner behavior. pv := makePV("pv-reschedule-onclassadd-static", storageClassName, "", "", node1) - if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } }, @@ -332,7 +332,7 @@ func TestVolumeBindingRescheduling(t *testing.T) { }, trigger: func(config *testConfig) { pvc := makePVC("pvc-reschedule-onpvcadd", config.ns, &classWait, "") - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } }, @@ -350,7 +350,7 @@ func TestVolumeBindingRescheduling(t *testing.T) { // Create unbound pvc for _, pvcConfig := range test.pvcs { pvc := makePVC(pvcConfig.name, config.ns, &storageClassName, "") - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } @@ -358,13 +358,13 @@ func TestVolumeBindingRescheduling(t *testing.T) { // Create PVs for _, pvConfig := range test.pvs { pv := makePV(pvConfig.name, sharedClasses[pvConfig.scName].Name, pvConfig.preboundPVC, config.ns, pvConfig.node) - if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } // Create pod - if _, err := config.client.CoreV1().Pods(config.ns).Create(test.pod); err != nil { + if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), test.pod); err != nil { t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err) } @@ -434,7 +434,7 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, if dynamic { scName = &classDynamic sc := makeDynamicProvisionerStorageClass(*scName, &modeWait, nil) - if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil { + if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } } @@ -460,7 +460,7 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, // static prebound pvs pv = makePV(pvName, classImmediate, pvcName, config.ns, node1) } - if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { t.Fatalf("Failed to create PersistentVolume %q: 
%v", pv.Name, err) } pvs[i] = pv @@ -470,7 +470,7 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, } else { pvc = makePVC(pvcName, config.ns, scName, "") } - if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } pvcs[i] = pvc @@ -486,7 +486,7 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration, } pod := makePod(fmt.Sprintf("pod%03d", i), config.ns, podPvcs) - if pod, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil { + if pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod); err != nil { t.Fatalf("Failed to create Pod %q: %v", pod.Name, err) } pods[i] = pod @@ -528,7 +528,7 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n // Create PVs for the first node for i := 0; i < numPVsFirstNode; i++ { pv := makePV(fmt.Sprintf("pv-node1-%v", i), classWait, "", "", node1) - if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } @@ -536,7 +536,7 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n // Create 1 PV per Node for the remaining nodes for i := 2; i <= numNodes; i++ { pv := makePV(fmt.Sprintf("pv-node%v-0", i), classWait, "", "", fmt.Sprintf("node-%v", i)) - if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + if pv, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } @@ -545,7 +545,7 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n for i := 0; i < numPods; i++ { // Create one pvc per pod pvc := makePVC(fmt.Sprintf("pvc-%v", i), config.ns, &classWait, "") - if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + if pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } pvcs = append(pvcs, pvc) @@ -577,7 +577,7 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n } } - if pod, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil { + if pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod); err != nil { t.Fatalf("Failed to create Pod %q: %v", pod.Name, err) } pods = append(pods, pod) @@ -590,7 +590,7 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n t.Errorf("Failed to schedule Pod %q: %v", pod.Name, err) } else { // Keep track of all the nodes that the Pods were scheduled on - pod, err = config.client.CoreV1().Pods(config.ns).Get(pod.Name, metav1.GetOptions{}) + pod, err = config.client.CoreV1().Pods(config.ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get Pod %q: %v", pod.Name, err) } @@ -648,12 +648,12 @@ func TestPVAffinityConflict(t *testing.T) { pvc := makePVC("local-pvc", config.ns, &classImmediate, "") // Create PV - if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil 
{ + if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } // Create PVC - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } @@ -671,7 +671,7 @@ func TestPVAffinityConflict(t *testing.T) { pod := makePod(podName, config.ns, []string{"local-pvc"}) nodeMarkers[i].(func(*v1.Pod, string))(pod, "node-2") // Create Pod - if _, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil { + if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod); err != nil { t.Fatalf("Failed to create Pod %q: %v", pod.Name, err) } // Give time to shceduler to attempt to schedule pod @@ -679,7 +679,7 @@ func TestPVAffinityConflict(t *testing.T) { t.Errorf("Failed as Pod %s was not unschedulable: %v", pod.Name, err) } // Check pod conditions - p, err := config.client.CoreV1().Pods(config.ns).Get(podName, metav1.GetOptions{}) + p, err := config.client.CoreV1().Pods(config.ns).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to access Pod %s status: %v", podName, err) } @@ -693,7 +693,7 @@ func TestPVAffinityConflict(t *testing.T) { t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match node selector, node(s) had volume node affinity conflict. Got message %q", podName, p.Status.Conditions[0].Message) } // Deleting test pod - if err := config.client.CoreV1().Pods(config.ns).Delete(podName, &metav1.DeleteOptions{}); err != nil { + if err := config.client.CoreV1().Pods(config.ns).Delete(context.TODO(), podName, &metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete Pod %s: %v", podName, err) } } @@ -768,14 +768,14 @@ func TestVolumeProvision(t *testing.T) { } classes[classTopoMismatch] = makeDynamicProvisionerStorageClass(fmt.Sprintf("topomismatch-%v", suffix), &modeWait, topo) for _, sc := range classes { - if _, err := config.client.StorageV1().StorageClasses().Create(sc); err != nil { + if _, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } } // Create PVs for _, pvConfig := range test.pvs { pv := makePV(pvConfig.name, classes[pvConfig.scName].Name, pvConfig.preboundPVC, config.ns, pvConfig.node) - if _, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil { + if _, err := config.client.CoreV1().PersistentVolumes().Create(context.TODO(), pv); err != nil { t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err) } } @@ -783,25 +783,25 @@ func TestVolumeProvision(t *testing.T) { // Create PVCs for _, pvcConfig := range test.boundPvcs { pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV) - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } for _, pvcConfig := range test.unboundPvcs { pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV) - if _, err := 
config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } for _, pvcConfig := range test.provisionedPvcs { pvc := makePVC(pvcConfig.name, config.ns, &classes[pvcConfig.scName].Name, pvcConfig.preboundPV) - if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil { + if _, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc); err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } } // Create Pod - if _, err := config.client.CoreV1().Pods(config.ns).Create(test.pod); err != nil { + if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), test.pod); err != nil { t.Fatalf("Failed to create Pod %q: %v", test.pod.Name, err) } if test.shouldFail { @@ -848,7 +848,7 @@ func TestRescheduleProvisioning(t *testing.T) { defer func() { close(controllerCh) deleteTestObjects(clientset, ns, nil) - testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{}) + testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{}) testCtx.closeFn() }() @@ -859,14 +859,14 @@ func TestRescheduleProvisioning(t *testing.T) { // Prepare node and storage class. testNode := makeNode(0) - if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil { + if _, err := clientset.CoreV1().Nodes().Create(context.TODO(), testNode); err != nil { t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) } scName := "fail-provision" sc := makeDynamicProvisionerStorageClass(scName, &modeWait, nil) // Expect the storage class fail to provision. 
sc.Parameters[volumetest.ExpectProvisionFailureKey] = "" - if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil { + if _, err := clientset.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } @@ -874,7 +874,7 @@ func TestRescheduleProvisioning(t *testing.T) { pvcName := "pvc-fail-to-provision" pvc := makePVC(pvcName, ns, &scName, "") pvc.Annotations = map[string]string{"volume.kubernetes.io/selected-node": node1} - pvc, err = clientset.CoreV1().PersistentVolumeClaims(ns).Create(pvc) + pvc, err = clientset.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc) if err != nil { t.Fatalf("Failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) } @@ -913,14 +913,14 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t // Create nodes for i := 0; i < numberOfNodes; i++ { testNode := makeNode(i) - if _, err := clientset.CoreV1().Nodes().Create(testNode); err != nil { + if _, err := clientset.CoreV1().Nodes().Create(context.TODO(), testNode); err != nil { t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) } } // Create SCs for _, sc := range sharedClasses { - if _, err := clientset.StorageV1().StorageClasses().Create(sc); err != nil { + if _, err := clientset.StorageV1().StorageClasses().Create(context.TODO(), sc); err != nil { t.Fatalf("Failed to create StorageClass %q: %v", sc.Name, err) } } @@ -984,10 +984,10 @@ func initPVController(t *testing.T, testCtx *testContext, provisionDelaySeconds } func deleteTestObjects(client clientset.Interface, ns string, option *metav1.DeleteOptions) { - client.CoreV1().Pods(ns).DeleteCollection(option, metav1.ListOptions{}) - client.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(option, metav1.ListOptions{}) - client.CoreV1().PersistentVolumes().DeleteCollection(option, metav1.ListOptions{}) - client.StorageV1().StorageClasses().DeleteCollection(option, metav1.ListOptions{}) + client.CoreV1().Pods(ns).DeleteCollection(context.TODO(), option, metav1.ListOptions{}) + client.CoreV1().PersistentVolumeClaims(ns).DeleteCollection(context.TODO(), option, metav1.ListOptions{}) + client.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), option, metav1.ListOptions{}) + client.StorageV1().StorageClasses().DeleteCollection(context.TODO(), option, metav1.ListOptions{}) } func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass { @@ -1136,7 +1136,7 @@ func makeNode(index int) *v1.Node { } func validatePVCPhase(t *testing.T, client clientset.Interface, pvcName string, ns string, phase v1.PersistentVolumeClaimPhase, isProvisioned bool) { - claim, err := client.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) + claim, err := client.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{}) if err != nil { t.Errorf("Failed to get PVC %v/%v: %v", ns, pvcName, err) } @@ -1172,7 +1172,7 @@ func validateProvisionAnn(claim *v1.PersistentVolumeClaim, volIsProvisioned bool func waitForProvisionAnn(client clientset.Interface, pvc *v1.PersistentVolumeClaim, annShouldExist bool) error { return wait.Poll(time.Second, 30*time.Second, func() (bool, error) { - claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -1184,7 +1184,7 
 }
 func validatePVPhase(t *testing.T, client clientset.Interface, pvName string, phase v1.PersistentVolumePhase) {
-	pv, err := client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
+	pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Failed to get PV %v: %v", pvName, err)
 	}
@@ -1196,7 +1196,7 @@ func validatePVPhase(t *testing.T, client clientset.Interface, pvName string, ph
 func waitForPVPhase(client clientset.Interface, pvName string, phase v1.PersistentVolumePhase) error {
 	return wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
-		pv, err := client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
+		pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -1210,7 +1210,7 @@ func waitForPVPhase(client clientset.Interface, pvName string, phase v1.Persiste
 func waitForPVCBound(client clientset.Interface, pvc *v1.PersistentVolumeClaim) error {
 	return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
-		claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
+		claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go
index 32a9dd8849a..9ba0cb14457 100644
--- a/test/soak/serve_hostnames/serve_hostnames.go
+++ b/test/soak/serve_hostnames/serve_hostnames.go
@@ -94,7 +94,7 @@ func main() {
 	var nodes *v1.NodeList
 	for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) {
-		nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{})
+		nodes, err = client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
 		if err == nil {
 			break
 		}
@@ -116,18 +116,18 @@ func main() {
 	queries := *queriesAverage * len(nodes.Items) * *podsPerNode
 	// Create the namespace
-	got, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}})
+	got, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}})
 	if err != nil {
 		klog.Fatalf("Failed to create namespace: %v", err)
 	}
 	ns := got.Name
 	defer func(ns string) {
-		if err := client.CoreV1().Namespaces().Delete(ns, nil); err != nil {
+		if err := client.CoreV1().Namespaces().Delete(context.TODO(), ns, nil); err != nil {
 			klog.Warningf("Failed to delete namespace %s: %v", ns, err)
 		} else {
 			// wait until the namespace disappears
 			for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ {
-				if _, err := client.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}); err != nil {
+				if _, err := client.CoreV1().Namespaces().Get(context.TODO(), ns, metav1.GetOptions{}); err != nil {
 					if apierrors.IsNotFound(err) {
 						return
 					}
@@ -144,7 +144,7 @@ func main() {
 	var svc *v1.Service
 	for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) {
 		t := time.Now()
-		svc, err = client.CoreV1().Services(ns).Create(&v1.Service{
+		svc, err = client.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "serve-hostnames",
 				Labels: map[string]string{
@@ -177,7 +177,7 @@ func main() {
 		klog.Infof("Cleaning up service %s/serve-hostnames", ns)
 		// Make several attempts to delete the service.
 		for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
-			if err := client.CoreV1().Services(ns).Delete(svc.Name, nil); err == nil {
+			if err := client.CoreV1().Services(ns).Delete(context.TODO(), svc.Name, nil); err == nil {
 				return
 			}
 			klog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err)
@@ -194,7 +194,7 @@ func main() {
 			for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) {
 				klog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name)
 				t := time.Now()
-				_, err = client.CoreV1().Pods(ns).Create(&v1.Pod{
+				_, err = client.CoreV1().Pods(ns).Create(context.TODO(), &v1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: podName,
 						Labels: map[string]string{
@@ -230,7 +230,7 @@ func main() {
 		// Make several attempts to delete the pods.
 		for _, podName := range podNames {
 			for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
-				if err = client.CoreV1().Pods(ns).Delete(podName, nil); err == nil {
+				if err = client.CoreV1().Pods(ns).Delete(context.TODO(), podName, nil); err == nil {
 					break
 				}
 				klog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err)
@@ -242,7 +242,7 @@ func main() {
 	for _, podName := range podNames {
 		var pod *v1.Pod
 		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
-			pod, err = client.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
+			pod, err = client.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
 			if err != nil {
 				klog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout)
 				continue
diff --git a/test/utils/create_resources.go b/test/utils/create_resources.go
index 21dd9801bfb..7f8a5146761 100644
--- a/test/utils/create_resources.go
+++ b/test/utils/create_resources.go
@@ -19,6 +19,7 @@ limitations under the License.
 package utils
 import (
+	"context"
 	"fmt"
 	"time"
@@ -68,7 +69,7 @@ func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod)
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.CoreV1().Pods(namespace).Create(obj)
+		_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -85,7 +86,7 @@ func CreateRCWithRetries(c clientset.Interface, namespace string, obj *v1.Replic
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.CoreV1().ReplicationControllers(namespace).Create(obj)
+		_, err := c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -102,7 +103,7 @@ func CreateReplicaSetWithRetries(c clientset.Interface, namespace string, obj *a
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.AppsV1().ReplicaSets(namespace).Create(obj)
+		_, err := c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -119,7 +120,7 @@ func CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *a
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.AppsV1().Deployments(namespace).Create(obj)
+		_, err := c.AppsV1().Deployments(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -136,7 +137,7 @@ func CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *ap
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.AppsV1().DaemonSets(namespace).Create(obj)
+		_, err := c.AppsV1().DaemonSets(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -153,7 +154,7 @@ func CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Jo
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.BatchV1().Jobs(namespace).Create(obj)
+		_, err := c.BatchV1().Jobs(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -170,7 +171,7 @@ func CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Se
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.CoreV1().Secrets(namespace).Create(obj)
+		_, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -187,7 +188,7 @@ func CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.CoreV1().ConfigMaps(namespace).Create(obj)
+		_, err := c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -204,7 +205,7 @@ func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.S
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.CoreV1().Services(namespace).Create(obj)
+		_, err := c.CoreV1().Services(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -221,7 +222,7 @@ func CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.CoreV1().ResourceQuotas(namespace).Create(obj)
+		_, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -238,7 +239,7 @@ func CreatePersistentVolumeWithRetries(c clientset.Interface, obj *v1.Persistent
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.CoreV1().PersistentVolumes().Create(obj)
+		_, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
@@ -255,7 +256,7 @@ func CreatePersistentVolumeClaimWithRetries(c clientset.Interface, namespace str
 		return fmt.Errorf("Object provided to create is empty")
 	}
 	createFunc := func() (bool, error) {
-		_, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(obj)
+		_, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), obj)
 		if err == nil || apierrors.IsAlreadyExists(err) {
 			return true, nil
 		}
diff --git a/test/utils/delete_resources.go b/test/utils/delete_resources.go
index 1eb96cddaa6..60d1ba44e8c 100644
--- a/test/utils/delete_resources.go
+++ b/test/utils/delete_resources.go
@@ -19,6 +19,7 @@ limitations under the License.
 package utils
 import (
+	"context"
 	"fmt"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -34,23 +35,23 @@ import (
 func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error {
 	switch kind {
 	case api.Kind("Pod"):
-		return c.CoreV1().Pods(namespace).Delete(name, options)
+		return c.CoreV1().Pods(namespace).Delete(context.TODO(), name, options)
 	case api.Kind("ReplicationController"):
-		return c.CoreV1().ReplicationControllers(namespace).Delete(name, options)
+		return c.CoreV1().ReplicationControllers(namespace).Delete(context.TODO(), name, options)
 	case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
-		return c.AppsV1().ReplicaSets(namespace).Delete(name, options)
+		return c.AppsV1().ReplicaSets(namespace).Delete(context.TODO(), name, options)
 	case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
-		return c.AppsV1().Deployments(namespace).Delete(name, options)
+		return c.AppsV1().Deployments(namespace).Delete(context.TODO(), name, options)
 	case extensionsinternal.Kind("DaemonSet"):
-		return c.AppsV1().DaemonSets(namespace).Delete(name, options)
+		return c.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, options)
 	case batchinternal.Kind("Job"):
-		return c.BatchV1().Jobs(namespace).Delete(name, options)
+		return c.BatchV1().Jobs(namespace).Delete(context.TODO(), name, options)
 	case api.Kind("Secret"):
-		return c.CoreV1().Secrets(namespace).Delete(name, options)
+		return c.CoreV1().Secrets(namespace).Delete(context.TODO(), name, options)
 	case api.Kind("ConfigMap"):
-		return c.CoreV1().ConfigMaps(namespace).Delete(name, options)
+		return c.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, options)
 	case api.Kind("Service"):
-		return c.CoreV1().Services(namespace).Delete(name, options)
+		return c.CoreV1().Services(namespace).Delete(context.TODO(), name, options)
 	default:
 		return fmt.Errorf("Unsupported kind when deleting: %v", kind)
 	}
diff --git a/test/utils/density_utils.go b/test/utils/density_utils.go
index d1b583cc623..a09a24f3f74 100644
--- a/test/utils/density_utils.go
+++ b/test/utils/density_utils.go
@@ -17,6 +17,7 @@ limitations under the License.
 package utils
 import (
+	"context"
 	"fmt"
 	"strings"
 	"time"
@@ -42,7 +43,7 @@ func AddLabelsToNode(c clientset.Interface, nodeName string, labels map[string]s
 	patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString)
 	var err error
 	for attempt := 0; attempt < retries; attempt++ {
-		_, err = c.CoreV1().Nodes().Patch(nodeName, types.MergePatchType, []byte(patch))
+		_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.MergePatchType, []byte(patch))
 		if err != nil {
 			if !apierrors.IsConflict(err) {
 				return err
@@ -61,7 +62,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri
 	var node *v1.Node
 	var err error
 	for attempt := 0; attempt < retries; attempt++ {
-		node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+		node, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 		if err != nil {
 			return err
 		}
@@ -74,7 +75,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri
 			}
 			delete(node.Labels, labelKey)
 		}
-		_, err = c.CoreV1().Nodes().Update(node)
+		_, err = c.CoreV1().Nodes().Update(context.TODO(), node)
 		if err != nil {
 			if !apierrors.IsConflict(err) {
 				return err
@@ -92,7 +93,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri
 // VerifyLabelsRemoved checks if Node for given nodeName does not have any of labels from labelKeys.
 // Return non-nil error if it does.
 func VerifyLabelsRemoved(c clientset.Interface, nodeName string, labelKeys []string) error {
-	node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
diff --git a/test/utils/deployment.go b/test/utils/deployment.go
index 323b927d061..f6e968a3639 100644
--- a/test/utils/deployment.go
+++ b/test/utils/deployment.go
@@ -17,6 +17,7 @@ limitations under the License.
 package utils
 import (
+	"context"
 	"fmt"
 	"time"
@@ -51,7 +52,7 @@ func LogReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.R
 func LogPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet, logf LogfFn) {
 	minReadySeconds := deployment.Spec.MinReadySeconds
 	podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
-		return c.CoreV1().Pods(namespace).List(options)
+		return c.CoreV1().Pods(namespace).List(context.TODO(), options)
 	}
 	podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
@@ -80,7 +81,7 @@ func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *apps.D
 	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 		var err error
-		deployment, err = c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
+		deployment, err = c.AppsV1().Deployments(d.Namespace).Get(context.TODO(), d.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -174,7 +175,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
 	var reason string
 	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 		var err error
-		deployment, err = c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+		deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -205,7 +206,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
 // CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
 func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
-	deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+	deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
 	if err != nil {
 		return fmt.Errorf("unable to get deployment %s during revision check: %v", deploymentName, err)
 	}
@@ -259,12 +260,12 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string,
 	var updateErr error
 	pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 		var err error
-		if deployment, err = c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil {
+		if deployment, err = c.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
 			return false, err
 		}
 		// Apply the update, then attempt to push it to the apiserver.
 		applyUpdate(deployment)
-		if deployment, err = c.AppsV1().Deployments(namespace).Update(deployment); err == nil {
+		if deployment, err = c.AppsV1().Deployments(namespace).Update(context.TODO(), deployment); err == nil {
 			logf("Updating deployment %s", name)
 			return true, nil
 		}
@@ -279,14 +280,14 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string,
 func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
 	return deploymentutil.WaitForObservedDeployment(func() (*apps.Deployment, error) {
-		return c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+		return c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
 	}, desiredGeneration, 2*time.Second, 1*time.Minute)
 }
 // WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
 func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string, pollInterval, pollTimeout time.Duration) error {
 	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-		deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+		deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -306,7 +307,7 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
 func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64, pollInterval, pollTimeout time.Duration) error {
 	var deployment *apps.Deployment
 	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-		d, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+		d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -322,7 +323,7 @@ func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentNa
 func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
 	var deployment *apps.Deployment
 	pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-		d, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+		d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
diff --git a/test/utils/pod_store.go b/test/utils/pod_store.go
index 7dae2b12cf0..b48796c217a 100644
--- a/test/utils/pod_store.go
+++ b/test/utils/pod_store.go
@@ -17,6 +17,7 @@ limitations under the License.
 package utils
 import (
+	"context"
 	"time"
 	"k8s.io/api/core/v1"
@@ -42,13 +43,13 @@ func NewPodStore(c clientset.Interface, namespace string, label labels.Selector,
 		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 			options.LabelSelector = label.String()
 			options.FieldSelector = field.String()
-			obj, err := c.CoreV1().Pods(namespace).List(options)
+			obj, err := c.CoreV1().Pods(namespace).List(context.TODO(), options)
 			return runtime.Object(obj), err
 		},
 		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 			options.LabelSelector = label.String()
 			options.FieldSelector = field.String()
-			return c.CoreV1().Pods(namespace).Watch(options)
+			return c.CoreV1().Pods(namespace).Watch(context.TODO(), options)
 		},
 	}
 	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
diff --git a/test/utils/replicaset.go b/test/utils/replicaset.go
index 838dd891f03..60ae2a28078 100644
--- a/test/utils/replicaset.go
+++ b/test/utils/replicaset.go
@@ -17,6 +17,7 @@ limitations under the License.
 package utils
 import (
+	"context"
 	"fmt"
 	"testing"
 	"time"
@@ -34,12 +35,12 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,
 	var updateErr error
 	pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 		var err error
-		if rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
+		if rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
 			return false, err
 		}
 		// Apply the update, then attempt to push it to the apiserver.
 		applyUpdate(rs)
-		if rs, err = c.AppsV1().ReplicaSets(namespace).Update(rs); err == nil {
+		if rs, err = c.AppsV1().ReplicaSets(namespace).Update(context.TODO(), rs); err == nil {
 			logf("Updating replica set %q", name)
 			return true, nil
 		}
@@ -56,7 +57,7 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,
 func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *apps.ReplicaSet, pollInterval, pollTimeout time.Duration) error {
 	desiredGeneration := rs.Generation
 	if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-		newRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
+		newRS, err := clientSet.AppsV1().ReplicaSets(rs.Namespace).Get(context.TODO(), rs.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -72,12 +73,12 @@ func UpdateReplicaSetStatusWithRetries(c clientset.Interface, namespace, name st
 	var updateErr error
 	pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 		var err error
-		if rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
+		if rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
 			return false, err
 		}
 		// Apply the update, then attempt to push it to the apiserver.
 		applyUpdate(rs)
-		if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(rs); err == nil {
+		if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(context.TODO(), rs); err == nil {
 			logf("Updating replica set %q", name)
 			return true, nil
 		}
diff --git a/test/utils/runners.go b/test/utils/runners.go
index bfef242767e..f3ba0caeecd 100644
--- a/test/utils/runners.go
+++ b/test/utils/runners.go
@@ -66,7 +66,7 @@ func removePtr(replicas *int32) int32 {
 func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) {
 	// Wait until it's scheduled
-	p, err := c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{ResourceVersion: "0"})
+	p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"})
 	if err == nil && p.Spec.NodeName != "" {
 		return p, nil
 	}
@@ -74,7 +74,7 @@ func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, time
 	startTime := time.Now()
 	for startTime.Add(timeout).After(time.Now()) {
 		time.Sleep(pollingPeriod)
-		p, err := c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{ResourceVersion: "0"})
+		p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"})
 		if err == nil && p.Spec.NodeName != "" {
 			return p, nil
 		}
@@ -852,7 +852,7 @@ func (config *RCConfig) start() error {
 		if oldRunning != config.Replicas {
 			// List only pods from a given replication controller.
 			options := metav1.ListOptions{LabelSelector: label.String()}
-			if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(options); err == nil {
+			if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(context.TODO(), options); err == nil {
 				for _, pod := range pods.Items {
 					config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
 				}
@@ -1088,7 +1088,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
 		csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d)
 	}
-	_, err := client.StorageV1beta1().CSINodes().Create(csiNode)
+	_, err := client.StorageV1beta1().CSINodes().Create(context.TODO(), csiNode)
 	if apierrors.IsAlreadyExists(err) {
 		// Something created CSINode instance after we checked it did not exist.
 		// Make the caller to re-try PrepareDependentObjects by returning Conflict error
@@ -1118,12 +1118,12 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode,
 	}
 	csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.MigratedPlugins, ",")
-	_, err := client.StorageV1beta1().CSINodes().Update(csiNode)
+	_, err := client.StorageV1beta1().CSINodes().Update(context.TODO(), csiNode)
 	return err
 }
 func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
-	csiNode, err := client.StorageV1beta1().CSINodes().Get(node.Name, metav1.GetOptions{})
+	csiNode, err := client.StorageV1beta1().CSINodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
 	if err != nil {
 		if apierrors.IsNotFound(err) {
 			return s.createCSINode(node.Name, client)
@@ -1134,7 +1134,7 @@ func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client
 }
 func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
-	csiNode, err := client.StorageV1beta1().CSINodes().Get(nodeName, metav1.GetOptions{})
+	csiNode, err := client.StorageV1beta1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 	if err != nil {
 		if apierrors.IsNotFound(err) {
 			return nil
@@ -1194,7 +1194,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
 		return nil
 	}
 	for attempt := 0; attempt < retries; attempt++ {
-		if _, err = client.CoreV1().Nodes().Patch(node.Name, types.MergePatchType, []byte(patch)); err == nil {
+		if _, err = client.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.MergePatchType, []byte(patch)); err == nil {
 			break
 		}
 		if !apierrors.IsConflict(err) {
@@ -1224,7 +1224,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
 func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
 	var err error
 	for attempt := 0; attempt < retries; attempt++ {
-		node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+		node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 		if err != nil {
 			return fmt.Errorf("Skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
 		}
@@ -1232,7 +1232,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
 		if apiequality.Semantic.DeepEqual(node, updatedNode) {
 			return nil
 		}
-		if _, err = client.CoreV1().Nodes().Update(updatedNode); err == nil {
+		if _, err = client.CoreV1().Nodes().Update(context.TODO(), updatedNode); err == nil {
 			break
 		}
 		if !apierrors.IsConflict(err) {
@@ -1698,7 +1698,7 @@ func (config *DaemonConfig) Run() error {
 	var err error
 	for i := 0; i < retries; i++ {
 		// Wait for all daemons to be running
-		nodes, err = config.Client.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
+		nodes, err = config.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
 		if err == nil {
 			break
 		} else if i+1 == retries {