From 4c8539cece2f0a6e6974b30d00c7341e10320bc5 Mon Sep 17 00:00:00 2001 From: Kevin Date: Wed, 25 Oct 2017 23:54:32 +0800 Subject: [PATCH] use core client with explicit version globally --- .../app/autoscaling.go | 4 +- cmd/kube-controller-manager/app/core.go | 6 +- pkg/cloudprovider/providers/gce/gce.go | 2 +- .../providers/gce/gce_clusterid.go | 4 +- pkg/controller/bootstrap/bootstrapsigner.go | 14 +- pkg/controller/bootstrap/tokencleaner.go | 10 +- .../certificates/certificate_controller.go | 2 +- pkg/controller/cloud/pvlcontroller.go | 2 +- pkg/controller/controller_utils.go | 16 +- pkg/controller/cronjob/cronjob_controller.go | 4 +- pkg/controller/daemon/daemon_controller.go | 6 +- pkg/controller/daemon/update.go | 2 +- .../deployment/deployment_controller.go | 6 +- .../deployment/util/deployment_util.go | 2 +- pkg/controller/disruption/disruption.go | 2 +- .../endpoint/endpoints_controller.go | 10 +- .../namespace/namespace_controller.go | 6 +- pkg/controller/node/ipam/adapter.go | 6 +- pkg/controller/node/ipam/cidr_allocator.go | 2 +- .../node/ipam/cloud_cidr_allocator.go | 6 +- pkg/controller/node/ipam/range_allocator.go | 6 +- pkg/controller/node/node_controller.go | 10 +- .../node/scheduler/taint_controller.go | 8 +- pkg/controller/node/util/controller_utils.go | 12 +- .../metrics/legacy_metrics_client.go | 4 +- pkg/controller/podgc/gc_controller.go | 8 +- pkg/controller/replicaset/replica_set.go | 6 +- .../replication/replication_controller.go | 8 +- pkg/controller/route/route_controller.go | 2 +- pkg/controller/service/service_controller.go | 8 +- .../serviceaccounts_controller.go | 6 +- pkg/controller/serviceaccount/tokengetter.go | 4 +- .../serviceaccount/tokens_controller.go | 20 +- .../statefulset/stateful_pod_control.go | 8 +- pkg/controller/statefulset/stateful_set.go | 2 +- pkg/controller/ttl/ttl_controller.go | 2 +- .../attachdetach/attach_detach_controller.go | 2 +- .../attach_detach_controller_test.go | 4 +- .../statusupdater/node_status_updater.go | 4 +- .../volume/persistentvolume/pv_controller.go | 20 +- .../persistentvolume/pv_controller_base.go | 4 +- pkg/kubelet/config/apiserver.go | 2 +- pkg/kubelet/configmap/configmap_manager.go | 4 +- pkg/kubelet/kubelet.go | 4 +- pkg/kubelet/kubelet_node_status.go | 6 +- pkg/kubelet/kubelet_pods.go | 4 +- pkg/kubelet/kubeletconfig/watch.go | 4 +- pkg/kubelet/pod/mirror_client.go | 4 +- pkg/kubelet/secret/secret_manager.go | 4 +- pkg/kubelet/status/status_manager.go | 6 +- .../desired_state_of_world_populator.go | 4 +- .../volumemanager/reconciler/reconciler.go | 2 +- .../volumemanager/volume_manager_test.go | 4 +- pkg/quota/evaluator/core/configmap.go | 2 +- .../core/persistent_volume_claims.go | 2 +- pkg/quota/evaluator/core/pods.go | 2 +- .../evaluator/core/replication_controllers.go | 2 +- pkg/quota/evaluator/core/resource_quotas.go | 2 +- pkg/quota/evaluator/core/secrets.go | 2 +- pkg/quota/evaluator/core/services.go | 2 +- pkg/util/node/node.go | 2 +- pkg/volume/cinder/cinder_util.go | 2 +- pkg/volume/glusterfs/glusterfs.go | 10 +- pkg/volume/rbd/rbd.go | 2 +- pkg/volume/testing/testing.go | 4 +- pkg/volume/util.go | 10 +- .../operationexecutor/operation_generator.go | 4 +- pkg/volume/util/util.go | 4 +- .../plugin/namespace/lifecycle/admission.go | 2 +- test/e2e/apimachinery/chunking.go | 2 +- test/e2e/apimachinery/etcd_failure.go | 2 +- test/e2e/apimachinery/garbage_collector.go | 28 +-- test/e2e/apimachinery/generated_clientset.go | 2 +- test/e2e/apimachinery/initializers.go | 36 ++-- 
test/e2e/apimachinery/namespace.go | 24 +-- test/e2e/apimachinery/table_conversion.go | 8 +- test/e2e/apps/cronjob.go | 2 +- test/e2e/apps/daemon_restart.go | 6 +- test/e2e/apps/daemon_set.go | 16 +- test/e2e/apps/deployment.go | 12 +- test/e2e/apps/rc.go | 26 +-- test/e2e/apps/replica_set.go | 14 +- test/e2e/apps/statefulset.go | 14 +- test/e2e/auth/audit.go | 30 +-- test/e2e/auth/service_accounts.go | 32 ++-- .../cluster_autoscaler_scalability.go | 6 +- .../autoscaling/cluster_size_autoscaling.go | 44 ++--- .../autoscaling/custom_metrics_autoscaling.go | 4 +- test/e2e/autoscaling/dns_autoscaling.go | 10 +- test/e2e/common/apparmor.go | 4 +- test/e2e/common/autoscaling_utils.go | 16 +- test/e2e/common/configmap.go | 4 +- test/e2e/common/configmap_volume.go | 20 +- test/e2e/common/events.go | 8 +- test/e2e/common/pods.go | 6 +- test/e2e/common/projected.go | 42 ++--- test/e2e/common/secrets.go | 4 +- test/e2e/common/secrets_volume.go | 18 +- test/e2e/common/sysctl.go | 2 +- test/e2e/common/util.go | 8 +- test/e2e/common/volumes.go | 2 +- test/e2e/e2e.go | 4 +- test/e2e/events.go | 6 +- test/e2e/examples.go | 6 +- test/e2e/framework/exec_util.go | 2 +- test/e2e/framework/firewall_util.go | 2 +- test/e2e/framework/framework.go | 8 +- test/e2e/framework/ingress_utils.go | 16 +- test/e2e/framework/jobs_util.go | 2 +- test/e2e/framework/kubelet_stats.go | 12 +- .../framework/metrics/api_server_metrics.go | 2 +- test/e2e/framework/metrics/kubelet_metrics.go | 2 +- test/e2e/framework/metrics/metrics_grabber.go | 8 +- test/e2e/framework/metrics_util.go | 10 +- test/e2e/framework/networking_utils.go | 4 +- test/e2e/framework/nodes_util.go | 2 +- test/e2e/framework/pods.go | 6 +- test/e2e/framework/rc_util.go | 12 +- test/e2e/framework/resource_usage_gatherer.go | 4 +- test/e2e/framework/service_util.go | 56 +++--- test/e2e/framework/statefulset_utils.go | 12 +- test/e2e/framework/util.go | 172 +++++++++--------- .../logging/utils/logging_agent.go | 2 +- .../instrumentation/monitoring/cadvisor.go | 4 +- .../monitoring/custom_metrics_stackdriver.go | 8 +- .../instrumentation/monitoring/influxdb.go | 18 +- .../monitoring/metrics_grabber.go | 4 +- test/e2e/kubectl/kubectl.go | 26 +-- test/e2e/kubectl/portforward.go | 10 +- test/e2e/lifecycle/addon_update.go | 4 +- test/e2e/lifecycle/reboot.go | 4 +- test/e2e/lifecycle/resize_nodes.go | 4 +- test/e2e/lifecycle/restart.go | 4 +- test/e2e/multicluster/ubernetes_lite.go | 10 +- test/e2e/network/dns.go | 26 +-- test/e2e/network/dns_common.go | 22 +-- test/e2e/network/dns_configmap.go | 4 +- test/e2e/network/example_cluster_dns.go | 4 +- test/e2e/network/networking.go | 2 +- test/e2e/network/proxy.go | 6 +- test/e2e/network/service.go | 12 +- test/e2e/network/service_latency.go | 12 +- test/e2e/network/serviceloadbalancers.go | 10 +- test/e2e/network_partition.go | 38 ++-- test/e2e/node/security_context.go | 4 +- test/e2e/pod_gc.go | 6 +- test/e2e/pre_stop.go | 16 +- test/e2e/scalability/density.go | 10 +- test/e2e/scalability/load.go | 6 +- test/e2e/scheduling/limit_range.go | 16 +- test/e2e/scheduling/resource_quota.go | 68 +++---- test/e2e/storage/empty_dir_wrapper.go | 18 +- test/e2e/storage/pd.go | 4 +- .../storage/persistent_volumes-disruptive.go | 2 +- test/e2e/storage/persistent_volumes-local.go | 16 +- test/e2e/ui/dashboard.go | 4 +- test/e2e/upgrades/apparmor.go | 2 +- test/e2e/upgrades/configmaps.go | 2 +- test/e2e/upgrades/kube_proxy_migration.go | 2 +- test/e2e/upgrades/secrets.go | 2 +- test/e2e/upgrades/sysctl.go | 4 +- 
test/e2e_node/benchmark_util.go | 2 +- test/e2e_node/critical_pod_test.go | 4 +- test/e2e_node/density_test.go | 4 +- test/e2e_node/garbage_collector_test.go | 2 +- test/e2e_node/gpus.go | 6 +- test/e2e_node/memory_eviction_test.go | 10 +- test/e2e_node/mirror_pod_test.go | 18 +- test/e2e_node/node_container_manager_test.go | 2 +- test/e2e_node/node_problem_detector_linux.go | 16 +- test/e2e_node/runtime_conformance_test.go | 4 +- test/e2e_node/util.go | 2 +- test/integration/apiserver/patch_test.go | 4 +- test/integration/client/client_test.go | 10 +- test/integration/configmap/configmap_test.go | 6 +- test/integration/deployment/util.go | 2 +- test/integration/evictions/evictions_test.go | 8 +- test/integration/framework/perf_utils.go | 4 +- .../garbage_collector_test.go | 42 ++--- .../integration/replicaset/replicaset_test.go | 26 +-- .../replicationcontroller_test.go | 24 +-- test/integration/scheduler/scheduler_test.go | 4 +- .../scheduler_perf/scheduler_test.go | 4 +- test/integration/secrets/secrets_test.go | 8 +- .../storageclasses/storage_classes_test.go | 4 +- test/integration/utils.go | 2 +- test/utils/density_utils.go | 8 +- test/utils/deployment.go | 2 +- test/utils/pod_store.go | 4 +- test/utils/runners.go | 32 ++-- 190 files changed, 921 insertions(+), 921 deletions(-) diff --git a/cmd/kube-controller-manager/app/autoscaling.go b/cmd/kube-controller-manager/app/autoscaling.go index 827d767b14b..a2578f5ea76 100644 --- a/cmd/kube-controller-manager/app/autoscaling.go +++ b/cmd/kube-controller-manager/app/autoscaling.go @@ -87,11 +87,11 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me replicaCalc := podautoscaler.NewReplicaCalculator( metricsClient, - hpaClient.Core(), + hpaClient.CoreV1(), ctx.Options.HorizontalPodAutoscalerTolerance, ) go podautoscaler.NewHorizontalController( - hpaClientGoClient.Core(), + hpaClientGoClient.CoreV1(), scaleClient, hpaClient.Autoscaling(), restMapper, diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index 69dccc882c8..33e4d2c40a2 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -250,7 +250,7 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) { api.Kind("ConfigMap"), } resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{ - QuotaClient: resourceQuotaControllerClient.Core(), + QuotaClient: resourceQuotaControllerClient.CoreV1(), ResourceQuotaInformer: ctx.InformerFactory.Core().V1().ResourceQuotas(), ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.Options.ResourceQuotaSyncPeriod.Duration), Registry: resourceQuotaRegistry, @@ -258,8 +258,8 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) { ReplenishmentResyncPeriod: ResyncPeriod(&ctx.Options), GroupKindsToReplenish: groupKindsToReplenish, } - if resourceQuotaControllerClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", resourceQuotaControllerClient.Core().RESTClient().GetRateLimiter()) + if resourceQuotaControllerClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", resourceQuotaControllerClient.CoreV1().RESTClient().GetRateLimiter()) } go resourcequotacontroller.NewResourceQuotaController( diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 169b05d6661..43b20d04f8b 100644 --- 
a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -540,7 +540,7 @@ func (gce *GCECloud) Initialize(clientBuilder controller.ControllerClientBuilder if gce.OnXPN() { gce.eventBroadcaster = record.NewBroadcaster() - gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(gce.client.Core().RESTClient()).Events("")}) + gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(gce.client.CoreV1().RESTClient()).Events("")}) gce.eventRecorder = gce.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gce-cloudprovider"}) } diff --git a/pkg/cloudprovider/providers/gce/gce_clusterid.go b/pkg/cloudprovider/providers/gce/gce_clusterid.go index 6f1b667c8ef..46b4ff4f6a0 100644 --- a/pkg/cloudprovider/providers/gce/gce_clusterid.go +++ b/pkg/cloudprovider/providers/gce/gce_clusterid.go @@ -101,7 +101,7 @@ func (gce *GCECloud) watchClusterID() { }, } - listerWatcher := cache.NewListWatchFromClient(gce.ClusterID.client.Core().RESTClient(), "configmaps", UIDNamespace, fields.Everything()) + listerWatcher := cache.NewListWatchFromClient(gce.ClusterID.client.CoreV1().RESTClient(), "configmaps", UIDNamespace, fields.Everything()) var controller cache.Controller gce.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler) @@ -189,7 +189,7 @@ func (ci *ClusterID) getOrInitialize() error { UIDProvider: newId, } - if _, err := ci.client.Core().ConfigMaps(UIDNamespace).Create(cfg); err != nil { + if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(cfg); err != nil { glog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) return err } diff --git a/pkg/controller/bootstrap/bootstrapsigner.go b/pkg/controller/bootstrap/bootstrapsigner.go index 07a987658be..a89c49121e7 100644 --- a/pkg/controller/bootstrap/bootstrapsigner.go +++ b/pkg/controller/bootstrap/bootstrapsigner.go @@ -99,19 +99,19 @@ func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions) secretNamespace: options.TokenSecretNamespace, syncQueue: workqueue.NewNamed("bootstrap_signer_queue"), } - if cl.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.Core().RESTClient().GetRateLimiter()) + if cl.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.CoreV1().RESTClient().GetRateLimiter()) } configMapSelector := fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ConfigMapName}) e.configMaps, e.configMapsController = cache.NewInformer( &cache.ListWatch{ ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { lo.FieldSelector = configMapSelector.String() - return e.client.Core().ConfigMaps(options.ConfigMapNamespace).List(lo) + return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).List(lo) }, WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { lo.FieldSelector = configMapSelector.String() - return e.client.Core().ConfigMaps(options.ConfigMapNamespace).Watch(lo) + return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).Watch(lo) }, }, &v1.ConfigMap{}, @@ -127,11 +127,11 @@ func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions) &cache.ListWatch{ ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { lo.FieldSelector = 
secretSelector.String() - return e.client.Core().Secrets(e.secretNamespace).List(lo) + return e.client.CoreV1().Secrets(e.secretNamespace).List(lo) }, WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { lo.FieldSelector = secretSelector.String() - return e.client.Core().Secrets(e.secretNamespace).Watch(lo) + return e.client.CoreV1().Secrets(e.secretNamespace).Watch(lo) }, }, &v1.Secret{}, @@ -227,7 +227,7 @@ func (e *BootstrapSigner) signConfigMap() { } func (e *BootstrapSigner) updateConfigMap(cm *v1.ConfigMap) { - _, err := e.client.Core().ConfigMaps(cm.Namespace).Update(cm) + _, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(cm) if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) { glog.V(3).Infof("Error updating ConfigMap: %v", err) } diff --git a/pkg/controller/bootstrap/tokencleaner.go b/pkg/controller/bootstrap/tokencleaner.go index 38328edda20..587effde641 100644 --- a/pkg/controller/bootstrap/tokencleaner.go +++ b/pkg/controller/bootstrap/tokencleaner.go @@ -71,8 +71,8 @@ func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) *Token client: cl, tokenSecretNamespace: options.TokenSecretNamespace, } - if cl.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.Core().RESTClient().GetRateLimiter()) + if cl.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.CoreV1().RESTClient().GetRateLimiter()) } secretSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(bootstrapapi.SecretTypeBootstrapToken)}) @@ -80,11 +80,11 @@ func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) *Token &cache.ListWatch{ ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { lo.FieldSelector = secretSelector.String() - return e.client.Core().Secrets(e.tokenSecretNamespace).List(lo) + return e.client.CoreV1().Secrets(e.tokenSecretNamespace).List(lo) }, WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { lo.FieldSelector = secretSelector.String() - return e.client.Core().Secrets(e.tokenSecretNamespace).Watch(lo) + return e.client.CoreV1().Secrets(e.tokenSecretNamespace).Watch(lo) }, }, &v1.Secret{}, @@ -118,7 +118,7 @@ func (tc *TokenCleaner) evalSecret(o interface{}) { if len(secret.UID) > 0 { options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}} } - err := tc.client.Core().Secrets(secret.Namespace).Delete(secret.Name, options) + err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(secret.Name, options) // NotFound isn't a real error (it's already been deleted) // Conflict isn't a real error (the UID precondition failed) if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) { diff --git a/pkg/controller/certificates/certificate_controller.go b/pkg/controller/certificates/certificate_controller.go index 1484f4d41a5..4f06e56b91c 100644 --- a/pkg/controller/certificates/certificate_controller.go +++ b/pkg/controller/certificates/certificate_controller.go @@ -57,7 +57,7 @@ func NewCertificateController( // Send events to the apiserver eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) cc := 
&CertificateController{ kubeClient: kubeClient, diff --git a/pkg/controller/cloud/pvlcontroller.go b/pkg/controller/cloud/pvlcontroller.go index 1a75a9c2867..3ae1fbf6bc9 100644 --- a/pkg/controller/cloud/pvlcontroller.go +++ b/pkg/controller/cloud/pvlcontroller.go @@ -235,7 +235,7 @@ func (pvlc *PersistentVolumeLabelController) updateVolume(vol *v1.PersistentVolu return err } - _, err = pvlc.kubeClient.Core().PersistentVolumes().Patch(string(volName), types.StrategicMergePatchType, patchBytes) + _, err = pvlc.kubeClient.CoreV1().PersistentVolumes().Patch(string(volName), types.StrategicMergePatchType, patchBytes) if err != nil { return fmt.Errorf("failed to update PersistentVolume %s: %v", volName, err) } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 360f2091b2d..497bdb16e7b 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -546,7 +546,7 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v } func (r RealPodControl) PatchPod(namespace, name string, data []byte) error { - _, err := r.KubeClient.Core().Pods(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, data) return err } @@ -589,7 +589,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT if labels.Set(pod.Labels).AsSelectorPreValidated().Empty() { return fmt.Errorf("unable to create pods, no labels") } - if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil { + if newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(pod); err != nil { r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err) return err } else { @@ -610,7 +610,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime return fmt.Errorf("object does not have ObjectMeta, %v", err) } glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) - if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil { + if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil { r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) return fmt.Errorf("unable to delete pods: %v", err) } else { @@ -925,10 +925,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. if firstTry { - oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -982,10 +982,10 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. 
if firstTry { - oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -1030,7 +1030,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) } - _, err = c.Core().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes) return err } diff --git a/pkg/controller/cronjob/cronjob_controller.go b/pkg/controller/cronjob/cronjob_controller.go index 682ae721a4a..dab9da6e26e 100644 --- a/pkg/controller/cronjob/cronjob_controller.go +++ b/pkg/controller/cronjob/cronjob_controller.go @@ -70,10 +70,10 @@ func NewCronJobController(kubeClient clientset.Interface) *CronJobController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } jm := &CronJobController{ diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index 0b0328eee82..0bd8b775ca7 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -134,10 +134,10 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. 
- eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } dsc := &DaemonSetsController{ kubeClient: kubeClient, diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go index 1ed7f3f0f5e..bbc9015a28a 100644 --- a/pkg/controller/daemon/update.go +++ b/pkg/controller/daemon/update.go @@ -228,7 +228,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur toUpdate.Labels = make(map[string]string) } toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] - _, err = dsc.kubeClient.Core().Pods(ds.Namespace).Update(toUpdate) + _, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate) if err != nil { return nil, err } diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index d3d5c7d4258..8c44889e4f3 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -101,10 +101,10 @@ func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, r eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")}) - if client != nil && client.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter()) + if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.CoreV1().RESTClient().GetRateLimiter()) } dc := &DeploymentController{ client: client, diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index e9bd1d6317b..d017e33709e 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -721,7 +721,7 @@ func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister cor } // Only label the pod that doesn't already have the new hash if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash { - _, err := UpdatePodWithRetries(c.Core().Pods(namespace), podLister, pod.Namespace, pod.Name, + _, err := UpdatePodWithRetries(c.CoreV1().Pods(namespace), podLister, pod.Namespace, pod.Name, func(podToUpdate *v1.Pod) error { // Precondition: the pod doesn't contain the new hash in its label. 
if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash { diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go index ab474ed2228..dced7564f54 100644 --- a/pkg/controller/disruption/disruption.go +++ b/pkg/controller/disruption/disruption.go @@ -294,7 +294,7 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) { if dc.kubeClient != nil { glog.Infof("Sending events to api server.") - dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(dc.kubeClient.Core().RESTClient()).Events("")}) + dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(dc.kubeClient.CoreV1().RESTClient()).Events("")}) } else { glog.Infof("No api server defined - no events will be sent to API server.") } diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index 13ea96fd81a..cdd02eb86a8 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -75,8 +75,8 @@ var ( // NewEndpointController returns a new *EndpointController. func NewEndpointController(podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer, endpointsInformer coreinformers.EndpointsInformer, client clientset.Interface) *EndpointController { - if client != nil && client.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().RESTClient().GetRateLimiter()) + if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.CoreV1().RESTClient().GetRateLimiter()) } e := &EndpointController{ client: client, @@ -395,7 +395,7 @@ func (e *EndpointController) syncService(key string) error { // service is deleted. However, if we're down at the time when // the service is deleted, we will miss that deletion, so this // doesn't completely solve the problem. See #6877. 
- err = e.client.Core().Endpoints(namespace).Delete(name, nil) + err = e.client.CoreV1().Endpoints(namespace).Delete(name, nil) if err != nil && !errors.IsNotFound(err) { return err } @@ -508,10 +508,10 @@ func (e *EndpointController) syncService(key string) error { glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps) if createEndpoints { // No previous endpoints, create them - _, err = e.client.Core().Endpoints(service.Namespace).Create(newEndpoints) + _, err = e.client.CoreV1().Endpoints(service.Namespace).Create(newEndpoints) } else { // Pre-existing - _, err = e.client.Core().Endpoints(service.Namespace).Update(newEndpoints) + _, err = e.client.CoreV1().Endpoints(service.Namespace).Update(newEndpoints) } if err != nil { if createEndpoints && errors.IsForbidden(err) { diff --git a/pkg/controller/namespace/namespace_controller.go b/pkg/controller/namespace/namespace_controller.go index c3bdd28be7a..32a0bb33fd9 100644 --- a/pkg/controller/namespace/namespace_controller.go +++ b/pkg/controller/namespace/namespace_controller.go @@ -72,11 +72,11 @@ func NewNamespaceController( // create the controller so we can inject the enqueue function namespaceController := &NamespaceController{ queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"), - namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(kubeClient.Core().Namespaces(), clientPool, kubeClient.Core(), discoverResourcesFn, finalizerToken, true), + namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(kubeClient.CoreV1().Namespaces(), clientPool, kubeClient.CoreV1(), discoverResourcesFn, finalizerToken, true), } - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } // configure the namespace informer event handlers diff --git a/pkg/controller/node/ipam/adapter.go b/pkg/controller/node/ipam/adapter.go index 00a91535c8e..6a5d9e480b0 100644 --- a/pkg/controller/node/ipam/adapter.go +++ b/pkg/controller/node/ipam/adapter.go @@ -52,7 +52,7 @@ func newAdapter(k8s clientset.Interface, cloud *gce.GCECloud) *adapter { ret.recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"}) glog.V(0).Infof("Sending events to api server.") broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ - Interface: v1core.New(k8s.Core().RESTClient()).Events(""), + Interface: v1core.New(k8s.CoreV1().RESTClient()).Events(""), }) return ret @@ -86,7 +86,7 @@ func (a *adapter) AddAlias(ctx context.Context, nodeName string, cidrRange *net. 
} func (a *adapter) Node(ctx context.Context, name string) (*v1.Node, error) { - return a.k8s.Core().Nodes().Get(name, metav1.GetOptions{}) + return a.k8s.CoreV1().Nodes().Get(name, metav1.GetOptions{}) } func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error { @@ -101,7 +101,7 @@ func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRang return err } - _, err = a.k8s.Core().Nodes().Patch(node.Name, types.StrategicMergePatchType, bytes) + _, err = a.k8s.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, bytes) return err } diff --git a/pkg/controller/node/ipam/cidr_allocator.go b/pkg/controller/node/ipam/cidr_allocator.go index 6fa8a684e06..56523009a0e 100644 --- a/pkg/controller/node/ipam/cidr_allocator.go +++ b/pkg/controller/node/ipam/cidr_allocator.go @@ -107,7 +107,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) { // controller manager to restart. if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) { var err error - nodeList, err = kubeClient.Core().Nodes().List(metav1.ListOptions{ + nodeList, err = kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ FieldSelector: fields.Everything().String(), LabelSelector: labels.Everything().String(), }) diff --git a/pkg/controller/node/ipam/cloud_cidr_allocator.go b/pkg/controller/node/ipam/cloud_cidr_allocator.go index 5403ebb91a1..778f6a6a158 100644 --- a/pkg/controller/node/ipam/cloud_cidr_allocator.go +++ b/pkg/controller/node/ipam/cloud_cidr_allocator.go @@ -71,7 +71,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) eventBroadcaster.StartLogging(glog.Infof) glog.V(0).Infof("Sending events to api server.") - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")}) gceCloud, ok := cloud.(*gce.GCECloud) if !ok { @@ -170,7 +170,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(data nodeAndCIDR) error { podCIDR := data.cidr.String() for rep := 0; rep < cidrUpdateRetries; rep++ { // TODO: change it to using PATCH instead of full Node updates. 
- node, err = ca.client.Core().Nodes().Get(data.nodeName, metav1.GetOptions{}) + node, err = ca.client.CoreV1().Nodes().Get(data.nodeName, metav1.GetOptions{}) if err != nil { glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err) continue @@ -189,7 +189,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(data nodeAndCIDR) error { // See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248 } node.Spec.PodCIDR = podCIDR - if _, err = ca.client.Core().Nodes().Update(node); err == nil { + if _, err = ca.client.CoreV1().Nodes().Update(node); err == nil { glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) break } diff --git a/pkg/controller/node/ipam/range_allocator.go b/pkg/controller/node/ipam/range_allocator.go index cacc47a8617..5fdb90145a6 100644 --- a/pkg/controller/node/ipam/range_allocator.go +++ b/pkg/controller/node/ipam/range_allocator.go @@ -68,7 +68,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, s recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) eventBroadcaster.StartLogging(glog.Infof) glog.V(0).Infof("Sending events to api server.") - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")}) ra := &rangeAllocator{ client: client, @@ -228,7 +228,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { podCIDR := data.cidr.String() for rep := 0; rep < cidrUpdateRetries; rep++ { // TODO: change it to using PATCH instead of full Node updates. - node, err = r.client.Core().Nodes().Get(data.nodeName, metav1.GetOptions{}) + node, err = r.client.CoreV1().Nodes().Get(data.nodeName, metav1.GetOptions{}) if err != nil { glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err) continue @@ -245,7 +245,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { return nil } node.Spec.PodCIDR = podCIDR - if _, err = r.client.Core().Nodes().Update(node); err == nil { + if _, err = r.client.CoreV1().Nodes().Update(node); err == nil { glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) break } diff --git a/pkg/controller/node/node_controller.go b/pkg/controller/node/node_controller.go index 208b22fd73b..900bbd62222 100644 --- a/pkg/controller/node/node_controller.go +++ b/pkg/controller/node/node_controller.go @@ -246,11 +246,11 @@ func NewNodeController( glog.V(0).Infof("Sending events to api server.") eventBroadcaster.StartRecordingToSink( &v1core.EventSinkImpl{ - Interface: v1core.New(kubeClient.Core().RESTClient()).Events(""), + Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""), }) - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } if allocateNodeCIDRs { @@ -648,7 +648,7 @@ func (nc *Controller) monitorNodeStatus() error { return true, nil } name := node.Name - node, err = nc.kubeClient.Core().Nodes().Get(name, metav1.GetOptions{}) + node, err = 
nc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name) return false, err @@ -1055,7 +1055,7 @@ func (nc *Controller) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.Node _, currentCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) if !apiequality.Semantic.DeepEqual(currentCondition, &observedReadyCondition) { - if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil { + if _, err = nc.kubeClient.CoreV1().Nodes().UpdateStatus(node); err != nil { glog.Errorf("Error updating node %s: %v", node.Name, err) return gracePeriod, observedReadyCondition, currentReadyCondition, err } diff --git a/pkg/controller/node/scheduler/taint_controller.go b/pkg/controller/node/scheduler/taint_controller.go index bab01348968..574752a6b21 100644 --- a/pkg/controller/node/scheduler/taint_controller.go +++ b/pkg/controller/node/scheduler/taint_controller.go @@ -88,7 +88,7 @@ func deletePodHandler(c clientset.Interface, emitEventFunc func(types.Namespaced } var err error for i := 0; i < retries; i++ { - err = c.Core().Pods(ns).Delete(name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(ns).Delete(name, &metav1.DeleteOptions{}) if err == nil { break } @@ -110,12 +110,12 @@ func getNoExecuteTaints(taints []v1.Taint) []v1.Taint { func getPodsAssignedToNode(c clientset.Interface, nodeName string) ([]v1.Pod, error) { selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}) - pods, err := c.Core().Pods(v1.NamespaceAll).List(metav1.ListOptions{ + pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{ FieldSelector: selector.String(), LabelSelector: labels.Everything().String(), }) for i := 0; i < retries && err != nil; i++ { - pods, err = c.Core().Pods(v1.NamespaceAll).List(metav1.ListOptions{ + pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{ FieldSelector: selector.String(), LabelSelector: labels.Everything().String(), }) @@ -156,7 +156,7 @@ func NewNoExecuteTaintManager(c clientset.Interface) *NoExecuteTaintManager { eventBroadcaster.StartLogging(glog.Infof) if c != nil { glog.V(0).Infof("Sending events to api server.") - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(c.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(c.CoreV1().RESTClient()).Events("")}) } else { glog.Fatalf("kubeClient is nil when starting NodeController") } diff --git a/pkg/controller/node/util/controller_utils.go b/pkg/controller/node/util/controller_utils.go index d8bc43fad6a..34e1ff0f453 100644 --- a/pkg/controller/node/util/controller_utils.go +++ b/pkg/controller/node/util/controller_utils.go @@ -55,7 +55,7 @@ func DeletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n remaining := false selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String() options := metav1.ListOptions{FieldSelector: selector} - pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(options) + pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(options) var updateErrList []error if err != nil { @@ -93,7 +93,7 @@ func DeletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n glog.V(2).Infof("Starting deletion of pod %v/%v", pod.Namespace, pod.Name) recorder.Eventf(&pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, 
nodeName) - if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { + if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { return false, err } remaining = true @@ -118,7 +118,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa var updatedPod *v1.Pod var err error - if updatedPod, err = kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod); err != nil { + if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod); err != nil { return nil, err } return updatedPod, nil @@ -127,7 +127,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa // ForcefullyDeleteNode deletes the node immediately. The pods on the // node are cleaned up by the podGC. func ForcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error { - if err := kubeClient.Core().Nodes().Delete(nodeName, nil); err != nil { + if err := kubeClient.CoreV1().Nodes().Delete(nodeName, nil); err != nil { return fmt.Errorf("unable to delete node %q: %v", nodeName, err) } return nil @@ -139,7 +139,7 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error { nodeName := node.Name glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName) opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()} - pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(opts) + pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(opts) if err != nil { return err } @@ -155,7 +155,7 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error { if cond.Type == v1.PodReady { pod.Status.Conditions[i].Status = v1.ConditionFalse glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name) - _, err := kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod) + _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(&pod) if err != nil { glog.Warningf("Failed to update status for pod %q: %v", format.Pod(&pod), err) errMsg = append(errMsg, fmt.Sprintf("%v", err)) diff --git a/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go b/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go index 36c24da2f63..5d13acbd65b 100644 --- a/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go +++ b/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go @@ -54,8 +54,8 @@ type HeapsterMetricsClient struct { func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) MetricsClient { return &HeapsterMetricsClient{ - services: client.Core().Services(namespace), - podsGetter: client.Core(), + services: client.CoreV1().Services(namespace), + podsGetter: client.CoreV1(), heapsterScheme: scheme, heapsterService: service, heapsterPort: port, diff --git a/pkg/controller/podgc/gc_controller.go b/pkg/controller/podgc/gc_controller.go index 76179b736b2..8badcc4ea37 100644 --- a/pkg/controller/podgc/gc_controller.go +++ b/pkg/controller/podgc/gc_controller.go @@ -52,15 +52,15 @@ type PodGCController struct { } func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer, terminatedPodThreshold int) *PodGCController { - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + 
metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } gcc := &PodGCController{ kubeClient: kubeClient, terminatedPodThreshold: terminatedPodThreshold, deletePod: func(namespace, name string) error { glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name) - return kubeClient.Core().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)) + return kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)) }, } @@ -143,7 +143,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) { func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) { glog.V(4).Infof("GC'ing orphaned") // We want to get list of Nodes from the etcd, to make sure that it's as fresh as possible. - nodes, err := gcc.kubeClient.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := gcc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return } diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go index 3a24370b078..2186e75df34 100644 --- a/pkg/controller/replicaset/replica_set.go +++ b/pkg/controller/replicaset/replica_set.go @@ -95,12 +95,12 @@ type ReplicaSetController struct { // NewReplicaSetController configures a replica set controller with the specified event recorder func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController { - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) rsc := &ReplicaSetController{ kubeClient: kubeClient, diff --git a/pkg/controller/replication/replication_controller.go b/pkg/controller/replication/replication_controller.go index ff032f84035..4f7be18143f 100644 --- a/pkg/controller/replication/replication_controller.go +++ b/pkg/controller/replication/replication_controller.go @@ -90,13 +90,13 @@ type ReplicationManager struct { // NewReplicationManager configures a replication manager with the specified event recorder func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager { - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: 
v1core.New(kubeClient.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) rm := &ReplicationManager{ kubeClient: kubeClient, @@ -651,7 +651,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error { newStatus := calculateStatus(rc, filteredPods, manageReplicasErr) // Always updates status as pods come up or die. - updatedRC, err := updateReplicationControllerStatus(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), *rc, newStatus) + updatedRC, err := updateReplicationControllerStatus(rm.kubeClient.CoreV1().ReplicationControllers(rc.Namespace), *rc, newStatus) if err != nil { // Multiple things could lead to this update failing. Returning an error causes a requeue without forcing a hotloop return err diff --git a/pkg/controller/route/route_controller.go b/pkg/controller/route/route_controller.go index 352527a3d97..9a399a02709 100644 --- a/pkg/controller/route/route_controller.go +++ b/pkg/controller/route/route_controller.go @@ -103,7 +103,7 @@ func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration) } if rc.broadcaster != nil { - rc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(rc.kubeClient.Core().RESTClient()).Events("")}) + rc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(rc.kubeClient.CoreV1().RESTClient()).Events("")}) } // TODO: If we do just the full Resync every 5 minutes (default value) diff --git a/pkg/controller/service/service_controller.go b/pkg/controller/service/service_controller.go index f37314b15de..7c7f2cd6f4a 100644 --- a/pkg/controller/service/service_controller.go +++ b/pkg/controller/service/service_controller.go @@ -113,11 +113,11 @@ func New( ) (*ServiceController, error) { broadcaster := record.NewBroadcaster() broadcaster.StartLogging(glog.Infof) - broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "service-controller"}) - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } s := &ServiceController{ @@ -327,7 +327,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S func (s *ServiceController) persistUpdate(service *v1.Service) error { var err error for i := 0; i < clientRetryCount; i++ { - _, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service) + _, err = s.kubeClient.CoreV1().Services(service.Namespace).UpdateStatus(service) if err == nil { return nil } diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller.go b/pkg/controller/serviceaccount/serviceaccounts_controller.go index a84ce61382d..54dd3d4b8bb 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller.go @@ -67,8 +67,8 @@ func NewServiceAccountsController(saInformer coreinformers.ServiceAccountInforme 
serviceAccountsToEnsure: options.ServiceAccounts, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"), } - if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().RESTClient().GetRateLimiter()) + if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.CoreV1().RESTClient().GetRateLimiter()) } saInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -210,7 +210,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error { // TODO eliminate this once the fake client can handle creation without NS sa.Namespace = ns.Name - if _, err := c.client.Core().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) { + if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) { createFailures = append(createFailures, err) } } diff --git a/pkg/controller/serviceaccount/tokengetter.go b/pkg/controller/serviceaccount/tokengetter.go index e81f1381573..08e7cd1ce4d 100644 --- a/pkg/controller/serviceaccount/tokengetter.go +++ b/pkg/controller/serviceaccount/tokengetter.go @@ -44,10 +44,10 @@ func NewGetterFromClient(c clientset.Interface) serviceaccount.ServiceAccountTok return clientGetter{c} } func (c clientGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error) { - return c.client.Core().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) } func (c clientGetter) GetSecret(namespace, name string) (*v1.Secret, error) { - return c.client.Core().Secrets(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) } // registryGetter implements ServiceAccountTokenGetter using a service account and secret registry diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go index f8d5851101c..ed33cadc465 100644 --- a/pkg/controller/serviceaccount/tokens_controller.go +++ b/pkg/controller/serviceaccount/tokens_controller.go @@ -86,8 +86,8 @@ func NewTokensController(serviceAccounts informers.ServiceAccountInformer, secre maxRetries: maxRetries, } - if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_tokens_controller", cl.Core().RESTClient().GetRateLimiter()) + if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_tokens_controller", cl.CoreV1().RESTClient().GetRateLimiter()) } e.serviceAccounts = serviceAccounts.Lister() @@ -344,7 +344,7 @@ func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry if len(uid) > 0 { opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}} } - err := e.client.Core().Secrets(ns).Delete(name, opts) + err := e.client.CoreV1().Secrets(ns).Delete(name, opts) // NotFound doesn't need a retry (it's already been deleted) // Conflict doesn't need a retry (the UID precondition failed) if err == nil || apierrors.IsNotFound(err) || apierrors.IsConflict(err) { @@ -366,7 +366,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou // We don't want to update the cache's copy of the service account // 
so add the secret to a freshly retrieved copy of the service account - serviceAccounts := e.client.Core().ServiceAccounts(serviceAccount.Namespace) + serviceAccounts := e.client.CoreV1().ServiceAccounts(serviceAccount.Namespace) liveServiceAccount, err := serviceAccounts.Get(serviceAccount.Name, metav1.GetOptions{}) if err != nil { // Retry if we cannot fetch the live service account (for a NotFound error, either the live lookup or our cache are stale) @@ -405,7 +405,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou } // Save the secret - createdToken, err := e.client.Core().Secrets(serviceAccount.Namespace).Create(secret) + createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(secret) if err != nil { // retriable error return true, err @@ -455,7 +455,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou // we weren't able to use the token, try to clean it up. glog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err) deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}} - if deleteErr := e.client.Core().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil { + if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil { glog.Error(deleteErr) // if we fail, just log it } } @@ -513,7 +513,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou // We don't want to update the cache's copy of the secret // so add the token to a freshly retrieved copy of the secret - secrets := e.client.Core().Secrets(cachedSecret.Namespace) + secrets := e.client.CoreV1().Secrets(cachedSecret.Namespace) liveSecret, err := secrets.Get(cachedSecret.Name, metav1.GetOptions{}) if err != nil { // Retry for any error other than a NotFound @@ -577,7 +577,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou func (e *TokensController) removeSecretReference(saNamespace string, saName string, saUID types.UID, secretName string) error { // We don't want to update the cache's copy of the service account // so remove the secret from a freshly retrieved copy of the service account - serviceAccounts := e.client.Core().ServiceAccounts(saNamespace) + serviceAccounts := e.client.CoreV1().ServiceAccounts(saNamespace) serviceAccount, err := serviceAccounts.Get(saName, metav1.GetOptions{}) // Ignore NotFound errors when attempting to remove a reference if apierrors.IsNotFound(err) { @@ -631,7 +631,7 @@ func (e *TokensController) getServiceAccount(ns string, name string, uid types.U } // Live lookup - sa, err = e.client.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + sa, err = e.client.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return nil, nil } @@ -667,7 +667,7 @@ func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetc } // Live lookup - secret, err := e.client.Core().Secrets(ns).Get(name, metav1.GetOptions{}) + secret, err := e.client.CoreV1().Secrets(ns).Get(name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return nil, nil } diff --git a/pkg/controller/statefulset/stateful_pod_control.go b/pkg/controller/statefulset/stateful_pod_control.go index ef33a78b16b..fff08046297 100644 --- a/pkg/controller/statefulset/stateful_pod_control.go +++ b/pkg/controller/statefulset/stateful_pod_control.go 
@@ -77,7 +77,7 @@ func (spc *realStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod return err } // If we created the PVCs attempt to create the Pod - _, err := spc.client.Core().Pods(set.Namespace).Create(pod) + _, err := spc.client.CoreV1().Pods(set.Namespace).Create(pod) // sink already exists errors if apierrors.IsAlreadyExists(err) { return err @@ -113,7 +113,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod attemptedUpdate = true // commit the update, retrying on conflicts - _, updateErr := spc.client.Core().Pods(set.Namespace).Update(pod) + _, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(pod) if updateErr == nil { return nil } @@ -134,7 +134,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod } func (spc *realStatefulPodControl) DeleteStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error { - err := spc.client.Core().Pods(set.Namespace).Delete(pod.Name, nil) + err := spc.client.CoreV1().Pods(set.Namespace).Delete(pod.Name, nil) spc.recordPodEvent("delete", set, pod, err) return err } @@ -182,7 +182,7 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef _, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name) switch { case apierrors.IsNotFound(err): - _, err := spc.client.Core().PersistentVolumeClaims(claim.Namespace).Create(&claim) + _, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(&claim) if err != nil { errs = append(errs, fmt.Errorf("Failed to create PVC %s: %s", claim.Name, err)) } diff --git a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go index 03c9c28dac2..e9db4c10b44 100644 --- a/pkg/controller/statefulset/stateful_set.go +++ b/pkg/controller/statefulset/stateful_set.go @@ -85,7 +85,7 @@ func NewStatefulSetController( ) *StatefulSetController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset-controller"}) ssc := &StatefulSetController{ diff --git a/pkg/controller/ttl/ttl_controller.go b/pkg/controller/ttl/ttl_controller.go index 3581e0519f0..2938ca8270b 100644 --- a/pkg/controller/ttl/ttl_controller.go +++ b/pkg/controller/ttl/ttl_controller.go @@ -263,7 +263,7 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey if err != nil { return err } - _, err = ttlc.kubeClient.Core().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes) + _, err = ttlc.kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes) if err != nil { glog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err) return err diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go index 847ab12d769..ca6d0a5d3e0 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -134,7 +134,7 @@ func NewAttachDetachController( eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - 
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach-controller"}) adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr) diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go index 9f8b5865801..283a910f24c 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go @@ -239,7 +239,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 for _, newPod := range extraPods1 { // Add a new pod between ASW and DSW ppoulators - _, err = adc.kubeClient.Core().Pods(newPod.ObjectMeta.Namespace).Create(newPod) + _, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(newPod) if err != nil { t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err) } @@ -256,7 +256,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 for _, newPod := range extraPods2 { // Add a new pod between DSW ppoulator and reconciler run - _, err = adc.kubeClient.Core().Pods(newPod.ObjectMeta.Namespace).Create(newPod) + _, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(newPod) if err != nil { t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err) } diff --git a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go index e1b31fbccc8..ab6dd05ecc4 100644 --- a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go +++ b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go @@ -129,10 +129,10 @@ func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj err) } - _, err = nsu.kubeClient.Core().Nodes().PatchStatus(string(nodeName), patchBytes) + _, err = nsu.kubeClient.CoreV1().Nodes().PatchStatus(string(nodeName), patchBytes) if err != nil { return fmt.Errorf( - "failed to kubeClient.Core().Nodes().Patch for node %q. %v", + "failed to kubeClient.CoreV1().Nodes().Patch for node %q. 
%v", nodeName, err) } diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index 3c6b60fb650..efe3b511adc 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -641,7 +641,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo return claim, nil } - newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone) if err != nil { glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err) return newClaim, err @@ -697,7 +697,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV volumeClone.Status.Phase = phase volumeClone.Status.Message = message - newVol, err := ctrl.kubeClient.Core().PersistentVolumes().UpdateStatus(volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(volumeClone) if err != nil { glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) return newVol, err @@ -775,7 +775,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV // Save the volume only if something was changed if dirty { glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volume.Name) - newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) if err != nil { glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volume.Name, claimToClaimKey(claim), err) return newVol, err @@ -829,7 +829,7 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo if dirty { glog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) - newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone) if err != nil { glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) return newClaim, err @@ -916,7 +916,7 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume volumeClone.Spec.ClaimRef.UID = "" } - newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) if err != nil { glog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err) return err @@ -977,7 +977,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) // This method may have been waiting for a volume lock for some time. // Previous recycleVolumeOperation might just have saved an updated version, // so read current volume state now. 
- newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) + newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) if err != nil { glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err) return @@ -1056,7 +1056,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) e // This method may have been waiting for a volume lock for some time. // Previous deleteVolumeOperation might just have saved an updated version, so // read current volume state now. - newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) + newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) if err != nil { glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err) return nil @@ -1100,7 +1100,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) e glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name) // Delete the volume - if err = ctrl.kubeClient.Core().PersistentVolumes().Delete(volume.Name, nil); err != nil { + if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil { // Oops, could not delete the volume and therefore the controller will // try to delete the volume again on next update. We _could_ maintain a // cache of "recently deleted volumes" and avoid unnecessary deletion, @@ -1260,7 +1260,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa // yet. pvName := ctrl.getProvisionedVolumeNameForClaim(claim) - volume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) if err == nil && volume != nil { // Volume has been already provisioned, nothing to do. glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim)) @@ -1338,7 +1338,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { glog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) var newVol *v1.PersistentVolume - if newVol, err = ctrl.kubeClient.Core().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) { + if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) { // Save succeeded. 
if err != nil { glog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim)) diff --git a/pkg/controller/volume/persistentvolume/pv_controller_base.go b/pkg/controller/volume/persistentvolume/pv_controller_base.go index 73af960ace3..f40086a8031 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_base.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_base.go @@ -71,7 +71,7 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error) if eventRecorder == nil { broadcaster := record.NewBroadcaster() broadcaster.StartLogging(glog.Infof) - broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(p.KubeClient.Core().RESTClient()).Events("")}) + broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(p.KubeClient.CoreV1().RESTClient()).Events("")}) eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"}) } @@ -425,7 +425,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent // modify these, therefore create a copy. claimClone := claim.DeepCopy() metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annStorageProvisioner, class.Provisioner) - newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone) if err != nil { return newClaim, err } diff --git a/pkg/kubelet/config/apiserver.go b/pkg/kubelet/config/apiserver.go index 016c33bf720..cbc10c29515 100644 --- a/pkg/kubelet/config/apiserver.go +++ b/pkg/kubelet/config/apiserver.go @@ -31,7 +31,7 @@ import ( // NewSourceApiserver creates a config source that watches and pulls from the apiserver. func NewSourceApiserver(c clientset.Interface, nodeName types.NodeName, updates chan<- interface{}) { - lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName))) + lw := cache.NewListWatchFromClient(c.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName))) newSourceApiserverFromLW(lw, updates) } diff --git a/pkg/kubelet/configmap/configmap_manager.go b/pkg/kubelet/configmap/configmap_manager.go index cf5d263bdf0..71a7ae14854 100644 --- a/pkg/kubelet/configmap/configmap_manager.go +++ b/pkg/kubelet/configmap/configmap_manager.go @@ -64,7 +64,7 @@ func NewSimpleConfigMapManager(kubeClient clientset.Interface) Manager { } func (s *simpleConfigMapManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) { - return s.kubeClient.Core().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) + return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) } func (s *simpleConfigMapManager) RegisterPod(pod *v1.Pod) { @@ -216,7 +216,7 @@ func (s *configMapStore) Get(namespace, name string) (*v1.ConfigMap, error) { // etcd and apiserver (the cache is eventually consistent). util.FromApiserverCache(&opts) } - configMap, err := s.kubeClient.Core().ConfigMaps(namespace).Get(name, opts) + configMap, err := s.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, opts) if err != nil && !apierrors.IsNotFound(err) && data.configMap == nil && data.err == nil { // Couldn't fetch the latest configmap, but there is no cached data to return. // Return the fetch result instead. 
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 6662772c32e..6a8e7729b49 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -439,7 +439,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, serviceIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) if kubeDeps.KubeClient != nil { - serviceLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.Core().RESTClient(), "services", metav1.NamespaceAll, fields.Everything()) + serviceLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.CoreV1().RESTClient(), "services", metav1.NamespaceAll, fields.Everything()) r := cache.NewReflector(serviceLW, &v1.Service{}, serviceIndexer, 0) go r.Run(wait.NeverStop) } @@ -448,7 +448,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) if kubeDeps.KubeClient != nil { fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector() - nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.Core().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector) + nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.CoreV1().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector) r := cache.NewReflector(nodeLW, &v1.Node{}, nodeIndexer, 0) go r.Run(wait.NeverStop) } diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index f87d14b8c38..aef50b7f334 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -97,7 +97,7 @@ func (kl *Kubelet) registerWithAPIServer() { // a different externalID value, it attempts to delete that node so that a // later attempt can recreate it. 
func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { - _, err := kl.kubeClient.Core().Nodes().Create(node) + _, err := kl.kubeClient.CoreV1().Nodes().Create(node) if err == nil { return true } @@ -107,7 +107,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { return false } - existingNode, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName), metav1.GetOptions{}) + existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(string(kl.nodeName), metav1.GetOptions{}) if err != nil { glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err) return false @@ -146,7 +146,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { "Previously node %q had externalID %q; now it is %q; will delete and recreate.", kl.nodeName, node.Spec.ExternalID, existingNode.Spec.ExternalID, ) - if err := kl.kubeClient.Core().Nodes().Delete(node.Name, nil); err != nil { + if err := kl.kubeClient.CoreV1().Nodes().Delete(node.Name, nil); err != nil { glog.Errorf("Unable to register node %q with API server: error deleting old node: %v", kl.nodeName, err) } else { glog.Infof("Deleted old node object %q", kl.nodeName) diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 480c841e189..768aee62d73 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -1777,13 +1777,13 @@ func hasHostNamespace(pod *v1.Pod) bool { func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool { for _, volume := range pod.Spec.Volumes { if volume.PersistentVolumeClaim != nil { - pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) + pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) if err != nil { glog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err) continue } if pvc != nil { - referencedVolume, err := kl.kubeClient.Core().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) + referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { glog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err) continue diff --git a/pkg/kubelet/kubeletconfig/watch.go b/pkg/kubelet/kubeletconfig/watch.go index 2c0363c7cab..ecc255b962f 100644 --- a/pkg/kubelet/kubeletconfig/watch.go +++ b/pkg/kubelet/kubeletconfig/watch.go @@ -47,12 +47,12 @@ func newSharedNodeInformer(client clientset.Interface, nodeName string, lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (kuberuntime.Object, error) { - return client.Core().Nodes().List(metav1.ListOptions{ + return client.CoreV1().Nodes().List(metav1.ListOptions{ FieldSelector: fieldselector.String(), }) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return client.Core().Nodes().Watch(metav1.ListOptions{ + return client.CoreV1().Nodes().Watch(metav1.ListOptions{ FieldSelector: fieldselector.String(), ResourceVersion: options.ResourceVersion, }) diff --git a/pkg/kubelet/pod/mirror_client.go b/pkg/kubelet/pod/mirror_client.go index fc81acec2b1..9b8add5bc5a 100644 --- a/pkg/kubelet/pod/mirror_client.go +++ b/pkg/kubelet/pod/mirror_client.go @@ -63,7 +63,7 @@ func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error { } hash := getPodHash(pod) copyPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = hash - apiPod, err 
:= mc.apiserverClient.Core().Pods(copyPod.Namespace).Create(&copyPod) + apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(&copyPod) if err != nil && errors.IsAlreadyExists(err) { // Check if the existing pod is the same as the pod we want to create. if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash { @@ -84,7 +84,7 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error { } glog.V(2).Infof("Deleting a mirror pod %q", podFullName) // TODO(random-liu): Delete the mirror pod with uid precondition in mirror pod manager - if err := mc.apiserverClient.Core().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) { + if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) { glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err) } return nil diff --git a/pkg/kubelet/secret/secret_manager.go b/pkg/kubelet/secret/secret_manager.go index 7fd8008784e..3de230a9032 100644 --- a/pkg/kubelet/secret/secret_manager.go +++ b/pkg/kubelet/secret/secret_manager.go @@ -64,7 +64,7 @@ func NewSimpleSecretManager(kubeClient clientset.Interface) Manager { } func (s *simpleSecretManager) GetSecret(namespace, name string) (*v1.Secret, error) { - return s.kubeClient.Core().Secrets(namespace).Get(name, metav1.GetOptions{}) + return s.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) } func (s *simpleSecretManager) RegisterPod(pod *v1.Pod) { @@ -216,7 +216,7 @@ func (s *secretStore) Get(namespace, name string) (*v1.Secret, error) { // etcd and apiserver (the cache is eventually consistent). util.FromApiserverCache(&opts) } - secret, err := s.kubeClient.Core().Secrets(namespace).Get(name, opts) + secret, err := s.kubeClient.CoreV1().Secrets(namespace).Get(name, opts) if err != nil && !apierrors.IsNotFound(err) && data.secret == nil && data.err == nil { // Couldn't fetch the latest secret, but there is no cached data to return. // Return the fetch result instead. diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go index 119f0d46fb0..07feb883253 100644 --- a/pkg/kubelet/status/status_manager.go +++ b/pkg/kubelet/status/status_manager.go @@ -441,7 +441,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { } // TODO: make me easier to express from client code - pod, err := m.kubeClient.Core().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{}) + pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{}) if errors.IsNotFound(err) { glog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid) // If the Pod is deleted the status will be cleared in @@ -462,7 +462,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { } pod.Status = status.status // TODO: handle conflict as a retry, make that easier too. - newPod, err := m.kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod) + newPod, err := m.kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) if err != nil { glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err) return @@ -477,7 +477,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { deleteOptions := metav1.NewDeleteOptions(0) // Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace. 
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID)) - err = m.kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, deleteOptions) + err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions) if err != nil { glog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err) return diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index d4a13c58a8e..4c457217c6e 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -396,7 +396,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( namespace string, claimName string) (string, types.UID, error) { pvc, err := - dswp.kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) if err != nil || pvc == nil { return "", "", fmt.Errorf( "failed to fetch PVC %s/%s from API server. err=%v", @@ -425,7 +425,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVSpec( name string, pvcReadOnly bool, expectedClaimUID types.UID) (*volume.Spec, string, error) { - pv, err := dswp.kubeClient.Core().PersistentVolumes().Get(name, metav1.GetOptions{}) + pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{}) if err != nil || pv == nil { return nil, "", fmt.Errorf( "failed to fetch PV %q from API server. err=%v", name, err) diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 81642fb6a29..00e5f046f82 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -495,7 +495,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error { // Get the node status to retrieve volume device path information. 
- node, fetchErr := rc.kubeClient.Core().Nodes().Get(string(rc.nodeName), metav1.GetOptions{}) + node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{}) if fetchErr != nil { glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr) } else { diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go index 6eb81cb7f74..7632a5f9430 100644 --- a/pkg/kubelet/volumemanager/volume_manager_test.go +++ b/pkg/kubelet/volumemanager/volume_manager_test.go @@ -324,11 +324,11 @@ func delayClaimBecomesBound( ) { time.Sleep(500 * time.Millisecond) volumeClaim, _ := - kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) volumeClaim.Status = v1.PersistentVolumeClaimStatus{ Phase: v1.ClaimBound, } - kubeClient.Core().PersistentVolumeClaims(namespace).Update(volumeClaim) + kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(volumeClaim) return } diff --git a/pkg/quota/evaluator/core/configmap.go b/pkg/quota/evaluator/core/configmap.go index bde20fc7247..dc446c8ebab 100644 --- a/pkg/quota/evaluator/core/configmap.go +++ b/pkg/quota/evaluator/core/configmap.go @@ -33,7 +33,7 @@ func listConfigMapsByNamespaceFuncUsingClient(kubeClient clientset.Interface) ge // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require // structured objects. return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().ConfigMaps(namespace).List(options) + itemList, err := kubeClient.CoreV1().ConfigMaps(namespace).List(options) if err != nil { return nil, err } diff --git a/pkg/quota/evaluator/core/persistent_volume_claims.go b/pkg/quota/evaluator/core/persistent_volume_claims.go index 9759c492e68..c72ff7307b5 100644 --- a/pkg/quota/evaluator/core/persistent_volume_claims.go +++ b/pkg/quota/evaluator/core/persistent_volume_claims.go @@ -73,7 +73,7 @@ func listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient clientset.I // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require // structured objects. return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().PersistentVolumeClaims(namespace).List(options) + itemList, err := kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(options) if err != nil { return nil, err } diff --git a/pkg/quota/evaluator/core/pods.go b/pkg/quota/evaluator/core/pods.go index 7354c3d8bf2..223e18f4397 100644 --- a/pkg/quota/evaluator/core/pods.go +++ b/pkg/quota/evaluator/core/pods.go @@ -66,7 +66,7 @@ func listPodsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic. // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require // structured objects. 
return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().Pods(namespace).List(options) + itemList, err := kubeClient.CoreV1().Pods(namespace).List(options) if err != nil { return nil, err } diff --git a/pkg/quota/evaluator/core/replication_controllers.go b/pkg/quota/evaluator/core/replication_controllers.go index 06261460834..4fdcfc0652b 100644 --- a/pkg/quota/evaluator/core/replication_controllers.go +++ b/pkg/quota/evaluator/core/replication_controllers.go @@ -33,7 +33,7 @@ func listReplicationControllersByNamespaceFuncUsingClient(kubeClient clientset.I // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require // structured objects. return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().ReplicationControllers(namespace).List(options) + itemList, err := kubeClient.CoreV1().ReplicationControllers(namespace).List(options) if err != nil { return nil, err } diff --git a/pkg/quota/evaluator/core/resource_quotas.go b/pkg/quota/evaluator/core/resource_quotas.go index 50033e068c2..8161f799193 100644 --- a/pkg/quota/evaluator/core/resource_quotas.go +++ b/pkg/quota/evaluator/core/resource_quotas.go @@ -33,7 +33,7 @@ func listResourceQuotasByNamespaceFuncUsingClient(kubeClient clientset.Interface // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require // structured objects. return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().ResourceQuotas(namespace).List(options) + itemList, err := kubeClient.CoreV1().ResourceQuotas(namespace).List(options) if err != nil { return nil, err } diff --git a/pkg/quota/evaluator/core/secrets.go b/pkg/quota/evaluator/core/secrets.go index bfd678c95aa..b9b3cd72f5c 100644 --- a/pkg/quota/evaluator/core/secrets.go +++ b/pkg/quota/evaluator/core/secrets.go @@ -33,7 +33,7 @@ func listSecretsByNamespaceFuncUsingClient(kubeClient clientset.Interface) gener // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require // structured objects. return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().Secrets(namespace).List(options) + itemList, err := kubeClient.CoreV1().Secrets(namespace).List(options) if err != nil { return nil, err } diff --git a/pkg/quota/evaluator/core/services.go b/pkg/quota/evaluator/core/services.go index 91a5ef0a059..288338daa25 100644 --- a/pkg/quota/evaluator/core/services.go +++ b/pkg/quota/evaluator/core/services.go @@ -48,7 +48,7 @@ func listServicesByNamespaceFuncUsingClient(kubeClient clientset.Interface) gene // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require // structured objects. 
return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().Services(namespace).List(options) + itemList, err := kubeClient.CoreV1().Services(namespace).List(options) if err != nil { return nil, err } diff --git a/pkg/util/node/node.go b/pkg/util/node/node.go index 0138a56b599..72790e4c8e8 100644 --- a/pkg/util/node/node.go +++ b/pkg/util/node/node.go @@ -146,7 +146,7 @@ func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.N if err != nil { return nil } - _, err = c.Core().Nodes().PatchStatus(string(node), patch) + _, err = c.CoreV1().Nodes().PatchStatus(string(node), patch) return err } diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go index 1e0d51d6cb1..661ad3f059c 100644 --- a/pkg/volume/cinder/cinder_util.go +++ b/pkg/volume/cinder/cinder_util.go @@ -144,7 +144,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) { // TODO: caching, currently it is overkill because it calls this function // only when it creates dynamic PV zones := make(sets.String) - nodes, err := kubeClient.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { glog.V(2).Infof("Error listing nodes") return zones, err diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index eeea9f61ca1..25e25c04d28 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -149,7 +149,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu if kubeClient == nil { return nil, fmt.Errorf("failed to get kube client to initialize mounter") } - ep, err := kubeClient.Core().Endpoints(podNs).Get(epName, metav1.GetOptions{}) + ep, err := kubeClient.CoreV1().Endpoints(podNs).Get(epName, metav1.GetOptions{}) if err != nil { glog.Errorf("failed to get endpoints %s[%v]", epName, err) return nil, err @@ -493,7 +493,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll if kubeClient == nil { return fmt.Errorf("failed to get kube client when collecting gids") } - pvList, err := kubeClient.Core().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { glog.Errorf("failed to get existing persistent volumes") return err @@ -814,7 +814,7 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS if kubeClient == nil { return nil, nil, fmt.Errorf("failed to get kube client when creating endpoint service") } - _, err = kubeClient.Core().Endpoints(namespace).Create(endpoint) + _, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint) if err != nil && errors.IsAlreadyExists(err) { glog.V(1).Infof("endpoint [%s] already exist in namespace [%s]", endpoint, namespace) err = nil @@ -834,7 +834,7 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ {Protocol: "TCP", Port: 1}}}} - _, err = kubeClient.Core().Services(namespace).Create(service) + _, err = kubeClient.CoreV1().Services(namespace).Create(service) if err != nil && errors.IsAlreadyExists(err) { glog.V(1).Infof("service [%s] already exist in namespace [%s]", service, namespace) err = nil @@ -851,7 +851,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, 
epServi if kubeClient == nil { return fmt.Errorf("failed to get kube client when deleting endpoint service") } - err = kubeClient.Core().Services(namespace).Delete(epServiceName, nil) + err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil) if err != nil { glog.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err) return fmt.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err) diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index b3cf958a349..49efdaf097a 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -200,7 +200,7 @@ func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.Vol if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err diff --git a/pkg/volume/testing/testing.go b/pkg/volume/testing/testing.go index 7c6f78481fc..f083ddc6261 100644 --- a/pkg/volume/testing/testing.go +++ b/pkg/volume/testing/testing.go @@ -147,7 +147,7 @@ func (f *fakeVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) { func (f *fakeVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) { return func(namespace, name string) (*v1.Secret, error) { - return f.kubeClient.Core().Secrets(namespace).Get(name, metav1.GetOptions{}) + return f.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) } } @@ -157,7 +157,7 @@ func (f *fakeVolumeHost) GetExec(pluginName string) mount.Exec { func (f *fakeVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) { return func(namespace, name string) (*v1.ConfigMap, error) { - return f.kubeClient.Core().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) + return f.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) } } diff --git a/pkg/volume/util.go b/pkg/volume/util.go index 14962f4abde..e2890516980 100644 --- a/pkg/volume/util.go +++ b/pkg/volume/util.go @@ -187,15 +187,15 @@ type realRecyclerClient struct { } func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { - return c.client.Core().Pods(pod.Namespace).Create(pod) + return c.client.CoreV1().Pods(pod.Namespace).Create(pod) } func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { - return c.client.Core().Pods(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) } func (c *realRecyclerClient) DeletePod(name, namespace string) error { - return c.client.Core().Pods(namespace).Delete(name, nil) + return c.client.CoreV1().Pods(namespace).Delete(name, nil) } func (c *realRecyclerClient) Event(eventtype, message string) { @@ -212,13 +212,13 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s Watch: true, } - podWatch, err := c.client.Core().Pods(namespace).Watch(options) + podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options) if err != nil { return nil, err } eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name) - eventWatch, err := c.client.Core().Events(namespace).Watch(metav1.ListOptions{ + eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{ FieldSelector: eventSelector.String(), Watch: true, }) diff --git 
a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index e7e289b93fa..b09e09ed20e 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -664,7 +664,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( } // Fetch current node object - node, fetchErr := og.kubeClient.Core().Nodes().Get(string(nodeName), metav1.GetOptions{}) + node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) if fetchErr != nil { // On failure, return error. Caller will log and retry. return volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume failed fetching node from API server", fetchErr) @@ -698,7 +698,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( func (og *operationGenerator) verifyVolumeIsSafeToDetach( volumeToDetach AttachedVolume) error { // Fetch current node object - node, fetchErr := og.kubeClient.Core().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{}) + node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{}) if fetchErr != nil { if errors.IsNotFound(fetchErr) { glog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", "")) diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index 4d05c0967c2..ad4ac6a0829 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -147,7 +147,7 @@ func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interf if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{}) if err != nil { return secret, err } @@ -163,7 +163,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) if err != nil { return secret, err } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index 20e7bd88d17..f220237369b 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -165,7 +165,7 @@ func (l *lifecycle) Admit(a admission.Attributes) error { // refuse to operate on non-existent namespaces if !exists || forceLiveLookup { // as a last resort, make a call directly to storage - namespace, err = l.client.Core().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) + namespace, err = l.client.CoreV1().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) switch { case errors.IsNotFound(err): return err diff --git a/test/e2e/apimachinery/chunking.go b/test/e2e/apimachinery/chunking.go index 3892ad353be..063a7bd4dac 100644 --- a/test/e2e/apimachinery/chunking.go +++ b/test/e2e/apimachinery/chunking.go @@ -37,7 +37,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { It("should return chunks 
of results for list calls", func() { ns := f.Namespace.Name c := f.ClientSet - client := c.Core().PodTemplates(ns) + client := c.CoreV1().PodTemplates(ns) By("creating a large number of resources") workqueue.Parallelize(20, numberOfTotalResources, func(i int) { diff --git a/test/e2e/apimachinery/etcd_failure.go b/test/e2e/apimachinery/etcd_failure.go index aa35810f1e1..1d035297b06 100644 --- a/test/e2e/apimachinery/etcd_failure.go +++ b/test/e2e/apimachinery/etcd_failure.go @@ -103,7 +103,7 @@ func masterExec(cmd string) { func checkExistingRCRecovers(f *framework.Framework) { By("assert that the pre-existing replication controller recovers") - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) rcSelector := labels.Set{"name": "baz"}.AsSelector() By("deleting pods from existing replication controller") diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index ee0ecd038e1..3d091cd1ffb 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -219,8 +219,8 @@ func newGCPod(name string) *v1.Pod { // controllers and pods are rcNum and podNum. It returns error if the // communication with the API server fails. func verifyRemainingReplicationControllersPods(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) { - rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name) - pods, err := clientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) + pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) } @@ -264,7 +264,7 @@ func verifyRemainingCronJobsJobsPods(f *framework.Framework, clientSet clientset By(fmt.Sprintf("expected %d jobs, got %d jobs", jobNum, len(jobs.Items))) } - pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) } @@ -332,8 +332,8 @@ var _ = SIGDescribe("Garbage collector", func() { f := framework.NewDefaultFramework("gc") It("should delete pods created by rc when not orphaning", func() { clientSet := f.ClientSet - rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name) - podClient := clientSet.Core().Pods(f.Namespace.Name) + rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) + podClient := clientSet.CoreV1().Pods(f.Namespace.Name) rcName := "simpletest.rc" // TODO: find better way to keep this label unique in the test uniqLabels := map[string]string{"gctest": "delete_pods"} @@ -385,8 +385,8 @@ var _ = SIGDescribe("Garbage collector", func() { It("should orphan pods created by rc if delete options say so", func() { clientSet := f.ClientSet - rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name) - podClient := clientSet.Core().Pods(f.Namespace.Name) + rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) + podClient := clientSet.CoreV1().Pods(f.Namespace.Name) rcName := "simpletest.rc" // TODO: find better way to keep this label unique in the test uniqLabels := map[string]string{"gctest": "orphan_pods"} @@ -454,8 +454,8 @@ var _ = SIGDescribe("Garbage collector", func() { It("should orphan pods created by rc if deleteOptions.OrphanDependents is 
nil", func() { clientSet := f.ClientSet - rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name) - podClient := clientSet.Core().Pods(f.Namespace.Name) + rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) + podClient := clientSet.CoreV1().Pods(f.Namespace.Name) rcName := "simpletest.rc" // TODO: find better way to keep this label unique in the test uniqLabels := map[string]string{"gctest": "orphan_pods_nil_option"} @@ -619,8 +619,8 @@ var _ = SIGDescribe("Garbage collector", func() { It("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() { clientSet := f.ClientSet - rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name) - podClient := clientSet.Core().Pods(f.Namespace.Name) + rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) + podClient := clientSet.CoreV1().Pods(f.Namespace.Name) rcName := "simpletest.rc" // TODO: find better way to keep this label unique in the test uniqLabels := map[string]string{"gctest": "delete_pods_foreground"} @@ -703,8 +703,8 @@ var _ = SIGDescribe("Garbage collector", func() { // TODO: this should be an integration test It("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() { clientSet := f.ClientSet - rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name) - podClient := clientSet.Core().Pods(f.Namespace.Name) + rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name) + podClient := clientSet.CoreV1().Pods(f.Namespace.Name) rc1Name := "simpletest-rc-to-be-deleted" replicas := int32(estimateMaximumPods(clientSet, 10, 100)) halfReplicas := int(replicas / 2) @@ -814,7 +814,7 @@ var _ = SIGDescribe("Garbage collector", func() { // TODO: should be an integration test It("should not be blocked by dependency circle", func() { clientSet := f.ClientSet - podClient := clientSet.Core().Pods(f.Namespace.Name) + podClient := clientSet.CoreV1().Pods(f.Namespace.Name) pod1 := newGCPod("pod1") pod1, err := podClient.Create(pod1) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/apimachinery/generated_clientset.go b/test/e2e/apimachinery/generated_clientset.go index 6c1ae3972eb..0a919d17c84 100644 --- a/test/e2e/apimachinery/generated_clientset.go +++ b/test/e2e/apimachinery/generated_clientset.go @@ -151,7 +151,7 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool) var _ = SIGDescribe("Generated clientset", func() { f := framework.NewDefaultFramework("clientset") It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func() { - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) By("constructing the pod") name := "pod" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) diff --git a/test/e2e/apimachinery/initializers.go b/test/e2e/apimachinery/initializers.go index 34847f70405..1ef358ba360 100644 --- a/test/e2e/apimachinery/initializers.go +++ b/test/e2e/apimachinery/initializers.go @@ -52,14 +52,14 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() { ch := make(chan struct{}) go func() { - _, err := c.Core().Pods(ns).Create(newUninitializedPod(podName)) + _, err := c.CoreV1().Pods(ns).Create(newUninitializedPod(podName)) Expect(err).NotTo(HaveOccurred()) close(ch) }() // wait to ensure the scheduler does not act on an uninitialized pod err := wait.PollImmediate(2*time.Second, 15*time.Second, 
func() (bool, error) { - p, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{}) + p, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return false, nil @@ -71,23 +71,23 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() { Expect(err).To(Equal(wait.ErrWaitTimeout)) // verify that we can update an initializing pod - pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) pod.Annotations = map[string]string{"update-1": "test"} - pod, err = c.Core().Pods(ns).Update(pod) + pod, err = c.CoreV1().Pods(ns).Update(pod) Expect(err).NotTo(HaveOccurred()) // verify the list call filters out uninitialized pods - pods, err := c.Core().Pods(ns).List(metav1.ListOptions{IncludeUninitialized: true}) + pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{IncludeUninitialized: true}) Expect(err).NotTo(HaveOccurred()) Expect(pods.Items).To(HaveLen(1)) - pods, err = c.Core().Pods(ns).List(metav1.ListOptions{}) + pods, err = c.CoreV1().Pods(ns).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(pods.Items).To(HaveLen(0)) // clear initializers pod.Initializers = nil - pod, err = c.Core().Pods(ns).Update(pod) + pod, err = c.CoreV1().Pods(ns).Update(pod) Expect(err).NotTo(HaveOccurred()) // pod should now start running @@ -98,12 +98,12 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() { <-ch // verify that we cannot start the pod initializing again - pod, err = c.Core().Pods(ns).Get(podName, metav1.GetOptions{}) + pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) pod.Initializers = &metav1.Initializers{ Pending: []metav1.Initializer{{Name: "Other"}}, } - _, err = c.Core().Pods(ns).Update(pod) + _, err = c.CoreV1().Pods(ns).Update(pod) if !errors.IsInvalid(err) || !strings.Contains(err.Error(), "immutable") { Fail(fmt.Sprintf("expected invalid error: %v", err)) } @@ -145,7 +145,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() { ch := make(chan struct{}) go func() { defer close(ch) - _, err := c.Core().Pods(ns).Create(newInitPod(podName)) + _, err := c.CoreV1().Pods(ns).Create(newInitPod(podName)) Expect(err).NotTo(HaveOccurred()) }() @@ -153,7 +153,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() { By("Waiting until the pod is visible to a client") var pod *v1.Pod err = wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) { - pod, err = c.Core().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true}) + pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true}) if errors.IsNotFound(err) { return false, nil } @@ -170,7 +170,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() { // pretend we are an initializer By("Completing initialization") pod.Initializers = nil - pod, err = c.Core().Pods(ns).Update(pod) + pod, err = c.CoreV1().Pods(ns).Update(pod) Expect(err).NotTo(HaveOccurred()) // ensure create call returns @@ -185,7 +185,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() { podName = "preinitialized-pod" pod = newUninitializedPod(podName) pod.Initializers.Pending = nil - pod, err = c.Core().Pods(ns).Create(pod) + pod, err = c.CoreV1().Pods(ns).Create(pod) Expect(err).NotTo(HaveOccurred()) Expect(pod.Initializers).To(BeNil()) @@ -197,7 +197,7 @@ var _ = SIGDescribe("Initializers 
[Feature:Initializers]", func() { v1.MirrorPodAnnotationKey: "true", } pod.Spec.NodeName = "node-does-not-yet-exist" - pod, err = c.Core().Pods(ns).Create(pod) + pod, err = c.CoreV1().Pods(ns).Create(pod) Expect(err).NotTo(HaveOccurred()) Expect(pod.Initializers).To(BeNil()) Expect(pod.Annotations[v1.MirrorPodAnnotationKey]).To(Equal("true")) @@ -259,7 +259,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() { LabelSelector: selector.String(), IncludeUninitialized: true, } - pods, err := c.Core().Pods(ns).List(listOptions) + pods, err := c.CoreV1().Pods(ns).List(listOptions) Expect(err).NotTo(HaveOccurred()) Expect(len(pods.Items)).Should(Equal(1)) }) @@ -349,7 +349,7 @@ func newInitPod(podName string) *v1.Pod { // removeInitializersFromAllPods walks all pods and ensures they don't have the provided initializer, // to guarantee completing the test doesn't block the entire cluster. func removeInitializersFromAllPods(c clientset.Interface, initializerName string) { - pods, err := c.Core().Pods("").List(metav1.ListOptions{IncludeUninitialized: true}) + pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{IncludeUninitialized: true}) if err != nil { return } @@ -358,7 +358,7 @@ func removeInitializersFromAllPods(c clientset.Interface, initializerName string continue } err := clientretry.RetryOnConflict(clientretry.DefaultRetry, func() error { - pod, err := c.Core().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{IncludeUninitialized: true}) + pod, err := c.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{IncludeUninitialized: true}) if err != nil { if errors.IsNotFound(err) { return nil @@ -382,7 +382,7 @@ func removeInitializersFromAllPods(c clientset.Interface, initializerName string pod.Initializers = nil } framework.Logf("Found initializer on pod %s in ns %s", pod.Name, pod.Namespace) - _, err = c.Core().Pods(p.Namespace).Update(pod) + _, err = c.CoreV1().Pods(p.Namespace).Update(pod) return err }) if err != nil { diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index 28896564588..0542113b7b3 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -62,7 +62,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, func() (bool, error) { var cnt = 0 - nsList, err := f.ClientSet.Core().Namespaces().List(metav1.ListOptions{}) + nsList, err := f.ClientSet.CoreV1().Namespaces().List(metav1.ListOptions{}) if err != nil { return false, err } @@ -83,7 +83,7 @@ func waitForPodInNamespace(c clientset.Interface, ns, podName string) *v1.Pod { var pod *v1.Pod var err error err = wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) { - pod, err = c.Core().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true}) + pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true}) if errors.IsNotFound(err) { return false, nil } @@ -119,7 +119,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }, }, } - pod, err = f.ClientSet.Core().Pods(namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Waiting for the pod to have running status") @@ -141,21 +141,21 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }, } go func() { - _, err = f.ClientSet.Core().Pods(namespace.Name).Create(podB) + _, err = 
f.ClientSet.CoreV1().Pods(namespace.Name).Create(podB) // This error is ok, because we will delete the pod before it completes initialization framework.Logf("error from create uninitialized namespace: %v", err) }() podB = waitForPodInNamespace(f.ClientSet, namespace.Name, podB.Name) By("Deleting the namespace") - err = f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil) + err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { - _, err = f.ClientSet.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { return true, nil } @@ -167,9 +167,9 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) By("Verifying there are no pods in the namespace") - _, err = f.ClientSet.Core().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{}) Expect(err).To(HaveOccurred()) - _, err = f.ClientSet.Core().Pods(namespace.Name).Get(podB.Name, metav1.GetOptions{IncludeUninitialized: true}) + _, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(podB.Name, metav1.GetOptions{IncludeUninitialized: true}) Expect(err).To(HaveOccurred()) } @@ -202,18 +202,18 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { }}, }, } - service, err = f.ClientSet.Core().Services(namespace.Name).Create(service) + service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service) Expect(err).NotTo(HaveOccurred()) By("Deleting the namespace") - err = f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil) + err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Waiting for the namespace to be removed.") maxWaitSeconds := int64(60) framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second, func() (bool, error) { - _, err = f.ClientSet.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { return true, nil } @@ -225,7 +225,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) By("Verifying there is no service in the namespace") - _, err = f.ClientSet.Core().Services(namespace.Name).Get(service.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{}) Expect(err).To(HaveOccurred()) } diff --git a/test/e2e/apimachinery/table_conversion.go b/test/e2e/apimachinery/table_conversion.go index a3dc3bd6e4d..ffa1a6b77aa 100644 --- a/test/e2e/apimachinery/table_conversion.go +++ b/test/e2e/apimachinery/table_conversion.go @@ -43,11 +43,11 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { podName := "pod-1" framework.Logf("Creating pod %s", podName) - _, err := c.Core().Pods(ns).Create(newTablePod(podName)) + _, err := c.CoreV1().Pods(ns).Create(newTablePod(podName)) Expect(err).NotTo(HaveOccurred()) table := &metav1alpha1.Table{} - err = 
c.Core().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table) + err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table) Expect(err).NotTo(HaveOccurred()) framework.Logf("Table: %#v", table) @@ -67,7 +67,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { c := f.ClientSet table := &metav1alpha1.Table{} - err := c.Core().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table) + err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table) Expect(err).NotTo(HaveOccurred()) framework.Logf("Table: %#v", table) @@ -85,7 +85,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() { c := f.ClientSet table := &metav1alpha1.Table{} - err := c.Core().RESTClient().Get().Resource("services").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table) + err := c.CoreV1().RESTClient().Get().Resource("services").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table) Expect(err).To(HaveOccurred()) Expect(err.(errors.APIStatus).Status().Code).To(Equal(int32(406))) }) diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index 571b4ef8c82..8b4e70080b2 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -436,7 +436,7 @@ func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reaso if err != nil { return fmt.Errorf("Error in getting cronjob %s/%s: %v", ns, cronJobName, err) } - events, err := c.Core().Events(ns).Search(legacyscheme.Scheme, sj) + events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj) if err != nil { return fmt.Errorf("Error in listing events: %s", err) } diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index 20b4c0c97bc..85266680ee7 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -164,7 +164,7 @@ func replacePods(pods []*v1.Pod, store cache.Store) { // and a list of nodenames across which these containers restarted. 
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) { options := metav1.ListOptions{LabelSelector: labelSelector.String()} - pods, err := c.Core().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(options) framework.ExpectNoError(err) failedContainers := 0 containerRestartNodes := sets.NewString() @@ -215,12 +215,12 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labelSelector.String() - obj, err := f.ClientSet.Core().Pods(ns).List(options) + obj, err := f.ClientSet.CoreV1().Pods(ns).List(options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labelSelector.String() - return f.ClientSet.Core().Pods(ns).Watch(options) + return f.ClientSet.CoreV1().Pods(ns).Watch(options) }, }, &v1.Pod{}, diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index b9981d77b68..e43849edd40 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -85,7 +85,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { } else { framework.Logf("unable to dump daemonsets: %v", err) } - if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { + if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { framework.Logf("pods: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), pods)) } else { framework.Logf("unable to dump pods: %v", err) @@ -126,7 +126,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { By("Stop a daemon pod, check that the daemon pod is revived.") podList := listDaemonPods(c, ns, label) pod := podList.Items[0] - err = c.Core().Pods(ns).Delete(pod.Name, nil) + err = c.CoreV1().Pods(ns).Delete(pod.Name, nil) Expect(err).NotTo(HaveOccurred()) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive") @@ -243,7 +243,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { pod := podList.Items[0] pod.ResourceVersion = "" pod.Status.Phase = v1.PodFailed - _, err = c.Core().Pods(ns).UpdateStatus(&pod) + _, err = c.CoreV1().Pods(ns).UpdateStatus(&pod) Expect(err).NotTo(HaveOccurred(), "error failing a daemon pod") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive") @@ -549,7 +549,7 @@ func newDaemonSet(dsName, image string, label map[string]string) *extensions.Dae func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.Core().Pods(ns).List(options) + podList, err := c.CoreV1().Pods(ns).List(options) Expect(err).NotTo(HaveOccurred()) Expect(len(podList.Items)).To(BeNumerically(">", 0)) return podList @@ -580,7 +580,7 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error { } func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) { - nodeClient := c.Core().Nodes() + nodeClient := c.CoreV1().Nodes() var newNode *v1.Node var newLabels map[string]string err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, func() (bool, error) { @@ 
-621,7 +621,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nodeNames []string) func() (bool, error) { return func() (bool, error) { - podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { framework.Logf("could not get the pod list: %v", err) return false, nil @@ -660,7 +660,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nod func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) { return func() (bool, error) { - nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) nodeNames := make([]string, 0) for _, node := range nodeList.Items { @@ -717,7 +717,7 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error { func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.DaemonSet, image string, maxUnavailable int) func() (bool, error) { return func() (bool, error) { - podList, err := c.Core().Pods(ds.Namespace).List(metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{}) if err != nil { return false, err } diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 147adac8916..111d8760ea3 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -145,7 +145,7 @@ func failureTrap(c clientset.Interface, ns string) { framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err) } options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.Core().Pods(rs.Namespace).List(options) + podList, err := c.CoreV1().Pods(rs.Namespace).List(options) for _, pod := range podList.Items { framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod)) } @@ -191,7 +191,7 @@ func stopDeployment(c clientset.Interface, internalClient internalclientset.Inte framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName) var pods *v1.PodList if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { - pods, err = c.Core().Pods(ns).List(options) + pods, err = c.CoreV1().Pods(ns).List(options) if err != nil { return false, err } @@ -342,7 +342,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { deploymentName := "test-cleanup-deployment" framework.Logf("Creating deployment %s", deploymentName) - pods, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err) options := metav1.ListOptions{ @@ -350,7 +350,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { } stopCh := make(chan struct{}) defer close(stopCh) - w, err := c.Core().Pods(ns).Watch(options) + w, err := c.CoreV1().Pods(ns).Watch(options) Expect(err).NotTo(HaveOccurred()) go func() { // There should be only one pod being created, which is the pod with the redis image. 
@@ -947,7 +947,7 @@ func testIterativeDeployments(f *framework.Framework) { selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) Expect(err).NotTo(HaveOccurred()) opts := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.Core().Pods(ns).List(opts) + podList, err := c.CoreV1().Pods(ns).List(opts) Expect(err).NotTo(HaveOccurred()) if len(podList.Items) == 0 { framework.Logf("%02d: no deployment pods to delete", i) @@ -959,7 +959,7 @@ func testIterativeDeployments(f *framework.Framework) { } name := podList.Items[p].Name framework.Logf("%02d: deleting deployment pod %q", i, name) - err := c.Core().Pods(ns).Delete(name, nil) + err := c.CoreV1().Pods(ns).Delete(name, nil) if err != nil && !errors.IsNotFound(err) { Expect(err).NotTo(HaveOccurred()) } diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 8d61c3f4806..6679fbfd350 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -103,7 +103,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri By(fmt.Sprintf("Creating replication controller %s", name)) newRC := newRC(name, replicas, map[string]string{"name": name}, name, image) newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} - _, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(newRC) + _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC) Expect(err).NotTo(HaveOccurred()) // Check that pods for the new RC were created. @@ -121,7 +121,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri } err = f.WaitForPodRunning(pod.Name) if err != nil { - updatePod, getErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) if getErr == nil { err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) } else { @@ -160,11 +160,11 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name) quota := newPodQuota(name, "2") - _, err := c.Core().ResourceQuotas(namespace).Create(quota) + _, err := c.CoreV1().ResourceQuotas(namespace).Create(quota) Expect(err).NotTo(HaveOccurred()) err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - quota, err = c.Core().ResourceQuotas(namespace).Get(name, metav1.GetOptions{}) + quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{}) if err != nil { return false, err } @@ -179,14 +179,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name)) rc := newRC(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage) - rc, err = c.Core().ReplicationControllers(namespace).Create(rc) + rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name)) generation := rc.Generation conditions := rc.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) + rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) if err != nil { 
return false, err } @@ -215,7 +215,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) { generation = rc.Generation conditions = rc.Status.Conditions err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) + rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) if err != nil { return false, err } @@ -258,12 +258,12 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) { replicas := int32(1) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName) rcSt.Spec.Selector = map[string]string{"name": name} - rc, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rcSt) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt) Expect(err).NotTo(HaveOccurred()) By("Then the orphan pod is adopted") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the RC if errors.IsNotFound(err) { return true, nil @@ -287,7 +287,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { replicas := int32(1) rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName) rcSt.Spec.Selector = map[string]string{"name": name} - rc, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rcSt) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt) Expect(err).NotTo(HaveOccurred()) By("When the matched label of one of its pods change") @@ -296,11 +296,11 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { p := pods.Items[0] err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) pod.Labels = map[string]string{"name": "not-matching-name"} - _, err = f.ClientSet.Core().Pods(f.Namespace.Name).Update(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod) if err != nil && errors.IsConflict(err) { return false, nil } @@ -313,7 +313,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { By("Then the pod is released") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) for _, owner := range p2.OwnerReferences { if *owner.Controller && owner.UID == rc.UID { diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index d4afd7bd644..e2d00f261bd 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -129,7 +129,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s } err = f.WaitForPodRunning(pod.Name) if err != nil { - updatePod, getErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) if getErr == nil { err = fmt.Errorf("Pod %q never run 
(phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) } else { @@ -168,11 +168,11 @@ func testReplicaSetConditionCheck(f *framework.Framework) { By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name)) quota := newPodQuota(name, "2") - _, err := c.Core().ResourceQuotas(namespace).Create(quota) + _, err := c.CoreV1().ResourceQuotas(namespace).Create(quota) Expect(err).NotTo(HaveOccurred()) err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - quota, err = c.Core().ResourceQuotas(namespace).Get(name, metav1.GetOptions{}) + quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{}) if err != nil { return false, err } @@ -272,7 +272,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { By("Then the orphan pod is adopted") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) // The Pod p should either be adopted or deleted by the ReplicaSet if errors.IsNotFound(err) { return true, nil @@ -295,11 +295,11 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { p = &pods.Items[0] err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) pod.Labels = map[string]string{"name": "not-matching-name"} - _, err = f.ClientSet.Core().Pods(f.Namespace.Name).Update(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod) if err != nil && errors.IsConflict(err) { return false, nil } @@ -312,7 +312,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { By("Then the pod is released") err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { - p2, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) + p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) for _, owner := range p2.OwnerReferences { if *owner.Controller && owner.UID == rs.UID { diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index eef79cb43a2..da918169964 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -77,7 +77,7 @@ var _ = SIGDescribe("StatefulSet", func() { By("Creating service " + headlessSvcName + " in namespace " + ns) headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels) - _, err := c.Core().Services(ns).Create(headlessService) + _, err := c.CoreV1().Services(ns).Create(headlessService) Expect(err).NotTo(HaveOccurred()) }) @@ -650,7 +650,7 @@ var _ = SIGDescribe("StatefulSet", func() { It("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() { psLabels := klabels.Set(labels) By("Initializing watcher for selector " + psLabels.String()) - watcher, err := f.ClientSet.Core().Pods(ns).Watch(metav1.ListOptions{ + watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{ LabelSelector: psLabels.AsSelector().String(), }) Expect(err).NotTo(HaveOccurred()) @@ -692,7 +692,7 @@ var _ = SIGDescribe("StatefulSet", func() { 
Expect(err).NotTo(HaveOccurred()) By("Scale down will halt with unhealthy stateful pod") - watcher, err = f.ClientSet.Core().Pods(ns).Watch(metav1.ListOptions{ + watcher, err = f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{ LabelSelector: psLabels.AsSelector().String(), }) Expect(err).NotTo(HaveOccurred()) @@ -785,7 +785,7 @@ var _ = SIGDescribe("StatefulSet", func() { NodeName: node.Name, }, } - pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) framework.ExpectNoError(err) By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name) @@ -803,7 +803,7 @@ var _ = SIGDescribe("StatefulSet", func() { var initialStatefulPodUID types.UID By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name) - w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName})) + w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName})) framework.ExpectNoError(err) // we need to get UID from pod in any state and wait until stateful set controller will remove pod atleast once _, err = watch.Until(framework.StatefulPodTimeout, w, func(event watch.Event) (bool, error) { @@ -826,13 +826,13 @@ var _ = SIGDescribe("StatefulSet", func() { } By("Removing pod with conflicting port in namespace " + f.Namespace.Name) - err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state") // we may catch delete event, thats why we are waiting for running phase like this, and not with watch.Until Eventually(func() error { - statefulPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{}) + statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index 12fa3f336b6..1f516c62186 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -326,27 +326,27 @@ var _ = SIGDescribe("Advanced Audit [Feature:Audit]", func() { }, } - _, err := f.ClientSet.Core().ConfigMaps(namespace).Create(configMap) + _, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Create(configMap) framework.ExpectNoError(err, "failed to create audit-configmap") - _, err = f.ClientSet.Core().ConfigMaps(namespace).Get(configMap.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Get(configMap.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get audit-configmap") - configMapChan, err := f.ClientSet.Core().ConfigMaps(namespace).Watch(watchOptions) + configMapChan, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Watch(watchOptions) framework.ExpectNoError(err, "failed to create watch for config maps") for range configMapChan.ResultChan() { } - _, err = f.ClientSet.Core().ConfigMaps(namespace).Update(configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Update(configMap) framework.ExpectNoError(err, "failed to update audit-configmap") - _, err = f.ClientSet.Core().ConfigMaps(namespace).Patch(configMap.Name, types.JSONPatchType, patch) 
+ _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Patch(configMap.Name, types.JSONPatchType, patch) framework.ExpectNoError(err, "failed to patch configmap") - _, err = f.ClientSet.Core().ConfigMaps(namespace).List(metav1.ListOptions{}) + _, err = f.ClientSet.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list config maps") - err = f.ClientSet.Core().ConfigMaps(namespace).Delete(configMap.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(namespace).Delete(configMap.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete audit-configmap") }, []auditEvent{ @@ -452,27 +452,27 @@ var _ = SIGDescribe("Advanced Audit [Feature:Audit]", func() { "top-secret": []byte("foo-bar"), }, } - _, err := f.ClientSet.Core().Secrets(namespace).Create(secret) + _, err := f.ClientSet.CoreV1().Secrets(namespace).Create(secret) framework.ExpectNoError(err, "failed to create audit-secret") - _, err = f.ClientSet.Core().Secrets(namespace).Get(secret.Name, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(namespace).Get(secret.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get audit-secret") - secretChan, err := f.ClientSet.Core().Secrets(namespace).Watch(watchOptions) + secretChan, err := f.ClientSet.CoreV1().Secrets(namespace).Watch(watchOptions) framework.ExpectNoError(err, "failed to create watch for secrets") for range secretChan.ResultChan() { } - _, err = f.ClientSet.Core().Secrets(namespace).Update(secret) + _, err = f.ClientSet.CoreV1().Secrets(namespace).Update(secret) framework.ExpectNoError(err, "failed to update audit-secret") - _, err = f.ClientSet.Core().Secrets(namespace).Patch(secret.Name, types.JSONPatchType, patch) + _, err = f.ClientSet.CoreV1().Secrets(namespace).Patch(secret.Name, types.JSONPatchType, patch) framework.ExpectNoError(err, "failed to patch secret") - _, err = f.ClientSet.Core().Secrets(namespace).List(metav1.ListOptions{}) + _, err = f.ClientSet.CoreV1().Secrets(namespace).List(metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list secrets") - err = f.ClientSet.Core().Secrets(namespace).Delete(secret.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(namespace).Delete(secret.Name, &metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete audit-secret") }, []auditEvent{ @@ -651,7 +651,7 @@ func expectAuditLines(f *framework.Framework, expected []auditEvent) { } // Fetch the log stream. 
- stream, err := f.ClientSet.Core().RESTClient().Get().AbsPath("/logs/kube-apiserver-audit.log").Stream() + stream, err := f.ClientSet.CoreV1().RESTClient().Get().AbsPath("/logs/kube-apiserver-audit.log").Stream() framework.ExpectNoError(err, "could not read audit log") defer stream.Close() diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 98776d8c4f1..9ac771e0f8d 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -48,7 +48,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { var secrets []v1.ObjectReference framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) { By("waiting for a single token reference") - sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) if apierrors.IsNotFound(err) { framework.Logf("default service account was not found") return false, nil @@ -74,19 +74,19 @@ var _ = SIGDescribe("ServiceAccounts", func() { { By("ensuring the single token reference persists") time.Sleep(2 * time.Second) - sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) framework.ExpectNoError(err) Expect(sa.Secrets).To(Equal(secrets)) } // delete the referenced secret By("deleting the service account token") - framework.ExpectNoError(f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil)) + framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil)) // wait for the referenced secret to be removed, and another one autocreated framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { By("waiting for a new token reference") - sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) if err != nil { framework.Logf("error getting default service account: %v", err) return false, err @@ -112,7 +112,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { { By("ensuring the single token reference persists") time.Sleep(2 * time.Second) - sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) framework.ExpectNoError(err) Expect(sa.Secrets).To(Equal(secrets)) } @@ -120,17 +120,17 @@ var _ = SIGDescribe("ServiceAccounts", func() { // delete the reference from the service account By("deleting the reference to the service account token") { - sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) framework.ExpectNoError(err) sa.Secrets = nil - _, updateErr := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Update(sa) + _, updateErr := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Update(sa) framework.ExpectNoError(updateErr) } // wait for another one to be autocreated framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { By("waiting for a new token to be created and 
added") - sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) if err != nil { framework.Logf("error getting default service account: %v", err) return false, err @@ -152,7 +152,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { { By("ensuring the single token reference persists") time.Sleep(2 * time.Second) - sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) framework.ExpectNoError(err) Expect(sa.Secrets).To(Equal(secrets)) } @@ -165,7 +165,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { // Standard get, update retry loop framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { By("getting the auto-created API token") - sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{}) if apierrors.IsNotFound(err) { framework.Logf("default service account was not found") return false, nil @@ -179,7 +179,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { return false, nil } for _, secretRef := range sa.Secrets { - secret, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{}) + secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Error getting secret %s: %v", secretRef.Name, err) continue @@ -253,15 +253,15 @@ var _ = SIGDescribe("ServiceAccounts", func() { falseValue := false mountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount"}, AutomountServiceAccountToken: &trueValue} nomountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nomount"}, AutomountServiceAccountToken: &falseValue} - mountSA, err = f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Create(mountSA) + mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(mountSA) framework.ExpectNoError(err) - nomountSA, err = f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Create(nomountSA) + nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(nomountSA) framework.ExpectNoError(err) // Standard get, update retry loop framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) { By("getting the auto-created API token") - sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{}) + sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { framework.Logf("mount service account was not found") return false, nil @@ -275,7 +275,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { return false, nil } for _, secretRef := range sa.Secrets { - secret, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{}) + secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{}) if err != nil { framework.Logf("Error getting secret %s: %v", secretRef.Name, err) continue @@ -365,7 +365,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { AutomountServiceAccountToken: tc.AutomountPodSpec, }, } - 
createdPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) framework.ExpectNoError(err) framework.Logf("created pod %s", tc.PodName) diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index e993aa88e8a..a6fa9ee318a 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun By(fmt.Sprintf("Restoring initial size of the cluster")) setMigSizes(originalSizes) framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout)) - nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) s := time.Now() makeSchedulableLoop: @@ -255,7 +255,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun // annotate all nodes with no-scale-down ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled" - nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{ FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String(), @@ -475,7 +475,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e return err } - _, err = f.ClientSet.Core().Nodes().Patch(string(node.Name), types.StrategicMergePatchType, patchBytes) + _, err = f.ClientSet.CoreV1().Nodes().Patch(string(node.Name), types.StrategicMergePatchType, patchBytes) if err != nil { return err } diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 21b2c41f067..bc535ae7048 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -128,7 +128,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { expectedNodes += size } framework.ExpectNoError(framework.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout)) - nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) s := time.Now() @@ -159,7 +159,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { EventsLoop: for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) { By("Waiting for NotTriggerScaleUp event") - events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(metav1.ListOptions{}) + events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{}) framework.ExpectNoError(err) for _, e := range events.Items { @@ -458,7 +458,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { By(fmt.Sprintf("New nodes: %v\n", newNodesSet)) registeredNodes := sets.NewString() for nodeName := range newNodesSet { - node, err := f.ClientSet.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) if err == nil && node != nil { registeredNodes.Insert(nodeName) } else { @@ -609,7 +609,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } By("Make remaining nodes unschedulable") - nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := 
f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) framework.ExpectNoError(err) @@ -685,7 +685,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ngNodes, err := framework.GetGroupNodes(minMig) framework.ExpectNoError(err) Expect(len(ngNodes) == 1).To(BeTrue()) - node, err := f.ClientSet.Core().Nodes().Get(ngNodes[0], metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(ngNodes[0], metav1.GetOptions{}) By(fmt.Sprintf("Target node for scale-down: %s", node.Name)) framework.ExpectNoError(err) @@ -718,7 +718,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { By("Block network connectivity to some nodes to simulate unhealthy cluster") nodesToBreakCount := int(math.Floor(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize)))) - nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) framework.ExpectNoError(err) @@ -762,7 +762,7 @@ func execCmd(args ...string) *exec.Cmd { func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace string, podsPerNode, pdbSize int, verifyFunction func(int)) { increasedSize := manuallyIncreaseClusterSize(f, migSizes) - nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) framework.ExpectNoError(err) @@ -984,7 +984,7 @@ func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, time // WaitForClusterSizeFuncWithUnready waits until the cluster size matches the given function and assumes some unready nodes. 
func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration, expectedUnready int) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -1011,7 +1011,7 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface, tolerateUnreadyCount int) error { var notready []string for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) { - pods, err := c.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { return fmt.Errorf("failed to get pods: %v", err) } @@ -1051,7 +1051,7 @@ func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interf } func getAnyNode(c clientset.Interface) *v1.Node { - nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -1086,10 +1086,10 @@ func drainNode(f *framework.Framework, node *v1.Node) { By("Manually drain the single node") podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} - pods, err := f.ClientSet.Core().Pods(metav1.NamespaceAll).List(podOpts) + pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) framework.ExpectNoError(err) for _, pod := range pods.Items { - err = f.ClientSet.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err) } } @@ -1097,7 +1097,7 @@ func drainNode(f *framework.Framework, node *v1.Node) { func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error { By(fmt.Sprintf("Taint node %s", node.Name)) for j := 0; j < 3; j++ { - freshNode, err := c.Core().Nodes().Get(node.Name, metav1.GetOptions{}) + freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) if err != nil { return err } @@ -1111,7 +1111,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error { Value: "DisabledForTest", Effect: v1.TaintEffectNoSchedule, }) - _, err = c.Core().Nodes().Update(freshNode) + _, err = c.CoreV1().Nodes().Update(freshNode) if err == nil { return nil } @@ -1134,7 +1134,7 @@ func (CriticalAddonsOnlyError) Error() string { func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error { By(fmt.Sprintf("Remove taint from node %s", node.Name)) for j := 0; j < 3; j++ { - freshNode, err := c.Core().Nodes().Get(node.Name, metav1.GetOptions{}) + freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) if err != nil { return err } @@ -1152,7 +1152,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd return nil } freshNode.Spec.Taints = newTaints - _, err = c.Core().Nodes().Update(freshNode) + _, err = c.CoreV1().Nodes().Update(freshNode) if err == nil { return nil } @@ -1181,7 +1181,7 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, 
pods int, id if err != nil { return err } - _, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) if err != nil { return err } @@ -1205,7 +1205,7 @@ func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods in if err != nil { return err } - _, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) if err != nil { return err } @@ -1287,7 +1287,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa if err != nil { return err } - rc, err := f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) + rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) if err != nil { return err } @@ -1301,7 +1301,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa // (we retry 409 errors in case rc reference got out of sync) for j := 0; j < 3; j++ { *rc.Spec.Replicas = int32((i + 1) * podsPerNode) - rc, err = f.ClientSet.Core().ReplicationControllers(namespace).Update(rc) + rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(rc) if err == nil { break } @@ -1309,14 +1309,14 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa return err } glog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j) - rc, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) + rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) if err != nil { return err } } err = wait.PollImmediate(5*time.Second, podTimeout, func() (bool, error) { - rc, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) + rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) if err != nil || rc.Status.ReadyReplicas < int32((i+1)*podsPerNode) { return false, nil } diff --git a/test/e2e/autoscaling/custom_metrics_autoscaling.go b/test/e2e/autoscaling/custom_metrics_autoscaling.go index 7a633842bca..24c5a09e584 100644 --- a/test/e2e/autoscaling/custom_metrics_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_autoscaling.go @@ -117,7 +117,7 @@ func createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) er if err != nil { return err } - _, err = cs.Core().Pods(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100)) + _, err = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100)) if err != nil { return err } @@ -127,7 +127,7 @@ func createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) er func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface) { _ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterDeployment, &metav1.DeleteOptions{}) - _ = cs.Core().Pods(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterPod, &metav1.DeleteOptions{}) + _ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterPod, &metav1.DeleteOptions{}) _ = 
cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(dummyDeploymentName, &metav1.DeleteOptions{}) } diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index 0ef2af923da..ad1452dd087 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -248,7 +248,7 @@ func getScheduableCores(nodes []v1.Node) int64 { } func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) { - cm, err := c.Core().ConfigMaps(metav1.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{}) + cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{}) if err != nil { return nil, err } @@ -256,7 +256,7 @@ func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) { } func deleteDNSScalingConfigMap(c clientset.Interface) error { - if err := c.Core().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil { + if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil { return err } framework.Logf("DNS autoscaling ConfigMap deleted.") @@ -282,7 +282,7 @@ func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap { } func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error { - _, err := c.Core().ConfigMaps(metav1.NamespaceSystem).Update(configMap) + _, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(configMap) if err != nil { return err } @@ -308,7 +308,7 @@ func getDNSReplicas(c clientset.Interface) (int, error) { func deleteDNSAutoscalerPod(c clientset.Interface) error { label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.Core().Pods(metav1.NamespaceSystem).List(listOpts) + pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts) if err != nil { return err } @@ -317,7 +317,7 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error { } podName := pods.Items[0].Name - if err := c.Core().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil { + if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil { return err } framework.Logf("DNS autoscaling pod %v deleted.", podName) diff --git a/test/e2e/common/apparmor.go b/test/e2e/common/apparmor.go index 385bac4aad9..696963dbaf7 100644 --- a/test/e2e/common/apparmor.go +++ b/test/e2e/common/apparmor.go @@ -160,7 +160,7 @@ profile %s flags=(attach_disconnected) { profileName: profile, }, } - _, err := f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(cm) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm) framework.ExpectNoError(err, "Failed to create apparmor-profiles ConfigMap") } @@ -228,7 +228,7 @@ func createAppArmorProfileLoader(f *framework.Framework) { }, }, } - _, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(loader) + _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(loader) framework.ExpectNoError(err, "Failed to create apparmor-loader ReplicationController") // Wait for loader to be ready. 
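Illustrative sketch (not part of the patch): every hunk in this series performs the same mechanical substitution; calls routed through the deprecated unversioned Core() accessor are rewritten against the explicitly versioned CoreV1() group client, which exposes the identical typed interfaces. The helper below shows the before/after shape of the change. The import paths assume the client-go clientset used by these e2e tests, and the function name is invented for illustration.

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // listPodNames is a hypothetical helper illustrating the rewrite.
    func listPodNames(c clientset.Interface, ns string) ([]string, error) {
        // Before this series: c.Core().Pods(ns).List(metav1.ListOptions{})
        // After:              the versioned accessor, same method set and options.
        pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
        if err != nil {
            return nil, err
        }
        names := make([]string, 0, len(pods.Items))
        for _, p := range pods.Items {
            names = append(names, p.Name)
        }
        return names, nil
    }
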
diff --git a/test/e2e/common/autoscaling_utils.go b/test/e2e/common/autoscaling_utils.go index 0e2cb5a7ddc..45f82fc0e0b 100644 --- a/test/e2e/common/autoscaling_utils.go +++ b/test/e2e/common/autoscaling_utils.go @@ -243,7 +243,7 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) { defer cancel() err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { - proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post()) + proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). Context(ctx). @@ -270,7 +270,7 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) { defer cancel() err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { - proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post()) + proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). Context(ctx). @@ -297,7 +297,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { defer cancel() err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { - proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post()) + proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). Context(ctx). 
@@ -321,7 +321,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { func (rc *ResourceConsumer) GetReplicas() int { switch rc.kind { case KindRC: - replicationController, err := rc.clientSet.Core().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{}) + replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if replicationController == nil { framework.Failf(rcIsNil) @@ -404,9 +404,9 @@ func (rc *ResourceConsumer) CleanUp() { kind, err := kindOf(rc.kind) framework.ExpectNoError(err) framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, kind, rc.nsName, rc.name)) - framework.ExpectNoError(rc.clientSet.Core().Services(rc.nsName).Delete(rc.name, nil)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil)) framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, api.Kind("ReplicationController"), rc.nsName, rc.controllerName)) - framework.ExpectNoError(rc.clientSet.Core().Services(rc.nsName).Delete(rc.controllerName, nil)) + framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil)) } func kindOf(kind string) (schema.GroupKind, error) { @@ -424,7 +424,7 @@ func kindOf(kind string) (schema.GroupKind, error) { func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) { By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas)) - _, err := c.Core().Services(ns).Create(&v1.Service{ + _, err := c.CoreV1().Services(ns).Create(&v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -478,7 +478,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli By(fmt.Sprintf("Running controller")) controllerName := name + "-ctrl" - _, err = c.Core().Services(ns).Create(&v1.Service{ + _, err = c.CoreV1().Services(ns).Create(&v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: controllerName, }, diff --git a/test/e2e/common/configmap.go b/test/e2e/common/configmap.go index ba1d6713370..b1f4331c43e 100644 --- a/test/e2e/common/configmap.go +++ b/test/e2e/common/configmap.go @@ -34,7 +34,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() { configMap := newConfigMap(f, name) By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -77,7 +77,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() { configMap := newEnvFromConfigMap(f, name) By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } diff --git a/test/e2e/common/configmap_volume.go b/test/e2e/common/configmap_volume.go index 8cca5e5ba7e..8595caf4009 100644 --- a/test/e2e/common/configmap_volume.go +++ b/test/e2e/common/configmap_volume.go @@ -92,7 +92,7 
@@ var _ = Describe("[sig-storage] ConfigMap", func() { By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -142,7 +142,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { By(fmt.Sprintf("Updating configmap %v", configMap.Name)) configMap.ResourceVersion = "" // to force update configMap.Data["data-1"] = "value-2" - _, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap) Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) By("waiting to observe update in volume") @@ -196,12 +196,12 @@ var _ = Describe("[sig-storage] ConfigMap", func() { By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) var err error - if deleteConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil { + if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) } By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) - if updateConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil { + if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) } @@ -305,18 +305,18 @@ var _ = Describe("[sig-storage] ConfigMap", func() { Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) - err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name) By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) updateConfigMap.ResourceVersion = "" // to force update delete(updateConfigMap.Data, "data-1") updateConfigMap.Data["data-3"] = "value-3" - _, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(updateConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap) Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) - if createConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil { + if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) } @@ -339,7 +339,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if 
configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -427,7 +427,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -507,7 +507,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } diff --git a/test/e2e/common/events.go b/test/e2e/common/events.go index 174f951066c..47d02df793a 100644 --- a/test/e2e/common/events.go +++ b/test/e2e/common/events.go @@ -48,14 +48,14 @@ func ObserveNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodeP &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector.String() - ls, err := f.ClientSet.Core().Nodes().List(options) + ls, err := f.ClientSet.CoreV1().Nodes().List(options) return ls, err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { // Signal parent goroutine that watching has begun. defer informerStartedGuard.Do(func() { close(informerStartedChan) }) options.FieldSelector = nodeSelector.String() - w, err := f.ClientSet.Core().Nodes().Watch(options) + w, err := f.ClientSet.CoreV1().Nodes().Watch(options) return w, err }, }, @@ -105,13 +105,13 @@ func ObserveEventAfterAction(f *framework.Framework, eventPredicate func(*v1.Eve _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - ls, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options) + ls, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options) return ls, err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { // Signal parent goroutine that watching has begun. defer informerStartedGuard.Do(func() { close(informerStartedChan) }) - w, err := f.ClientSet.Core().Events(f.Namespace.Name).Watch(options) + w, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Watch(options) return w, err }, }, diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index 5b62a91698d..bc58a8c6b47 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -427,7 +427,7 @@ var _ = framework.KubeDescribe("Pods", func() { }, }, } - _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(svc) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc) Expect(err).NotTo(HaveOccurred(), "failed to create service") // Make a client pod that verifies that it has the service environment variables. @@ -491,7 +491,7 @@ var _ = framework.KubeDescribe("Pods", func() { By("submitting the pod to kubernetes") pod = podClient.CreateSync(pod) - req := f.ClientSet.Core().RESTClient().Get(). + req := f.ClientSet.CoreV1().RESTClient().Get(). Namespace(f.Namespace.Name). 
Resource("pods"). Name(pod.Name). @@ -561,7 +561,7 @@ var _ = framework.KubeDescribe("Pods", func() { By("submitting the pod to kubernetes") podClient.CreateSync(pod) - req := f.ClientSet.Core().RESTClient().Get(). + req := f.ClientSet.CoreV1().RESTClient().Get(). Namespace(f.Namespace.Name). Resource("pods"). Name(pod.Name). diff --git a/test/e2e/common/projected.go b/test/e2e/common/projected.go index 6a8fdbbddb8..2e568263e49 100644 --- a/test/e2e/common/projected.go +++ b/test/e2e/common/projected.go @@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("Projected", func() { secret2.Data = map[string][]byte{ "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), } - if secret2, err = f.ClientSet.Core().Secrets(namespace2.Name).Create(secret2); err != nil { + if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil { framework.Failf("unable to create test secret %s: %v", secret2.Name, err) } doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) @@ -125,7 +125,7 @@ var _ = framework.KubeDescribe("Projected", func() { By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -250,12 +250,12 @@ var _ = framework.KubeDescribe("Projected", func() { By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) var err error - if deleteSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil { + if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil { framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) } By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) - if updateSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(updateSecret); err != nil { + if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil { framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) } @@ -377,18 +377,18 @@ var _ = framework.KubeDescribe("Projected", func() { Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) - err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) updateSecret.ResourceVersion = "" // to force update delete(updateSecret.Data, "data-1") updateSecret.Data["data-3"] = []byte("value-3") - _, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Update(updateSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret) Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) - if createSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(createSecret); err != nil { + if createSecret, err = 
f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil { framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) } @@ -496,7 +496,7 @@ var _ = framework.KubeDescribe("Projected", func() { By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -552,7 +552,7 @@ var _ = framework.KubeDescribe("Projected", func() { By(fmt.Sprintf("Updating configmap %v", configMap.Name)) configMap.ResourceVersion = "" // to force update configMap.Data["data-1"] = "value-2" - _, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(configMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap) Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) By("waiting to observe update in volume") @@ -612,12 +612,12 @@ var _ = framework.KubeDescribe("Projected", func() { By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) var err error - if deleteConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil { + if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) } By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) - if updateConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil { + if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) } @@ -739,18 +739,18 @@ var _ = framework.KubeDescribe("Projected", func() { Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) - err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name) By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) updateConfigMap.ResourceVersion = "" // to force update delete(updateConfigMap.Data, "data-1") updateConfigMap.Data["data-3"] = "value-3" - _, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(updateConfigMap) + _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap) Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) - if createConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil { + if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil { framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) } @@ -778,7 +778,7 @@ var _ = framework.KubeDescribe("Projected", func() 
{ By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -1114,11 +1114,11 @@ var _ = framework.KubeDescribe("Projected", func() { } By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } By(fmt.Sprintf("Creating secret with name %s", secret.Name)) - if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -1155,7 +1155,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) var err error - if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -1236,7 +1236,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) var err error - if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -1318,7 +1318,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } @@ -1403,7 +1403,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup in By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error - if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) } diff --git a/test/e2e/common/secrets.go b/test/e2e/common/secrets.go index 92fccb84faa..5eb864c9b5c 100644 --- a/test/e2e/common/secrets.go +++ b/test/e2e/common/secrets.go @@ -41,7 +41,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() { By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", 
secret.Name, err) } @@ -89,7 +89,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() { secret := newEnvFromSecret(f.Namespace.Name, name) By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name)) var err error - if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } diff --git a/test/e2e/common/secrets_volume.go b/test/e2e/common/secrets_volume.go index b1eb513bc90..8ff87fdb8ff 100644 --- a/test/e2e/common/secrets_volume.go +++ b/test/e2e/common/secrets_volume.go @@ -98,7 +98,7 @@ var _ = Describe("[sig-storage] Secrets", func() { secret2.Data = map[string][]byte{ "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), } - if secret2, err = f.ClientSet.Core().Secrets(namespace2.Name).Create(secret2); err != nil { + if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil { framework.Failf("unable to create test secret %s: %v", secret2.Name, err) } doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) @@ -123,7 +123,7 @@ var _ = Describe("[sig-storage] Secrets", func() { By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -233,12 +233,12 @@ var _ = Describe("[sig-storage] Secrets", func() { By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) var err error - if deleteSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil { + if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil { framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) } By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) - if updateSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(updateSecret); err != nil { + if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil { framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) } @@ -336,18 +336,18 @@ var _ = Describe("[sig-storage] Secrets", func() { Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) - err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{}) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) updateSecret.ResourceVersion = "" // to force update delete(updateSecret.Data, "data-1") updateSecret.Data["data-3"] = []byte("value-3") - _, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Update(updateSecret) + _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret) Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) By(fmt.Sprintf("Creating secret with name %s", 
createSecret.Name)) - if createSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(createSecret); err != nil { + if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil { framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) } @@ -383,7 +383,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -455,7 +455,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error - if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } diff --git a/test/e2e/common/sysctl.go b/test/e2e/common/sysctl.go index 989fc3a56f5..1d112f07971 100644 --- a/test/e2e/common/sysctl.go +++ b/test/e2e/common/sysctl.go @@ -171,7 +171,7 @@ var _ = framework.KubeDescribe("Sysctls", func() { }) By("Creating a pod with one valid and two invalid sysctls") - client := f.ClientSet.Core().Pods(f.Namespace.Name) + client := f.ClientSet.CoreV1().Pods(f.Namespace.Name) _, err := client.Create(pod) Expect(err).NotTo(BeNil()) diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index 59daf6c81a9..1ef5d4866f1 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -86,14 +86,14 @@ func svcByName(name string, port int) *v1.Service { func NewSVCByName(c clientset.Interface, ns, name string) error { const testPort = 9376 - _, err := c.Core().Services(ns).Create(svcByName(name, testPort)) + _, err := c.CoreV1().Services(ns).Create(svcByName(name, testPort)) return err } // NewRCByName creates a replication controller with a selector by name of name. func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) { By(fmt.Sprintf("creating replication controller %s", name)) - return c.Core().ReplicationControllers(ns).Create(framework.RcByNamePort( + return c.CoreV1().ReplicationControllers(ns).Create(framework.RcByNamePort( name, replicas, framework.ServeHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod)) } @@ -101,7 +101,7 @@ func RestartNodes(c clientset.Interface, nodeNames []string) error { // List old boot IDs. oldBootIDs := make(map[string]string) for _, name := range nodeNames { - node, err := c.Core().Nodes().Get(name, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("error getting node info before reboot: %s", err) } @@ -123,7 +123,7 @@ func RestartNodes(c clientset.Interface, nodeNames []string) error { // Wait for their boot IDs to change. 
for _, name := range nodeNames { if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) { - node, err := c.Core().Nodes().Get(name, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error getting node info after reboot: %s", err) } diff --git a/test/e2e/common/volumes.go b/test/e2e/common/volumes.go index 5b701980764..5fb440a67c5 100644 --- a/test/e2e/common/volumes.go +++ b/test/e2e/common/volumes.go @@ -139,7 +139,7 @@ var _ = Describe("[sig-storage] GCP Volumes", func() { defer func() { if clean { framework.VolumeTestCleanup(f, config) - err := c.Core().Endpoints(namespace.Name).Delete(name, nil) + err := c.CoreV1().Endpoints(namespace.Name).Delete(name, nil) Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed") } }() diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 3e837595a06..649573f2742 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -386,12 +386,12 @@ func runKubernetesServiceTestContainer(c clientset.Interface, ns string) { return } p.Namespace = ns - if _, err := c.Core().Pods(ns).Create(p); err != nil { + if _, err := c.CoreV1().Pods(ns).Create(p); err != nil { framework.Logf("Failed to create %v: %v", p.Name, err) return } defer func() { - if err := c.Core().Pods(ns).Delete(p.Name, nil); err != nil { + if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil { framework.Logf("Failed to delete pod %v: %v", p.Name, err) } }() diff --git a/test/e2e/events.go b/test/e2e/events.go index 28ce23829f5..c7b074b3293 100644 --- a/test/e2e/events.go +++ b/test/e2e/events.go @@ -38,7 +38,7 @@ var _ = framework.KubeDescribe("Events", func() { It("should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]", func() { - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) By("creating the pod") name := "send-events-" + string(uuid.NewUUID()) @@ -96,7 +96,7 @@ var _ = framework.KubeDescribe("Events", func() { "source": v1.DefaultSchedulerName, }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options) + events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options) if err != nil { return false, err } @@ -116,7 +116,7 @@ var _ = framework.KubeDescribe("Events", func() { "source": "kubelet", }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - events, err = f.ClientSet.Core().Events(f.Namespace.Name).List(options) + events, err = f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options) if err != nil { return false, err } diff --git a/test/e2e/examples.go b/test/e2e/examples.go index e6d37daf946..b9261bf57d8 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -298,7 +298,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"})) err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { - podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: label.String()}) + podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: label.String()}) if err != nil { return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label) } @@ -413,7 +413,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { err := framework.WaitForPodNameRunningInNamespace(c, podName, ns) 
Expect(err).NotTo(HaveOccurred()) for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { - pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName) framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) @@ -577,7 +577,7 @@ func makeHttpRequestToService(c clientset.Interface, ns, service, path string, t var result []byte var err error for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) if errProxy != nil { break } diff --git a/test/e2e/framework/exec_util.go b/test/e2e/framework/exec_util.go index 491f763a234..229c604c47d 100644 --- a/test/e2e/framework/exec_util.go +++ b/test/e2e/framework/exec_util.go @@ -57,7 +57,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) const tty = false - req := f.ClientSet.Core().RESTClient().Post(). + req := f.ClientSet.CoreV1().RESTClient().Post(). Resource("pods"). Name(options.PodName). Namespace(options.Namespace). diff --git a/test/e2e/framework/firewall_util.go b/test/e2e/framework/firewall_util.go index d253fd87806..08bca6a26ca 100644 --- a/test/e2e/framework/firewall_util.go +++ b/test/e2e/framework/firewall_util.go @@ -373,7 +373,7 @@ func WaitForFirewallRule(gceCloud *gcecloud.GCECloud, fwName string, exist bool, } func GetClusterID(c clientset.Interface) (string, error) { - cm, err := c.Core().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{}) + cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{}) if err != nil || cm == nil { return "", fmt.Errorf("error getting cluster ID: %v", err) } diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index 626fed47569..57f0588e0b2 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -495,7 +495,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str } } Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName) - service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&v1.Service{ + service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "service-for-" + appName, Labels: map[string]string{ @@ -521,7 +521,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n // one per node, but no more than maxCount. 
if i <= maxCount { Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName) - _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&v1.Pod{ + _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(appName+"-pod-%v", i), Labels: labels, @@ -707,9 +707,9 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin if len(selectors) > 0 { selector = labels.SelectorFromSet(labels.Set(selectors)) options := metav1.ListOptions{LabelSelector: selector.String()} - pl, err = cli.Core().Pods(ns).List(options) + pl, err = cli.CoreV1().Pods(ns).List(options) } else { - pl, err = cli.Core().Pods(ns).List(metav1.ListOptions{}) + pl, err = cli.CoreV1().Pods(ns).List(metav1.ListOptions{}) } return pl, err } diff --git a/test/e2e/framework/ingress_utils.go b/test/e2e/framework/ingress_utils.go index 982e244e677..27a517604ee 100644 --- a/test/e2e/framework/ingress_utils.go +++ b/test/e2e/framework/ingress_utils.go @@ -312,14 +312,14 @@ func createIngressTLSSecret(kubeClient clientset.Interface, ing *extensions.Ingr }, } var s *v1.Secret - if s, err = kubeClient.Core().Secrets(ing.Namespace).Get(tls.SecretName, metav1.GetOptions{}); err == nil { + if s, err = kubeClient.CoreV1().Secrets(ing.Namespace).Get(tls.SecretName, metav1.GetOptions{}); err == nil { // TODO: Retry the update. We don't really expect anything to conflict though. Logf("Updating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name) s.Data = secret.Data - _, err = kubeClient.Core().Secrets(ing.Namespace).Update(s) + _, err = kubeClient.CoreV1().Secrets(ing.Namespace).Update(s) } else { Logf("Creating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name) - _, err = kubeClient.Core().Secrets(ing.Namespace).Create(secret) + _, err = kubeClient.CoreV1().Secrets(ing.Namespace).Create(secret) } return host, cert, key, err } @@ -1065,7 +1065,7 @@ func (j *IngressTestJig) pollServiceNodePort(ns, name string, port int) { func (j *IngressTestJig) GetIngressNodePorts(includeDefaultBackend bool) []string { nodePorts := []string{} if includeDefaultBackend { - defaultSvc, err := j.Client.Core().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{}) + defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) nodePorts = append(nodePorts, strconv.Itoa(int(defaultSvc.Spec.Ports[0].NodePort))) } @@ -1080,7 +1080,7 @@ func (j *IngressTestJig) GetIngressNodePorts(includeDefaultBackend bool) []strin } } for _, svcName := range backendSvcs { - svc, err := j.Client.Core().Services(j.Ingress.Namespace).Get(svcName, metav1.GetOptions{}) + svc, err := j.Client.CoreV1().Services(j.Ingress.Namespace).Get(svcName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) nodePorts = append(nodePorts, strconv.Itoa(int(svc.Spec.Ports[0].NodePort))) } @@ -1128,7 +1128,7 @@ func (j *IngressTestJig) GetDistinctResponseFromIngress() (sets.String, error) { func (cont *GCEIngressController) getL7AddonUID() (string, error) { Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap) - cm, err := cont.Client.Core().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{}) + cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{}) if err != nil { return "", err } @@ -1172,14 +1172,14 
@@ func (cont *NginxIngressController) Init() { Logf("initializing nginx ingress controller") RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", cont.Ns)) - rc, err := cont.Client.Core().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{}) + rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{}) ExpectNoError(err) cont.rc = rc Logf("waiting for pods with label %v", rc.Spec.Selector) sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector)) ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.Client, cont.Ns, sel)) - pods, err := cont.Client.Core().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()}) + pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()}) ExpectNoError(err) if len(pods.Items) == 0 { Failf("Failed to find nginx ingress controller pods with selector %v", sel) diff --git a/test/e2e/framework/jobs_util.go b/test/e2e/framework/jobs_util.go index 87b620965b1..d61211cac32 100644 --- a/test/e2e/framework/jobs_util.go +++ b/test/e2e/framework/jobs_util.go @@ -215,7 +215,7 @@ func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.D func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.Core().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(options) if err != nil { return false, err } diff --git a/test/e2e/framework/kubelet_stats.go b/test/e2e/framework/kubelet_stats.go index 3ab82cec9a7..7ddd4441a87 100644 --- a/test/e2e/framework/kubelet_stats.go +++ b/test/e2e/framework/kubelet_stats.go @@ -161,7 +161,7 @@ func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor client: c, nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate), } - nodes, err := m.client.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := m.client.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err) } @@ -290,7 +290,7 @@ func getStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, er var data []byte if subResourceProxyAvailable { - data, err = c.Core().RESTClient().Get(). + data, err = c.CoreV1().RESTClient().Get(). Context(ctx). Resource("nodes"). SubResource("proxy"). @@ -299,7 +299,7 @@ func getStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, er Do().Raw() } else { - data, err = c.Core().RESTClient().Get(). + data, err = c.CoreV1().RESTClient().Get(). Context(ctx). Prefix("proxy"). Resource("nodes"). @@ -413,7 +413,7 @@ func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary var data []byte if subResourceProxyAvailable { - data, err = c.Core().RESTClient().Get(). + data, err = c.CoreV1().RESTClient().Get(). Resource("nodes"). SubResource("proxy"). Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)). @@ -422,7 +422,7 @@ func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary Do().Raw() } else { - data, err = c.Core().RESTClient().Get(). + data, err = c.CoreV1().RESTClient().Get(). Prefix("proxy"). Resource("nodes"). Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)). 
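[Editorial note, not part of the patch: the hunks above and below all apply one mechanical change — every call made through the unversioned clientset accessor Core() is rewritten to go through the explicitly versioned CoreV1() accessor of the client-go clientset, making the targeted API group/version visible at each call site. A minimal Go sketch of the before/after pattern follows; the package name, function name, and variables are illustrative assumptions, and the context-free List signature matches the client-go vintage this patch targets rather than newer releases.]

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// listPods sketches the migrated call path; c and ns are placeholder inputs.
func listPods(c clientset.Interface, ns string) error {
	// Before this patch: pods, err := c.Core().Pods(ns).List(metav1.ListOptions{})
	// After this patch: the core/v1 group-version is named explicitly.
	pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("found %d pods in namespace %q\n", len(pods.Items), ns)
	return nil
}

[Both accessors returned the same typed core/v1 client at the time; the explicit form simply survives removal of the deprecated unversioned alias.]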
@@ -700,7 +700,7 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI func (r *ResourceMonitor) Start() { // It should be OK to monitor unschedulable Nodes - nodes, err := r.client.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := r.client.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { Failf("ResourceMonitor: unable to get list of nodes: %v", err) } diff --git a/test/e2e/framework/metrics/api_server_metrics.go b/test/e2e/framework/metrics/api_server_metrics.go index 9f897eb7f21..edc1d82712a 100644 --- a/test/e2e/framework/metrics/api_server_metrics.go +++ b/test/e2e/framework/metrics/api_server_metrics.go @@ -36,7 +36,7 @@ func parseApiServerMetrics(data string) (ApiServerMetrics, error) { } func (g *MetricsGrabber) getMetricsFromApiServer() (string, error) { - rawOutput, err := g.client.Core().RESTClient().Get().RequestURI("/metrics").Do().Raw() + rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do().Raw() if err != nil { return "", err } diff --git a/test/e2e/framework/metrics/kubelet_metrics.go b/test/e2e/framework/metrics/kubelet_metrics.go index 0e6267b513a..cf2d666e8b0 100644 --- a/test/e2e/framework/metrics/kubelet_metrics.go +++ b/test/e2e/framework/metrics/kubelet_metrics.go @@ -65,7 +65,7 @@ func (g *MetricsGrabber) getMetricsFromNode(nodeName string, kubeletPort int) (s var err error var rawOutput []byte go func() { - rawOutput, err = g.client.Core().RESTClient().Get(). + rawOutput, err = g.client.CoreV1().RESTClient().Get(). Resource("nodes"). SubResource("proxy"). Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)). diff --git a/test/e2e/framework/metrics/metrics_grabber.go b/test/e2e/framework/metrics/metrics_grabber.go index f0cbb96ec34..116be3f9997 100644 --- a/test/e2e/framework/metrics/metrics_grabber.go +++ b/test/e2e/framework/metrics/metrics_grabber.go @@ -57,7 +57,7 @@ type MetricsGrabber struct { func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*MetricsGrabber, error) { registeredMaster := false masterName := "" - nodeList, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return nil, err } @@ -101,7 +101,7 @@ func (g *MetricsGrabber) HasRegisteredMaster() bool { } func (g *MetricsGrabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) { - nodes, err := g.client.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector().String()}) + nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector().String()}) if err != nil { return KubeletMetrics{}, err } @@ -210,7 +210,7 @@ func (g *MetricsGrabber) Grab() (MetricsCollection, error) { } if g.grabFromKubelets { result.KubeletMetrics = make(map[string]KubeletMetrics) - nodes, err := g.client.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { errs = append(errs, err) } else { @@ -231,7 +231,7 @@ func (g *MetricsGrabber) Grab() (MetricsCollection, error) { } func (g *MetricsGrabber) getMetricsFromPod(client clientset.Interface, podName string, namespace string, port int) (string, error) { - rawOutput, err := client.Core().RESTClient().Get(). + rawOutput, err := client.CoreV1().RESTClient().Get(). Namespace(namespace). Resource("pods"). 
SubResource("proxy"). diff --git a/test/e2e/framework/metrics_util.go b/test/e2e/framework/metrics_util.go index ad882ddd12e..90443ce9dc3 100644 --- a/test/e2e/framework/metrics_util.go +++ b/test/e2e/framework/metrics_util.go @@ -416,7 +416,7 @@ func VerifyPodStartupLatency(latency *PodStartupLatency) error { // Resets latency metrics in apiserver. func ResetMetrics(c clientset.Interface) error { Logf("Resetting latency metrics in apiserver...") - body, err := c.Core().RESTClient().Delete().AbsPath("/metrics").DoRaw() + body, err := c.CoreV1().RESTClient().Delete().AbsPath("/metrics").DoRaw() if err != nil { return err } @@ -428,7 +428,7 @@ func ResetMetrics(c clientset.Interface) error { // Retrieves metrics information. func getMetrics(c clientset.Interface) (string, error) { - body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw() + body, err := c.CoreV1().RESTClient().Get().AbsPath("/metrics").DoRaw() if err != nil { return "", err } @@ -440,7 +440,7 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) { result := SchedulingLatency{} // Check if master Node is registered - nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) ExpectNoError(err) subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, c.Discovery()) @@ -461,7 +461,7 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) { var rawData []byte if subResourceProxyAvailable { - rawData, err = c.Core().RESTClient().Get(). + rawData, err = c.CoreV1().RESTClient().Get(). Context(ctx). Namespace(metav1.NamespaceSystem). Resource("pods"). @@ -470,7 +470,7 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) { Suffix("metrics"). Do().Raw() } else { - rawData, err = c.Core().RESTClient().Get(). + rawData, err = c.CoreV1().RESTClient().Get(). Context(ctx). Prefix("proxy"). Namespace(metav1.NamespaceSystem). diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go index 496d4aef1d1..aa9a3040e5b 100644 --- a/test/e2e/framework/networking_utils.go +++ b/test/e2e/framework/networking_utils.go @@ -661,11 +661,11 @@ func (config *NetworkingTestConfig) getPodClient() *PodClient { } func (config *NetworkingTestConfig) getServiceClient() coreclientset.ServiceInterface { - return config.f.ClientSet.Core().Services(config.Namespace) + return config.f.ClientSet.CoreV1().Services(config.Namespace) } func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.NamespaceInterface { - return config.f.ClientSet.Core().Namespaces() + return config.f.ClientSet.CoreV1().Namespaces() } func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, namespace, pod, target string) { diff --git a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go index 0926a8a7703..2908d9688fa 100644 --- a/test/e2e/framework/nodes_util.go +++ b/test/e2e/framework/nodes_util.go @@ -233,7 +233,7 @@ func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]str // A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver // knows about all of the nodes. Thus, we retry the list nodes call // until we get the expected number of nodes. 
- nodeList, errLast = c.Core().Nodes().List(metav1.ListOptions{ + nodeList, errLast = c.CoreV1().Nodes().List(metav1.ListOptions{ FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String()}) if errLast != nil { return false, nil diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go index f85d2a2cd98..b564cc70b3c 100644 --- a/test/e2e/framework/pods.go +++ b/test/e2e/framework/pods.go @@ -50,7 +50,7 @@ var ImageWhiteList sets.String func (f *Framework) PodClient() *PodClient { return &PodClient{ f: f, - PodInterface: f.ClientSet.Core().Pods(f.Namespace.Name), + PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name), } } @@ -60,7 +60,7 @@ func (f *Framework) PodClient() *PodClient { func (f *Framework) PodClientNS(namespace string) *PodClient { return &PodClient{ f: f, - PodInterface: f.ClientSet.Core().Pods(namespace), + PodInterface: f.ClientSet.CoreV1().Pods(namespace), } } @@ -223,7 +223,7 @@ func (c *PodClient) WaitForFailure(name string, timeout time.Duration) { func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) { var ev *v1.Event err := wait.Poll(Poll, PodStartTimeout, func() (bool, error) { - evnts, err := c.f.ClientSet.Core().Events(pod.Namespace).Search(legacyscheme.Scheme, pod) + evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(legacyscheme.Scheme, pod) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) } diff --git a/test/e2e/framework/rc_util.go b/test/e2e/framework/rc_util.go index 0e5f9c98c52..b3b20b0f800 100644 --- a/test/e2e/framework/rc_util.go +++ b/test/e2e/framework/rc_util.go @@ -86,7 +86,7 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str // none are running, otherwise it does what a synchronous scale operation would do. func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error { listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()} - rcs, err := clientset.Core().ReplicationControllers(ns).List(listOpts) + rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts) if err != nil { return err } @@ -99,7 +99,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil { return err } - rc, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) + rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) if err != nil { return err } @@ -129,12 +129,12 @@ func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, na var updateErr error pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { var err error - if rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}); err != nil { + if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. 
applyUpdate(rc) - if rc, err = c.Core().ReplicationControllers(namespace).Update(rc); err == nil { + if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(rc); err == nil { Logf("Updating replication controller %q", name) return true, nil } @@ -180,7 +180,7 @@ func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string) // WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false) func WaitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) + _, err := c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) if err != nil { Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err) return !exist, nil @@ -200,7 +200,7 @@ func WaitForReplicationController(c clientset.Interface, namespace, name string, func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - rcs, err := c.Core().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) + rcs, err := c.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) switch { case len(rcs.Items) != 0: Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace) diff --git a/test/e2e/framework/resource_usage_gatherer.go b/test/e2e/framework/resource_usage_gatherer.go index e318842a829..6ed5ff2bffe 100644 --- a/test/e2e/framework/resource_usage_gatherer.go +++ b/test/e2e/framework/resource_usage_gatherer.go @@ -233,7 +233,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt finished: false, }) } else { - pods, err := c.Core().Pods("kube-system").List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods("kube-system").List(metav1.ListOptions{}) if err != nil { Logf("Error while listing Pods: %v", err) return nil, err @@ -243,7 +243,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt g.containerIDs = append(g.containerIDs, container.Name) } } - nodeList, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { Logf("Error while listing Nodes: %v", err) return nil, err diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index a9dab3a201f..91f35757360 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -148,7 +148,7 @@ func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(s if tweak != nil { tweak(svc) } - result, err := j.Client.Core().Services(namespace).Create(svc) + result, err := j.Client.CoreV1().Services(namespace).Create(svc) if err != nil { Failf("Failed to create TCP Service %q: %v", svc.Name, err) } @@ -163,7 +163,7 @@ func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc if tweak != nil { tweak(svc) } - result, err := j.Client.Core().Services(namespace).Create(svc) + result, err := j.Client.CoreV1().Services(namespace).Create(svc) if err != nil { Failf("Failed to create TCP Service %q: %v", svc.Name, err) } @@ -178,7 +178,7 @@ func (j *ServiceTestJig) 
CreateUDPServiceOrFail(namespace string, tweak func(svc if tweak != nil { tweak(svc) } - result, err := j.Client.Core().Services(namespace).Create(svc) + result, err := j.Client.CoreV1().Services(namespace).Create(svc) if err != nil { Failf("Failed to create UDP Service %q: %v", svc.Name, err) } @@ -203,7 +203,7 @@ func (j *ServiceTestJig) CreateExternalNameServiceOrFail(namespace string, tweak if tweak != nil { tweak(svc) } - result, err := j.Client.Core().Services(namespace).Create(svc) + result, err := j.Client.CoreV1().Services(namespace).Create(svc) if err != nil { Failf("Failed to create ExternalName Service %q: %v", svc.Name, err) } @@ -335,7 +335,7 @@ func PickNodeIP(c clientset.Interface) string { // endpoints of the given Service are running. func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string { nodes := j.GetNodes(MaxNodesForEndpointsTests) - endpoints, err := j.Client.Core().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) + endpoints, err := j.Client.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) if err != nil { Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err) } @@ -381,7 +381,7 @@ func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string { func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string) { err := wait.PollImmediate(Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) { - endpoints, err := j.Client.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) + endpoints, err := j.Client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) if err != nil { Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err) return false, nil @@ -461,12 +461,12 @@ func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceT // face of timeouts and conflicts. 
func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.Service)) (*v1.Service, error) { for i := 0; i < 3; i++ { - service, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{}) + service, err := j.Client.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("Failed to get Service %q: %v", name, err) } update(service) - service, err = j.Client.Core().Services(namespace).Update(service) + service, err = j.Client.CoreV1().Services(namespace).Update(service) if err == nil { return service, nil } @@ -558,7 +558,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string func (j *ServiceTestJig) waitForConditionOrFail(namespace, name string, timeout time.Duration, message string, conditionFn func(*v1.Service) bool) *v1.Service { var service *v1.Service pollFunc := func() (bool, error) { - svc, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{}) + svc, err := j.Client.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) if err != nil { return false, err } @@ -679,7 +679,7 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.Replicati if tweak != nil { tweak(rc) } - result, err := j.Client.Core().ReplicationControllers(namespace).Create(rc) + result, err := j.Client.CoreV1().ReplicationControllers(namespace).Create(rc) if err != nil { Failf("Failed to create RC %q: %v", rc.Name, err) } @@ -715,7 +715,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s Logf("Waiting up to %v for %d pods to be created", timeout, replicas) for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := j.Client.Core().Pods(namespace).List(options) + pods, err := j.Client.CoreV1().Pods(namespace).List(options) if err != nil { return nil, err } @@ -783,7 +783,7 @@ func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName pod := newNetexecPodSpec(podName, httpPort, udpPort, hostNetwork) pod.Spec.NodeName = nodeName pod.ObjectMeta.Labels = j.Labels - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) _, err := podClient.Create(pod) ExpectNoError(err) ExpectNoError(f.WaitForPodRunning(podName)) @@ -819,7 +819,7 @@ func (j *ServiceTestJig) LaunchEchoserverPodOnNode(f *Framework, nodeName, podNa pod := newEchoServerPodSpec(podName) pod.Spec.NodeName = nodeName pod.ObjectMeta.Labels = j.Labels - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) _, err := podClient.Create(pod) ExpectNoError(err) ExpectNoError(f.WaitForPodRunning(podName)) @@ -993,7 +993,7 @@ func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *v1.ReplicationCo // CreateRC creates a replication controller and records it for cleanup. 
func (t *ServiceTestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) { - rc, err := t.Client.Core().ReplicationControllers(t.Namespace).Create(rc) + rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(rc) if err == nil { t.rcs[rc.Name] = true } @@ -1002,7 +1002,7 @@ func (t *ServiceTestFixture) CreateRC(rc *v1.ReplicationController) (*v1.Replica // Create a service, and record it for cleanup func (t *ServiceTestFixture) CreateService(service *v1.Service) (*v1.Service, error) { - result, err := t.Client.Core().Services(t.Namespace).Create(service) + result, err := t.Client.CoreV1().Services(t.Namespace).Create(service) if err == nil { t.services[service.Name] = true } @@ -1011,7 +1011,7 @@ func (t *ServiceTestFixture) CreateService(service *v1.Service) (*v1.Service, er // Delete a service, and remove it from the cleanup list func (t *ServiceTestFixture) DeleteService(serviceName string) error { - err := t.Client.Core().Services(t.Namespace).Delete(serviceName, nil) + err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) if err == nil { delete(t.services, serviceName) } @@ -1024,7 +1024,7 @@ func (t *ServiceTestFixture) Cleanup() []error { By("stopping RC " + rcName + " in namespace " + t.Namespace) err := retry.RetryOnConflict(retry.DefaultRetry, func() error { // First, resize the RC to 0. - old, err := t.Client.Core().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{}) + old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return nil @@ -1033,7 +1033,7 @@ func (t *ServiceTestFixture) Cleanup() []error { } x := int32(0) old.Spec.Replicas = &x - if _, err := t.Client.Core().ReplicationControllers(t.Namespace).Update(old); err != nil { + if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(old); err != nil { if errors.IsNotFound(err) { return nil } @@ -1046,7 +1046,7 @@ func (t *ServiceTestFixture) Cleanup() []error { } // TODO(mikedanese): Wait. // Then, delete the RC altogether. 
- if err := t.Client.Core().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { + if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { if !errors.IsNotFound(err) { errs = append(errs, err) } @@ -1055,7 +1055,7 @@ func (t *ServiceTestFixture) Cleanup() []error { for serviceName := range t.services { By("deleting service " + serviceName + " in namespace " + t.Namespace) - err := t.Client.Core().Services(t.Namespace).Delete(serviceName, nil) + err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) if err != nil { if !errors.IsNotFound(err) { errs = append(errs, err) @@ -1081,14 +1081,14 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update var service *v1.Service var err error for i := 0; i < 3; i++ { - service, err = c.Core().Services(namespace).Get(serviceName, metav1.GetOptions{}) + service, err = c.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{}) if err != nil { return service, err } update(service) - service, err = c.Core().Services(namespace).Update(service) + service, err = c.CoreV1().Services(namespace).Update(service) if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { return service, err @@ -1136,7 +1136,7 @@ func translatePodNameToUIDOrFail(c clientset.Interface, ns string, expectedEndpo portsByUID := make(PortsByPodUID) for name, portList := range expectedEndpoints { - pod, err := c.Core().Pods(ns).Get(name, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{}) if err != nil { Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err) } @@ -1172,7 +1172,7 @@ func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) i := 1 for start := time.Now(); time.Since(start) < ServiceStartTimeout; time.Sleep(1 * time.Second) { - endpoints, err := c.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) + endpoints, err := c.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) if err != nil { Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err) continue @@ -1196,7 +1196,7 @@ func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin i++ } - if pods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil { + if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil { for _, pod := range pods.Items { Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp) } @@ -1211,7 +1211,7 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli podNames := make([]string, replicas) By("creating service " + name + " in namespace " + ns) - _, err := c.Core().Services(ns).Create(&v1.Service{ + _, err := c.CoreV1().Services(ns).Create(&v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -1258,7 +1258,7 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli } sort.StringSlice(podNames).Sort() - service, err := c.Core().Services(ns).Get(name, metav1.GetOptions{}) + service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) if err != nil { return podNames, "", err } @@ -1273,7 +1273,7 @@ func StopServeHostnameService(clientset clientset.Interface, internalClientset i if err := 
DeleteRCAndPods(clientset, internalClientset, ns, name); err != nil { return err } - if err := clientset.Core().Services(ns).Delete(name, nil); err != nil { + if err := clientset.CoreV1().Services(ns).Delete(name, nil); err != nil { return err } return nil diff --git a/test/e2e/framework/statefulset_utils.go b/test/e2e/framework/statefulset_utils.go index 426553d99a1..86ca9ae7e06 100644 --- a/test/e2e/framework/statefulset_utils.go +++ b/test/e2e/framework/statefulset_utils.go @@ -175,7 +175,7 @@ func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) { func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *apps.StatefulSet) { name := getStatefulSetPodNameAtIndex(index, ss) noGrace := int64(0) - if err := s.c.Core().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { + if err := s.c.CoreV1().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err) } } @@ -186,7 +186,7 @@ type VerifyStatefulPodFunc func(*v1.Pod) // VerifyPodAtIndex applies a visitor patter to the Pod at index in ss. verify is is applied to the Pod to "visit" it. func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *apps.StatefulSet, verify VerifyStatefulPodFunc) { name := getStatefulSetPodNameAtIndex(index, ss) - pod, err := s.c.Core().Pods(ss.Namespace).Get(name, metav1.GetOptions{}) + pod, err := s.c.CoreV1().Pods(ss.Namespace).Get(name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name)) verify(pod) } @@ -266,7 +266,7 @@ func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.Statefu func (s *StatefulSetTester) GetPodList(ss *apps.StatefulSet) *v1.PodList { selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector) ExpectNoError(err) - podList, err := s.c.Core().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) + podList, err := s.c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) ExpectNoError(err) return podList } @@ -701,7 +701,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { pvNames := sets.NewString() // TODO: Don't assume all pvcs in the ns belong to a statefulset pvcPollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { - pvcList, err := c.Core().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { Logf("WARNING: Failed to list pvcs, retrying %v", err) return false, nil @@ -710,7 +710,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { pvNames.Insert(pvc.Spec.VolumeName) // TODO: Double check that there are no pods referencing the pvc Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName) - if err := c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil { + if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, nil); err != nil { return false, nil } } @@ -721,7 +721,7 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { } pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { - pvList, err := c.Core().PersistentVolumes().List(metav1.ListOptions{LabelSelector: 
labels.Everything().String()}) + pvList, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { Logf("WARNING: Failed to list pvs, retrying %v", err) return false, nil diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index ae09062b553..3fb3d09f0ce 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -539,7 +539,7 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s start, badPods, desiredPods := time.Now(), []v1.Pod{}, 0 if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) { - podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: successPodSelector.String()}) + podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: successPodSelector.String()}) if err != nil { Logf("Error getting pods in namespace %q: %v", ns, err) if IsRetryableAPIError(err) { @@ -601,7 +601,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN // checked. replicas, replicaOk := int32(0), int32(0) - rcList, err := c.Core().ReplicationControllers(ns).List(metav1.ListOptions{}) + rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{}) if err != nil { Logf("Error getting replication controllers in namespace '%s': %v", ns, err) if IsRetryableAPIError(err) { @@ -627,7 +627,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN replicaOk += rs.Status.ReadyReplicas } - podList, err := c.Core().Pods(ns).List(metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) if err != nil { Logf("Error getting pods in namespace '%s': %v", ns, err) if IsRetryableAPIError(err) { @@ -702,7 +702,7 @@ func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string } func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) { - podList, err := c.Core().Pods(ns).List(metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) if err != nil { logFunc("Error getting pods in namespace '%s': %v", ns, err) return @@ -716,7 +716,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri } func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) { - podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) + podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) if err != nil { logFunc("Error getting pods in namespace %q: %v", ns, err) return @@ -728,7 +728,7 @@ func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string } func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) { - podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) + podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()}) if err != nil { Logf("Error getting pods in namespace %q: %v", ns, err) return @@ -743,7 +743,7 @@ func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[s // Returns the list of deleted namespaces or an error. 
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) { By("Deleting namespaces") - nsList, err := c.Core().Namespaces().List(metav1.ListOptions{}) + nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) var deleted []string var wg sync.WaitGroup @@ -773,7 +773,7 @@ OUTER: go func(nsName string) { defer wg.Done() defer GinkgoRecover() - Expect(c.Core().Namespaces().Delete(nsName, nil)).To(Succeed()) + Expect(c.CoreV1().Namespaces().Delete(nsName, nil)).To(Succeed()) Logf("namespace : %v api call to delete is complete ", nsName) }(item.Name) } @@ -790,7 +790,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou //Now POLL until all namespaces have been eradicated. return wait.Poll(2*time.Second, timeout, func() (bool, error) { - nsList, err := c.Core().Namespaces().List(metav1.ListOptions{}) + nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) if err != nil { return false, err } @@ -804,7 +804,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou } func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error { - w, err := c.Core().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName})) + w, err := c.CoreV1().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName})) if err != nil { return err } @@ -815,7 +815,7 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { if apierrs.IsNotFound(err) { Logf("Pod %q in namespace %q not found. 
Error: %v", podName, ns, err) @@ -842,7 +842,7 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pods, err := c.Core().Pods(metav1.NamespaceAll).List(opts) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts) if err != nil { return err } @@ -875,7 +875,7 @@ func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace st func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) if err != nil { Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err) continue @@ -895,7 +895,7 @@ func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.In func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) if err == nil { Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start)) continue @@ -915,7 +915,7 @@ func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) + pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) if err != nil { Logf("Failed to get claim %q, retrying in %v. 
Error: %v", pvcName, Poll, err) continue @@ -951,7 +951,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s var got *v1.Namespace if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { var err error - got, err = c.Core().Namespaces().Create(namespaceObj) + got, err = c.CoreV1().Namespaces().Create(namespaceObj) if err != nil { Logf("Unexpected error while creating namespace: %v", err) return false, nil @@ -990,7 +990,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { Logf("Waiting for terminating namespaces to be deleted...") for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) { - namespaces, err := c.Core().Namespaces().List(metav1.ListOptions{}) + namespaces, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) if err != nil { Logf("Listing namespaces failed: %v", err) continue @@ -1015,13 +1015,13 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { // whether there are any pods remaining in a non-terminating state. func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace string, timeout time.Duration) error { startTime := time.Now() - if err := c.Core().Namespaces().Delete(namespace, nil); err != nil { + if err := c.CoreV1().Namespaces().Delete(namespace, nil); err != nil { return err } // wait for namespace to delete or timeout. err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) { - if _, err := c.Core().Namespaces().Get(namespace, metav1.GetOptions{}); err != nil { + if _, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}); err != nil { if apierrs.IsNotFound(err) { return true, nil } @@ -1075,7 +1075,7 @@ func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace st // logNamespaces logs the number of namespaces by phase // namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs func logNamespaces(c clientset.Interface, namespace string) { - namespaceList, err := c.Core().Namespaces().List(metav1.ListOptions{}) + namespaceList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) if err != nil { Logf("namespace: %v, unable to list namespaces: %v", namespace, err) return @@ -1095,7 +1095,7 @@ func logNamespaces(c clientset.Interface, namespace string) { // logNamespace logs detail about a namespace func logNamespace(c clientset.Interface, namespace string) { - ns, err := c.Core().Namespaces().Get(namespace, metav1.GetOptions{}) + ns, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}) if err != nil { if apierrs.IsNotFound(err) { Logf("namespace: %v no longer exists", namespace) @@ -1110,7 +1110,7 @@ func logNamespace(c clientset.Interface, namespace string) { // countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp. 
func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) { // check for remaining pods - pods, err := c.Core().Pods(namespace).List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{}) if err != nil { return 0, 0, err } @@ -1346,7 +1346,7 @@ func waitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespa func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.Core().Pods(namespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1372,7 +1372,7 @@ func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.Core().Pods(namespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1390,7 +1390,7 @@ func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.Core().Pods(namespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1413,7 +1413,7 @@ func WaitForPodNotPending(c clientset.Interface, ns, podName string) error { func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc { return func() (bool, error) { - pod, err := c.Core().Pods(namespace).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1497,7 +1497,7 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D "metadata.name": name, "metadata.namespace": ns, }.AsSelector().String()} - w, err := c.Core().ReplicationControllers(ns).Watch(options) + w, err := c.CoreV1().ReplicationControllers(ns).Watch(options) if err != nil { return err } @@ -1525,7 +1525,7 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe return wait.PollImmediate(interval, timeout, func() (bool, error) { Logf("Waiting for pod %s to disappear", podName) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.Core().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(options) if err != nil { if IsRetryableAPIError(err) { return false, nil @@ -1551,7 +1551,7 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe // WaitForService waits until the service appears (exist == true), or disappears (exist == false) func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - _, err := c.Core().Services(namespace).Get(name, metav1.GetOptions{}) + _, err := c.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) switch { case err == nil: Logf("Service %s in namespace %s found.", name, namespace) @@ -1578,7 +1578,7 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i func WaitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, timeout time.Duration) 
error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { - services, err := c.Core().Services(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) + services, err := c.CoreV1().Services(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) switch { case len(services.Items) != 0: Logf("Service with %s in namespace %s found.", selector.String(), namespace) @@ -1605,7 +1605,7 @@ func WaitForServiceWithSelector(c clientset.Interface, namespace string, selecto func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error { return wait.Poll(interval, timeout, func() (bool, error) { Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum) - list, err := c.Core().Endpoints(namespace).List(metav1.ListOptions{}) + list, err := c.CoreV1().Endpoints(namespace).List(metav1.ListOptions{}) if err != nil { return false, err } @@ -1629,7 +1629,7 @@ func countEndpointsNum(e *v1.Endpoints) int { func WaitForEndpoint(c clientset.Interface, ns, name string) error { for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) { - endpoint, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{}) + endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{}) if apierrs.IsNotFound(err) { Logf("Endpoint %s/%s is not ready yet", ns, name) continue @@ -1665,7 +1665,7 @@ func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Sele func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) { successes := 0 options := metav1.ListOptions{LabelSelector: r.label.String()} - currentPods, err := r.c.Core().Pods(r.ns).List(options) + currentPods, err := r.c.CoreV1().Pods(r.ns).List(options) Expect(err).NotTo(HaveOccurred()) for i, pod := range r.pods.Items { // Check that the replica list remains unchanged, otherwise we have problems. @@ -1682,7 +1682,7 @@ func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) { var body []byte if subResourceProxyAvailable { - body, err = r.c.Core().RESTClient().Get(). + body, err = r.c.CoreV1().RESTClient().Get(). Context(ctx). Namespace(r.ns). Resource("pods"). @@ -1691,7 +1691,7 @@ func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) { Do(). Raw() } else { - body, err = r.c.Core().RESTClient().Get(). + body, err = r.c.CoreV1().RESTClient().Get(). Context(ctx). Prefix("proxy"). Namespace(r.ns). @@ -1806,7 +1806,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, options := metav1.ListOptions{LabelSelector: label.String()} // List the pods, making sure we observe all the replicas. 
- pods, err := c.Core().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(options) if err != nil { return nil, err } @@ -1881,7 +1881,7 @@ func ServiceResponding(c clientset.Interface, ns, name string) error { By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) { - proxyRequest, errProxy := GetServicesProxyRequest(c, c.Core().RESTClient().Get()) + proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) if errProxy != nil { Logf("Failed to get services proxy request: %v:", errProxy) return false, nil @@ -2298,7 +2298,7 @@ func DumpEventsInNamespace(eventsLister EventsLister, namespace string) { func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { DumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) { - return c.Core().Events(ns).List(opts) + return c.CoreV1().Events(ns).List(opts) }, namespace) // If cluster is large, then the following logs are basically useless, because: @@ -2306,7 +2306,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { // 2. there are so many of them that working with them are mostly impossible // So we dump them only if the cluster is relatively small. maxNodesForDump := 20 - if nodes, err := c.Core().Nodes().List(metav1.ListOptions{}); err == nil { + if nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}); err == nil { if len(nodes.Items) <= maxNodesForDump { dumpAllPodInfo(c) dumpAllNodeInfo(c) @@ -2332,7 +2332,7 @@ func (o byFirstTimestamp) Less(i, j int) bool { } func dumpAllPodInfo(c clientset.Interface) { - pods, err := c.Core().Pods("").List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{}) if err != nil { Logf("unable to fetch pod debug info: %v", err) } @@ -2341,7 +2341,7 @@ func dumpAllPodInfo(c clientset.Interface) { func dumpAllNodeInfo(c clientset.Interface) { // It should be OK to list unschedulable Nodes here. 
- nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { Logf("unable to fetch node list: %v", err) return @@ -2356,7 +2356,7 @@ func dumpAllNodeInfo(c clientset.Interface) { func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) { for _, n := range nodeNames { logFunc("\nLogging node info for node %v", n) - node, err := c.Core().Nodes().Get(n, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(n, metav1.GetOptions{}) if err != nil { logFunc("Error getting node info %v", err) } @@ -2400,7 +2400,7 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event { "source": "kubelet", }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - events, err := c.Core().Events(metav1.NamespaceSystem).List(options) + events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(options) if err != nil { Logf("Unexpected error retrieving node events %v", err) return []v1.Event{} @@ -2413,7 +2413,7 @@ func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList { var nodes *v1.NodeList var err error if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { - nodes, err = c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -2496,7 +2496,7 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er ResourceVersion: "0", FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(), } - nodes, err := c.Core().Nodes().List(opts) + nodes, err := c.CoreV1().Nodes().List(opts) if err != nil { Logf("Unexpected error listing nodes: %v", err) if IsRetryableAPIError(err) { @@ -2549,7 +2549,7 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration { } func GetNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) { - nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil || len(nodes.Items) == 0 { return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err) } @@ -2576,7 +2576,7 @@ func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, la func AddOrUpdateLabelOnNodeAndReturnOldValue(c clientset.Interface, nodeName string, labelKey, labelValue string) string { var oldValue string - node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) ExpectNoError(err) oldValue = node.Labels[labelKey] ExpectNoError(testutil.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue})) @@ -2585,7 +2585,7 @@ func AddOrUpdateLabelOnNodeAndReturnOldValue(c clientset.Interface, nodeName str func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) { By("verifying the node has the label " + labelKey + " " + labelValue) - node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) ExpectNoError(err) Expect(node.Labels[labelKey]).To(Equal(labelValue)) } @@ -2611,7 +2611,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) func VerifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) { By("verifying the node 
doesn't have the taint " + taint.ToString()) - nodeUpdated, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + nodeUpdated, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) ExpectNoError(err) if taintutils.TaintExists(nodeUpdated.Spec.Taints, taint) { Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName) @@ -2627,7 +2627,7 @@ func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) } func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) { - node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) if err != nil { return false, err } @@ -2783,7 +2783,7 @@ func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label label func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) { options := metav1.ListOptions{LabelSelector: label.String()} - pods, err = c.Core().Pods(ns).List(options) + pods, err = c.CoreV1().Pods(ns).List(options) if err != nil { if IsRetryableAPIError(err) { continue @@ -2832,7 +2832,7 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) { switch kind { case api.Kind("ReplicationController"): - return c.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) + return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) case extensionsinternal.Kind("ReplicaSet"): return c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{}) case extensionsinternal.Kind("Deployment"): @@ -2849,7 +2849,7 @@ func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, n func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name string, deleteOption *metav1.DeleteOptions) error { switch kind { case api.Kind("ReplicationController"): - return c.Core().ReplicationControllers(ns).Delete(name, deleteOption) + return c.CoreV1().ReplicationControllers(ns).Delete(name, deleteOption) case extensionsinternal.Kind("ReplicaSet"): return c.Extensions().ReplicaSets(ns).Delete(name, deleteOption) case extensionsinternal.Kind("Deployment"): @@ -3082,7 +3082,7 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := metav1.ListOptions{LabelSelector: label.String()} return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { - pods, err := c.Core().Pods(ns).List(options) + pods, err := c.CoreV1().Pods(ns).List(options) if err != nil { return false, nil } @@ -3098,7 +3098,7 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in // Waits for the number of events on the given object to reach a desired count. 
func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error { return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { - events, err := c.Core().Events(ns).Search(legacyscheme.Scheme, objOrRef) + events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, objOrRef) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) } @@ -3117,7 +3117,7 @@ func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, de // Waits for the number of events on the given object to be at least a desired count. func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Object, atLeastEventsCount int) error { return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { - events, err := c.Core().Events(ns).Search(legacyscheme.Scheme, objOrRef) + events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, objOrRef) if err != nil { return false, fmt.Errorf("error in listing events: %s", err) } @@ -3352,7 +3352,7 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration // until it's Running func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod { hostExecPod := NewHostExecPodSpec(ns, name) - pod, err := client.Core().Pods(ns).Create(hostExecPod) + pod, err := client.CoreV1().Pods(ns).Create(hostExecPod) ExpectNoError(err) err = WaitForPodRunningInNamespace(client, pod) ExpectNoError(err) @@ -3390,10 +3390,10 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw if tweak != nil { tweak(execPod) } - created, err := client.Core().Pods(ns).Create(execPod) + created, err := client.CoreV1().Pods(ns).Create(execPod) Expect(err).NotTo(HaveOccurred()) err = wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) { - retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{}) + retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{}) if err != nil { if IsRetryableAPIError(err) { return false, nil @@ -3426,13 +3426,13 @@ func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]s }, }, } - _, err := c.Core().Pods(ns).Create(pod) + _, err := c.CoreV1().Pods(ns).Create(pod) Expect(err).NotTo(HaveOccurred()) } func DeletePodOrFail(c clientset.Interface, ns, name string) { By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) - err := c.Core().Pods(ns).Delete(name, nil) + err := c.CoreV1().Pods(ns).Delete(name, nil) Expect(err).NotTo(HaveOccurred()) } @@ -3628,7 +3628,7 @@ func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) boo func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool { Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue) for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - node, err := c.Core().Nodes().Get(name, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { Logf("Couldn't get node %s", name) continue @@ -3653,7 +3653,7 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error { err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. 
- nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { if IsRetryableAPIError(err) { return false, nil @@ -3697,7 +3697,7 @@ func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error err := wait.PollImmediate(Poll, timeout, func() (bool, error) { notReady = nil // It should be OK to list unschedulable Nodes here. - nodes, err := c.Core().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) if err != nil { if IsRetryableAPIError(err) { return false, nil @@ -3709,7 +3709,7 @@ func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error notReady = append(notReady, node) } } - pods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"}) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } @@ -3901,7 +3901,7 @@ func sshRestartMaster() error { func WaitForApiserverUp(c clientset.Interface) error { for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { - body, err := c.Core().RESTClient().Get().AbsPath("/healthz").Do().Raw() + body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do().Raw() if err == nil && string(body) == "ok" { return nil } @@ -3970,7 +3970,7 @@ func CheckForControllerManagerHealthy(duration time.Duration) error { // Returns number of ready Nodes excluding Master Node. func NumberOfReadyNodes(c clientset.Interface) (int, error) { - nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -3989,7 +3989,7 @@ func NumberOfReadyNodes(c clientset.Interface) (int, error) { // By cluster size we mean number of Nodes excluding Master Node. func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -4020,7 +4020,7 @@ func GenerateMasterRegexp(prefix string) string { // waitForMasters waits until the cluster has the desired number of ready masters in it. func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error { for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { - nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { Logf("Failed to list nodes: %v", err) continue @@ -4058,7 +4058,7 @@ func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeou // address. Returns an error if the node the pod is on doesn't have an External // address. 
func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) { - node, err := client.Core().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{}) + node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{}) if err != nil { return "", err } @@ -4208,7 +4208,7 @@ func LookForString(expectedString string, timeout time.Duration, fn func() strin // getSvcNodePort returns the node port for the given service:port. func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) { - svc, err := client.Core().Services(ns).Get(name, metav1.GetOptions{}) + svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) if err != nil { return 0, err } @@ -4234,7 +4234,7 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s // kube-proxy NodePorts won't work. var nodes *v1.NodeList if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) { - nodes, err = client.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ + nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { @@ -4273,7 +4273,7 @@ func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName // utility function for gomega Eventually func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) { - logs, err := c.Core().RESTClient().Get(). + logs, err := c.CoreV1().RESTClient().Get(). Resource("pods"). Namespace(namespace). Name(podName).SubResource("log"). @@ -4412,7 +4412,7 @@ func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient. finished := make(chan struct{}) go func() { if subResourceProxyAvailable { - result = c.Core().RESTClient().Get(). + result = c.CoreV1().RESTClient().Get(). Resource("nodes"). SubResource("proxy"). Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)). @@ -4420,7 +4420,7 @@ func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient. Do() } else { - result = c.Core().RESTClient().Get(). + result = c.CoreV1().RESTClient().Get(). Prefix("proxy"). Resource("nodes"). Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)). 
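For reference, a minimal sketch of the Core() -> CoreV1() substitution applied throughout this patch (illustrative only; the package, file, and function names below are not from the patch, and it assumes the client-go vintage these tests build against, where Core() is a deprecated alias that already returns the CoreV1 client):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// listKubeSystemPods shows the before/after shape of the change: the typed
// CoreV1 client is the same object Core() already returned, so behaviour is
// unchanged and only the call site gains an explicit API version.
func listKubeSystemPods(c clientset.Interface) error {
	// Before: pods, err := c.Core().Pods(metav1.NamespaceSystem).List(metav1.ListOptions{})
	pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range pods.Items {
		_ = pods.Items[i].Name // consume the result
	}
	return nil
}
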
@@ -4485,7 +4485,7 @@ func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) { RestartPolicy: v1.RestartPolicyNever, }, } - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) _, err := podClient.Create(pod) ExpectNoError(err) ExpectNoError(f.WaitForPodRunning(podName)) @@ -4534,7 +4534,7 @@ func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, pingC RestartPolicy: v1.RestartPolicyNever, }, } - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) _, err := podClient.Create(pod) if err != nil { return err @@ -4577,12 +4577,12 @@ func CoreDump(dir string) { func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*v1.Pod)) (*v1.Pod, error) { for i := 0; i < 3; i++ { - pod, err := client.Core().Pods(ns).Get(name, metav1.GetOptions{}) + pod, err := client.CoreV1().Pods(ns).Get(name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("Failed to get pod %q: %v", name, err) } update(pod) - pod, err = client.Core().Pods(ns).Update(pod) + pod, err = client.CoreV1().Pods(ns).Update(pod) if err == nil { return pod, nil } @@ -4594,7 +4594,7 @@ func UpdatePodWithRetries(client clientset.Interface, ns, name string, update fu } func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) { - pods, err := c.Core().Pods(ns).List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) if err != nil { return []*v1.Pod{}, err } @@ -4683,7 +4683,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { timeout := 10 * time.Minute startTime := time.Now() - allPods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) + allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) ExpectNoError(err) // API server returns also Pods that succeeded. We need to filter them out. currentPods := make([]v1.Pod, 0, len(allPods.Items)) @@ -4698,7 +4698,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { for len(currentlyNotScheduledPods) != 0 { time.Sleep(2 * time.Second) - allPods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) + allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) ExpectNoError(err) scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods) @@ -4714,7 +4714,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) { nodes := &v1.NodeList{} masters := sets.NewString() - all, _ := c.Core().Nodes().List(metav1.ListOptions{}) + all, _ := c.CoreV1().Nodes().List(metav1.ListOptions{}) for _, n := range all.Items { if system.IsMasterNode(n.Name) { masters.Insert(n.Name) @@ -4726,7 +4726,7 @@ func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeL } func ListNamespaceEvents(c clientset.Interface, ns string) error { - ls, err := c.Core().Events(ns).List(metav1.ListOptions{}) + ls, err := c.CoreV1().Events(ns).List(metav1.ListOptions{}) if err != nil { return err } @@ -4855,7 +4855,7 @@ func getMaster(c clientset.Interface) Address { master := Address{} // Populate the internal IP. 
- eps, err := c.Core().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) + eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) if err != nil { Failf("Failed to get kubernetes endpoints: %v", err) } @@ -5035,7 +5035,7 @@ func PrintSummaries(summaries []TestDataSummary, testBaseName string) { } func DumpDebugInfo(c clientset.Interface, ns string) { - sl, _ := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) for _, s := range sl.Items { desc, _ := RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns)) Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc) diff --git a/test/e2e/instrumentation/logging/utils/logging_agent.go b/test/e2e/instrumentation/logging/utils/logging_agent.go index f42dc28c6da..e2452091b88 100644 --- a/test/e2e/instrumentation/logging/utils/logging_agent.go +++ b/test/e2e/instrumentation/logging/utils/logging_agent.go @@ -87,5 +87,5 @@ func EnsureLoggingAgentRestartsCount(f *framework.Framework, appName string, max func getLoggingAgentPods(f *framework.Framework, appName string) (*api_v1.PodList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": appName})) options := meta_v1.ListOptions{LabelSelector: label.String()} - return f.ClientSet.Core().Pods(api.NamespaceSystem).List(options) + return f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options) } diff --git a/test/e2e/instrumentation/monitoring/cadvisor.go b/test/e2e/instrumentation/monitoring/cadvisor.go index 76bd4a0cf76..439bccbc480 100644 --- a/test/e2e/instrumentation/monitoring/cadvisor.go +++ b/test/e2e/instrumentation/monitoring/cadvisor.go @@ -40,7 +40,7 @@ var _ = instrumentation.SIGDescribe("Cadvisor", func() { func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) { // It should be OK to list unschedulable Nodes here. By("getting list of nodes") - nodeList, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) var errors []error @@ -70,7 +70,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) // Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally. 
statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name) By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource)) - _, err = c.Core().RESTClient().Get().AbsPath(statsResource).Timeout(timeout).Do().Raw() + _, err = c.CoreV1().RESTClient().Get().AbsPath(statsResource).Timeout(timeout).Do().Raw() if err != nil { errors = append(errors, err) } diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index 8b615296937..a739df480db 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -161,21 +161,21 @@ func testAdapter(f *framework.Framework, kubeClient clientset.Interface, customM } func cleanupSDExporterPod(f *framework.Framework, cs clientset.Interface) { - err := cs.Core().Pods(f.Namespace.Name).Delete(stackdriverExporterPod1, &metav1.DeleteOptions{}) + err := cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod1, &metav1.DeleteOptions{}) if err != nil { framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err) } - err = cs.Core().Pods(f.Namespace.Name).Delete(stackdriverExporterPod2, &metav1.DeleteOptions{}) + err = cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod2, &metav1.DeleteOptions{}) if err != nil { framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err) } } func createSDExporterPods(f *framework.Framework, cs clientset.Interface) error { - _, err := cs.Core().Pods(f.Namespace.Name).Create(StackdriverExporterPod(stackdriverExporterPod1, f.Namespace.Name, stackdriverExporterLabel, CustomMetricName, CustomMetricValue)) + _, err := cs.CoreV1().Pods(f.Namespace.Name).Create(StackdriverExporterPod(stackdriverExporterPod1, f.Namespace.Name, stackdriverExporterLabel, CustomMetricName, CustomMetricValue)) if err != nil { return err } - _, err = cs.Core().Pods(f.Namespace.Name).Create(StackdriverExporterPod(stackdriverExporterPod2, f.Namespace.Name, stackdriverExporterLabel, UnusedMetricName, UnusedMetricValue)) + _, err = cs.CoreV1().Pods(f.Namespace.Name).Create(StackdriverExporterPod(stackdriverExporterPod2, f.Namespace.Name, stackdriverExporterLabel, UnusedMetricName, UnusedMetricValue)) return err } diff --git a/test/e2e/instrumentation/monitoring/influxdb.go b/test/e2e/instrumentation/monitoring/influxdb.go index 0ea374a977a..0d181a9c580 100644 --- a/test/e2e/instrumentation/monitoring/influxdb.go +++ b/test/e2e/instrumentation/monitoring/influxdb.go @@ -76,7 +76,7 @@ func Query(c clientset.Interface, query string) (*influxdb.Response, error) { var result []byte if subResourceProxyAvailable { - result, err = c.Core().RESTClient().Get(). + result, err = c.CoreV1().RESTClient().Get(). Context(ctx). Namespace("kube-system"). Resource("services"). @@ -89,7 +89,7 @@ func Query(c clientset.Interface, query string) (*influxdb.Response, error) { Do(). Raw() } else { - result, err = c.Core().RESTClient().Get(). + result, err = c.CoreV1().RESTClient().Get(). Context(ctx). Prefix("proxy"). Namespace("kube-system"). 
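The same one-token substitution covers raw REST access such as the proxy calls above; a hedged sketch (helper name hypothetical, same client-go vintage assumed):

package example

import clientset "k8s.io/client-go/kubernetes"

// readHealthz mirrors the RESTClient() call sites touched in this patch:
// CoreV1().RESTClient() is the REST client scoped to the core/v1 group,
// which is what Core().RESTClient() already resolved to.
func readHealthz(c clientset.Interface) (string, error) {
	// Before: body, err := c.Core().RESTClient().Get().AbsPath("/healthz").Do().Raw()
	body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do().Raw()
	if err != nil {
		return "", err
	}
	return string(body), nil
}
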
@@ -138,7 +138,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, if err != nil { return nil, err } - rcList, err := c.Core().ReplicationControllers(metav1.NamespaceSystem).List(options) + rcList, err := c.CoreV1().ReplicationControllers(metav1.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -154,7 +154,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, for _, rc := range rcList.Items { selector := labels.Set(rc.Spec.Selector).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options) + podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -169,7 +169,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, for _, rc := range deploymentList.Items { selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options) + podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -184,7 +184,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, for _, ps := range psList.Items { selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options) + podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(options) if err != nil { return nil, err } @@ -200,7 +200,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string, } func expectedServicesExist(c clientset.Interface) error { - serviceList, err := c.Core().Services(metav1.NamespaceSystem).List(metav1.ListOptions{}) + serviceList, err := c.CoreV1().Services(metav1.NamespaceSystem).List(metav1.ListOptions{}) if err != nil { return err } @@ -219,7 +219,7 @@ func expectedServicesExist(c clientset.Interface) error { func getAllNodesInCluster(c clientset.Interface) ([]string, error) { // It should be OK to list unschedulable Nodes here. 
- nodeList, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return nil, err } @@ -331,7 +331,7 @@ func testMonitoringUsingHeapsterInfluxdb(c clientset.Interface) { func printDebugInfo(c clientset.Interface) { set := labels.Set{"k8s-app": "heapster"} options := metav1.ListOptions{LabelSelector: set.AsSelector().String()} - podList, err := c.Core().Pods(metav1.NamespaceSystem).List(options) + podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(options) if err != nil { framework.Logf("Error while listing pods %v", err) return diff --git a/test/e2e/instrumentation/monitoring/metrics_grabber.go b/test/e2e/instrumentation/monitoring/metrics_grabber.go index 5e115d049b5..e0d038a6a46 100644 --- a/test/e2e/instrumentation/monitoring/metrics_grabber.go +++ b/test/e2e/instrumentation/monitoring/metrics_grabber.go @@ -61,7 +61,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { gin.It("should grab all metrics from a Scheduler.", func() { gin.By("Proxying to Pod through the API server") // Check if master Node is registered - nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) var masterRegistered = false @@ -82,7 +82,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { gin.It("should grab all metrics from a ControllerManager.", func() { gin.By("Proxying to Pod through the API server") // Check if master Node is registered - nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) var masterRegistered = false diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 205ddd3ae9a..f62b5b17da6 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -899,7 +899,7 @@ metadata: // Node // It should be OK to list unschedulable Nodes here. 
- nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) node := nodes.Items[0] output = framework.RunKubectlOrDie("describe", "node", node.Name) @@ -955,7 +955,7 @@ metadata: }) validateService := func(name string, servicePort int, timeout time.Duration) { err := wait.Poll(framework.Poll, timeout, func() (bool, error) { - endpoints, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{}) + endpoints, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{}) if err != nil { // log the real error framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err) @@ -986,7 +986,7 @@ metadata: }) Expect(err).NotTo(HaveOccurred()) - service, err := c.Core().Services(ns).Get(name, metav1.GetOptions{}) + service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) if len(service.Spec.Ports) != 1 { @@ -1213,7 +1213,7 @@ metadata: By("running the image " + nginxImage) framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) By("verifying the rc " + rcName + " was created") - rc, err := c.Core().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) + rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting rc %s: %v", rcName, err) } @@ -1269,7 +1269,7 @@ metadata: By("running the image " + nginxImage) framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) By("verifying the rc " + rcName + " was created") - rc, err := c.Core().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) + rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting rc %s: %v", rcName, err) } @@ -1427,7 +1427,7 @@ metadata: By("running the image " + nginxImage) framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+nginxImage, nsFlag) By("verifying the pod " + podName + " was created") - pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting pod %s: %v", podName, err) } @@ -1478,7 +1478,7 @@ metadata: framework.RunKubectlOrDieInput(podJson, "replace", "-f", "-", nsFlag) By("verifying the pod " + podName + " has the right image " + busyboxImage) - pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting deployment %s: %v", podName, err) } @@ -1679,7 +1679,7 @@ metadata: framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag) By("verifying that the quota was created") - quota, err := c.Core().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) } @@ -1709,7 +1709,7 @@ metadata: framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag) By("verifying that the quota was created") - quota, err := c.Core().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed 
getting quota %s: %v", quotaName, err) } @@ -1886,7 +1886,7 @@ func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse } func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) if errProxy != nil { return "", errProxy } @@ -1955,7 +1955,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) { label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) options := metav1.ListOptions{LabelSelector: label.String()} - rcs, err = c.Core().ReplicationControllers(ns).List(options) + rcs, err = c.CoreV1().ReplicationControllers(ns).List(options) Expect(err).NotTo(HaveOccurred()) if len(rcs.Items) > 0 { break @@ -2001,7 +2001,7 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) var body []byte if subResourceProxyAvailable { - body, err = c.Core().RESTClient().Get(). + body, err = c.CoreV1().RESTClient().Get(). Namespace(ns). Resource("pods"). SubResource("proxy"). @@ -2010,7 +2010,7 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) Do(). Raw() } else { - body, err = c.Core().RESTClient().Get(). + body, err = c.CoreV1().RESTClient().Get(). Prefix("proxy"). Namespace(ns). Resource("pods"). diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go index a06d264818f..167126f55ea 100644 --- a/test/e2e/kubectl/portforward.go +++ b/test/e2e/kubectl/portforward.go @@ -211,7 +211,7 @@ func runPortForward(ns, podName string, port int) *portForwardCommand { func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { By("Creating the target pod") pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { @@ -267,7 +267,7 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { By("Creating the target pod") pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { @@ -312,7 +312,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) { By("Creating the target pod") pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { @@ -382,7 +382,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { By("Creating the pod") pod := pfPod("def", "10", "10", "100", 
fmt.Sprintf("%s", bindAddress)) - if _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod); err != nil { + if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { @@ -397,7 +397,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { } }() - req := f.ClientSet.Core().RESTClient().Get(). + req := f.ClientSet.CoreV1().RESTClient().Get(). Namespace(f.Namespace.Name). Resource("pods"). Name(pod.Name). diff --git a/test/e2e/lifecycle/addon_update.go b/test/e2e/lifecycle/addon_update.go index 17a3adc5f70..50fd426630f 100644 --- a/test/e2e/lifecycle/addon_update.go +++ b/test/e2e/lifecycle/addon_update.go @@ -298,7 +298,7 @@ var _ = SIGDescribe("Addon update", func() { // Delete the "ensure exist class" addon at the end. defer func() { framework.Logf("Cleaning up ensure exist class addon.") - Expect(f.ClientSet.Core().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(HaveOccurred()) + Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(HaveOccurred()) }() waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", true) @@ -331,7 +331,7 @@ var _ = SIGDescribe("Addon update", func() { waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true) By("verify invalid addons weren't created") - _, err = f.ClientSet.Core().ReplicationControllers(addonNsName).Get("invalid-addon-test", metav1.GetOptions{}) + _, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get("invalid-addon-test", metav1.GetOptions{}) Expect(err).To(HaveOccurred()) // Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function. diff --git a/test/e2e/lifecycle/reboot.go b/test/e2e/lifecycle/reboot.go index 72125407ea4..b246ab0fa62 100644 --- a/test/e2e/lifecycle/reboot.go +++ b/test/e2e/lifecycle/reboot.go @@ -67,7 +67,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { // events for the kube-system namespace on failures namespaceName := metav1.NamespaceSystem By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName)) - events, err := f.ClientSet.Core().Events(namespaceName).List(metav1.ListOptions{}) + events, err := f.ClientSet.CoreV1().Events(namespaceName).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) for _, e := range events.Items { @@ -224,7 +224,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool { // Get the node initially. 
framework.Logf("Getting %s", name) - node, err := c.Core().Nodes().Get(name, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { framework.Logf("Couldn't get node %s", name) return false diff --git a/test/e2e/lifecycle/resize_nodes.go b/test/e2e/lifecycle/resize_nodes.go index c2e3e78be17..49e87455993 100644 --- a/test/e2e/lifecycle/resize_nodes.go +++ b/test/e2e/lifecycle/resize_nodes.go @@ -33,12 +33,12 @@ import ( const resizeNodeReadyTimeout = 2 * time.Minute func resizeRC(c clientset.Interface, ns, name string, replicas int32) error { - rc, err := c.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) + rc, err := c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) if err != nil { return err } *(rc.Spec.Replicas) = replicas - _, err = c.Core().ReplicationControllers(rc.Namespace).Update(rc) + _, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(rc) return err } diff --git a/test/e2e/lifecycle/restart.go b/test/e2e/lifecycle/restart.go index da28be9ec3e..d26c1f94b77 100644 --- a/test/e2e/lifecycle/restart.go +++ b/test/e2e/lifecycle/restart.go @@ -163,7 +163,7 @@ func restartNodes(f *framework.Framework, nodeNames []string) error { // List old boot IDs. oldBootIDs := make(map[string]string) for _, name := range nodeNames { - node, err := f.ClientSet.Core().Nodes().Get(name, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("error getting node info before reboot: %s", err) } @@ -185,7 +185,7 @@ func restartNodes(f *framework.Framework, nodeNames []string) error { // Wait for their boot IDs to change. for _, name := range nodeNames { if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) { - node, err := f.ClientSet.Core().Nodes().Get(name, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error getting node info after reboot: %s", err) } diff --git a/test/e2e/multicluster/ubernetes_lite.go b/test/e2e/multicluster/ubernetes_lite.go index 2e6f5bce27d..3be72fb4c7b 100644 --- a/test/e2e/multicluster/ubernetes_lite.go +++ b/test/e2e/multicluster/ubernetes_lite.go @@ -82,7 +82,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) }}, }, } - _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(serviceSpec) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(serviceSpec) Expect(err).NotTo(HaveOccurred()) // Now create some pods behind the service @@ -132,7 +132,7 @@ func getZoneNameForNode(node v1.Node) (string, error) { // Find the names of all zones in which we have nodes in this cluster. 
func getZoneNames(c clientset.Interface) ([]string, error) { zoneNames := sets.NewString() - nodes, err := c.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return nil, err } @@ -156,7 +156,7 @@ func getZoneCount(c clientset.Interface) (int, error) { // Find the name of the zone in which the pod is scheduled func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) { By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName)) - node, err := c.Core().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) return getZoneNameForNode(*node) } @@ -196,7 +196,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { name := "ubelite-spread-rc-" + string(uuid.NewUUID()) By(fmt.Sprintf("Creating replication controller %s", name)) - controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{ + controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ Namespace: f.Namespace.Name, Name: name, @@ -310,7 +310,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) By("Creating pods for each static PV") for _, config := range configs { podConfig := framework.MakePod(ns, []*v1.PersistentVolumeClaim{config.pvc}, false, "") - config.pod, err = c.Core().Pods(ns).Create(podConfig) + config.pod, err = c.CoreV1().Pods(ns).Create(podConfig) Expect(err).NotTo(HaveOccurred()) } diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index 2b6e8e14818..edb73262a7f 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -180,7 +180,7 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client var contents []byte for _, fileName := range fileNames { if subResourceProxyAvailable { - contents, err = client.Core().RESTClient().Get(). + contents, err = client.CoreV1().RESTClient().Get(). Context(ctx). Namespace(pod.Namespace). Resource("pods"). @@ -189,7 +189,7 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client Suffix(fileDir, fileName). Do().Raw() } else { - contents, err = client.Core().RESTClient().Get(). + contents, err = client.CoreV1().RESTClient().Get(). Context(ctx). Prefix("proxy"). Resource("pods"). 
@@ -221,7 +221,7 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) { By("submitting the pod to kubernetes") - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) defer func() { By("deleting the pod") defer GinkgoRecover() @@ -249,7 +249,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) { By("submitting the pod to kubernetes") - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) defer func() { By("deleting the pod") defer GinkgoRecover() @@ -317,21 +317,21 @@ var _ = SIGDescribe("DNS", func() { "dns-test": "true", } headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) - _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) Expect(err).NotTo(HaveOccurred()) defer func() { By("deleting the test headless service") defer GinkgoRecover() - f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() regularService := framework.CreateServiceSpec("test-service-2", "", false, testServiceSelector) - regularService, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(regularService) + regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) Expect(err).NotTo(HaveOccurred()) defer func() { By("deleting the test service") defer GinkgoRecover() - f.ClientSet.Core().Services(f.Namespace.Name).Delete(regularService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil) }() // All the names we need to be able to resolve. 
@@ -367,12 +367,12 @@ var _ = SIGDescribe("DNS", func() { serviceName := "dns-test-service-2" podHostname := "dns-querier-2" headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector) - _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) Expect(err).NotTo(HaveOccurred()) defer func() { By("deleting the test headless service") defer GinkgoRecover() - f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podHostname, serviceName, f.Namespace.Name) @@ -398,12 +398,12 @@ var _ = SIGDescribe("DNS", func() { By("Creating a test externalName service") serviceName := "dns-test-service-3" externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil) - _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(externalNameService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService) Expect(err).NotTo(HaveOccurred()) defer func() { By("deleting the test externalName service") defer GinkgoRecover() - f.ClientSet.Core().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) + f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) }() hostFQDN := fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name) @@ -453,7 +453,7 @@ var _ = SIGDescribe("DNS", func() { By("creating a third pod to probe DNS") pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd) - svc, err := f.ClientSet.Core().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{}) + svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP) diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index d01339f23a4..ddcb2680308 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -64,7 +64,7 @@ func (t *dnsTestCommon) init() { label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := t.f.ClientSet.Core().Pods("kube-system").List(options) + pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(options) Expect(err).NotTo(HaveOccurred()) Expect(len(pods.Items)).Should(BeNumerically(">=", 1)) @@ -142,16 +142,16 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) { "metadata.name": t.name, }.AsSelector().String(), } - cmList, err := t.c.Core().ConfigMaps(t.ns).List(options) + cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options) Expect(err).NotTo(HaveOccurred()) if len(cmList.Items) == 0 { By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) - _, err := t.c.Core().ConfigMaps(t.ns).Create(cm) + _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm) Expect(err).NotTo(HaveOccurred()) } else { By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)) - _, err := t.c.Core().ConfigMaps(t.ns).Update(cm) + _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm) Expect(err).NotTo(HaveOccurred()) } } @@ -159,7 +159,7 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) { func (t *dnsTestCommon) 
deleteConfigMap() { By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) t.cm = nil - err := t.c.Core().ConfigMaps(t.ns).Delete(t.name, nil) + err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) Expect(err).NotTo(HaveOccurred()) } @@ -191,7 +191,7 @@ func (t *dnsTestCommon) createUtilPod() { } var err error - t.utilPod, err = t.c.Core().Pods(t.f.Namespace.Name).Create(t.utilPod) + t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod) Expect(err).NotTo(HaveOccurred()) framework.Logf("Created pod %v", t.utilPod) Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred()) @@ -216,13 +216,13 @@ func (t *dnsTestCommon) createUtilPod() { }, } - t.utilService, err = t.c.Core().Services(t.f.Namespace.Name).Create(t.utilService) + t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService) Expect(err).NotTo(HaveOccurred()) framework.Logf("Created service %v", t.utilService) } func (t *dnsTestCommon) deleteUtilPod() { - podClient := t.c.Core().Pods(t.f.Namespace.Name) + podClient := t.c.CoreV1().Pods(t.f.Namespace.Name) if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Logf("Delete of pod %v:%v failed: %v", t.utilPod.Namespace, t.utilPod.Name, err) @@ -263,18 +263,18 @@ func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) { } var err error - t.dnsServerPod, err = t.c.Core().Pods(t.f.Namespace.Name).Create(t.dnsServerPod) + t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod) Expect(err).NotTo(HaveOccurred()) framework.Logf("Created pod %v", t.dnsServerPod) Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred()) - t.dnsServerPod, err = t.c.Core().Pods(t.f.Namespace.Name).Get( + t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get( t.dnsServerPod.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) } func (t *dnsTestCommon) deleteDNSServerPod() { - podClient := t.c.Core().Pods(t.f.Namespace.Name) + podClient := t.c.CoreV1().Pods(t.f.Namespace.Name) if err := podClient.Delete(t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Logf("Delete of pod %v:%v failed: %v", t.utilPod.Namespace, t.dnsServerPod.Name, err) diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index d5ddd53a637..3e11edc08b7 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -45,7 +45,7 @@ var _ = SIGDescribe("DNS configMap federations", func() { func (t *dnsFederationsConfigMapTest) run() { t.init() - defer t.c.Core().ConfigMaps(t.ns).Delete(t.name, nil) + defer t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) t.createUtilPod() defer t.deleteUtilPod() @@ -175,7 +175,7 @@ func (t *dnsNameserverTest) run() { "dnsmasq", moreForeverTestTimeout) - t.c.Core().ConfigMaps(t.ns).Delete(t.name, nil) + t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) // Wait for the deleted ConfigMap to take effect, otherwise the // configuration can bleed into other tests. 
t.checkDNSRecordFrom( diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index ab304d747e6..8c510c2f3ee 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -103,7 +103,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { for _, ns := range namespaces { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.Core().Pods(ns.Name).List(options) + pods, err := c.CoreV1().Pods(ns.Name).List(options) Expect(err).NotTo(HaveOccurred()) err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods) Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond") @@ -123,7 +123,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { // This code is probably unnecessary, but let's stay on the safe side. label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.Core().Pods(namespaces[0].Name).List(options) + pods, err := c.CoreV1().Pods(namespaces[0].Name).List(options) if err != nil || pods == nil || len(pods.Items) == 0 { framework.Failf("no running pods found") diff --git a/test/e2e/network/networking.go b/test/e2e/network/networking.go index 6f27c379367..7c2bff0f6a5 100644 --- a/test/e2e/network/networking.go +++ b/test/e2e/network/networking.go @@ -76,7 +76,7 @@ var _ = SIGDescribe("Networking", func() { } for _, test := range tests { By(fmt.Sprintf("testing: %s", test.path)) - data, err := f.ClientSet.Core().RESTClient().Get(). + data, err := f.ClientSet.CoreV1().RESTClient().Get(). AbsPath(test.path). DoRaw() if err != nil { diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index b1b23ab9e13..37a9b816b26 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -103,7 +103,7 @@ var _ = SIGDescribe("Proxy", func() { It("should proxy through a service and a pod [Conformance]", func() { start := time.Now() labels := map[string]string{"proxy-service-target": "true"} - service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&v1.Service{ + service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "proxy-service-", }, @@ -293,7 +293,7 @@ var _ = SIGDescribe("Proxy", func() { } if len(errs) != 0 { - body, err := f.ClientSet.Core().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &v1.PodLogOptions{}).Do().Raw() + body, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &v1.PodLogOptions{}).Do().Raw() if err != nil { framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err) } else { @@ -314,7 +314,7 @@ func doProxy(f *framework.Framework, path string, i int) (body []byte, statusCod // chance of the things we are talking to being confused for an error // that apiserver would have emitted. 
start := time.Now() - body, err = f.ClientSet.Core().RESTClient().Get().AbsPath(path).Do().StatusCode(&statusCode).Raw() + body, err = f.ClientSet.CoreV1().RESTClient().Get().AbsPath(path).Do().StatusCode(&statusCode).Raw() d = time.Since(start) if len(body) > 0 { framework.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d) diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 40fd89ee066..5d04e07b66f 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -1260,7 +1260,7 @@ var _ = SIGDescribe("Services", func() { By("Remove pods immediately") label := labels.SelectorFromSet(labels.Set(t.Labels)) options := metav1.ListOptions{LabelSelector: label.String()} - podClient := t.Client.Core().Pods(f.Namespace.Name) + podClient := t.Client.CoreV1().Pods(f.Namespace.Name) pods, err := podClient.List(options) if err != nil { framework.Logf("warning: error retrieving pods: %s", err) @@ -1396,7 +1396,7 @@ var _ = SIGDescribe("Services", func() { }) framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName) if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { - svc, err := jig.Client.Core().Services(namespace).Get(serviceName, metav1.GetOptions{}) + svc, err := jig.Client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1420,7 +1420,7 @@ var _ = SIGDescribe("Services", func() { }) framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName) if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { - svc, err := jig.Client.Core().Services(namespace).Get(serviceName, metav1.GetOptions{}) + svc, err := jig.Client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{}) if err != nil { return false, err } @@ -1618,7 +1618,7 @@ var _ = SIGDescribe("ESIPP [Slow]", func() { err := cs.CoreV1().Pods(namespace).Delete(execPodName, nil) Expect(err).NotTo(HaveOccurred()) }() - execPod, err := f.ClientSet.Core().Pods(namespace).Get(execPodName, metav1.GetOptions{}) + execPod, err := f.ClientSet.CoreV1().Pods(namespace).Get(execPodName, metav1.GetOptions{}) framework.ExpectNoError(err) framework.Logf("Waiting up to %v wget %v", framework.KubeProxyLagTimeout, path) @@ -1750,10 +1750,10 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam }) defer func() { framework.Logf("Cleaning up the exec pod") - err := c.Core().Pods(ns).Delete(execPodName, nil) + err := c.CoreV1().Pods(ns).Delete(execPodName, nil) Expect(err).NotTo(HaveOccurred()) }() - execPod, err := f.ClientSet.Core().Pods(ns).Get(execPodName, metav1.GetOptions{}) + execPod, err := f.ClientSet.CoreV1().Pods(ns).Get(execPodName, metav1.GetOptions{}) framework.ExpectNoError(err) var stdout string diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go index 66b06de7a16..0961d95971f 100644 --- a/test/e2e/network/service_latency.go +++ b/test/e2e/network/service_latency.go @@ -74,9 +74,9 @@ var _ = SIGDescribe("Service endpoints latency", func() { ) // Turn off rate limiting--it interferes with our measurements. 
- oldThrottle := f.ClientSet.Core().RESTClient().GetRateLimiter() - f.ClientSet.Core().RESTClient().(*restclient.RESTClient).Throttle = flowcontrol.NewFakeAlwaysRateLimiter() - defer func() { f.ClientSet.Core().RESTClient().(*restclient.RESTClient).Throttle = oldThrottle }() + oldThrottle := f.ClientSet.CoreV1().RESTClient().GetRateLimiter() + f.ClientSet.CoreV1().RESTClient().(*restclient.RESTClient).Throttle = flowcontrol.NewFakeAlwaysRateLimiter() + defer func() { f.ClientSet.CoreV1().RESTClient().(*restclient.RESTClient).Throttle = oldThrottle }() failing := sets.NewString() d, err := runServiceLatencies(f, parallelTrials, totalTrials) @@ -286,11 +286,11 @@ func startEndpointWatcher(f *framework.Framework, q *endpointQueries) { _, controller := cache.NewInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - obj, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(options) + obj, err := f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).List(options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options) + return f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).Watch(options) }, }, &v1.Endpoints{}, @@ -335,7 +335,7 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie }, } startTime := time.Now() - gotSvc, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(svc) + gotSvc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc) if err != nil { return 0, err } diff --git a/test/e2e/network/serviceloadbalancers.go b/test/e2e/network/serviceloadbalancers.go index a70d01fd913..743b753512b 100644 --- a/test/e2e/network/serviceloadbalancers.go +++ b/test/e2e/network/serviceloadbalancers.go @@ -101,7 +101,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) { framework.Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args) } - rc, err = h.client.Core().ReplicationControllers(rc.Namespace).Create(rc) + rc, err = h.client.CoreV1().ReplicationControllers(rc.Namespace).Create(rc) if err != nil { return } @@ -115,7 +115,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) { labelSelector := labels.SelectorFromSet( labels.Set(map[string]string{"name": h.rcName})) options := metav1.ListOptions{LabelSelector: labelSelector.String()} - pods, err := h.client.Core().Pods(h.rcNamespace).List(options) + pods, err := h.client.CoreV1().Pods(h.rcNamespace).List(options) if err != nil { return err } @@ -139,7 +139,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) { } func (h *haproxyControllerTester) stop() error { - return h.client.Core().ReplicationControllers(h.rcNamespace).Delete(h.rcName, nil) + return h.client.CoreV1().ReplicationControllers(h.rcNamespace).Delete(h.rcName, nil) } func (h *haproxyControllerTester) lookup(ingressKey string) string { @@ -171,7 +171,7 @@ func (s *ingManager) start(namespace string) (err error) { Expect(err).NotTo(HaveOccurred()) rc.Namespace = namespace rc.Spec.Template.Labels["name"] = rc.Name - rc, err = s.client.Core().ReplicationControllers(rc.Namespace).Create(rc) + rc, err = s.client.CoreV1().ReplicationControllers(rc.Namespace).Create(rc) if err != nil { return } @@ -188,7 +188,7 @@ func (s *ingManager) start(namespace string) (err error) { svc, err = manifest.SvcFromManifest(svcPath) Expect(err).NotTo(HaveOccurred()) svc.Namespace = namespace - svc, err = 
s.client.Core().Services(svc.Namespace).Create(svc) + svc, err = s.client.CoreV1().Services(svc.Namespace).Create(svc) if err != nil { return } diff --git a/test/e2e/network_partition.go b/test/e2e/network_partition.go index 2b00b149807..10aa30cf6da 100644 --- a/test/e2e/network_partition.go +++ b/test/e2e/network_partition.go @@ -94,7 +94,7 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod { } func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error { - pod, err := c.Core().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage)) + pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage)) if err == nil { framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName) } else { @@ -142,14 +142,14 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] By("choose a node - we will block all network traffic on this node") var podOpts metav1.ListOptions nodeOpts := metav1.ListOptions{} - nodes, err := c.Core().Nodes().List(nodeOpts) + nodes, err := c.CoreV1().Nodes().List(nodeOpts) Expect(err).NotTo(HaveOccurred()) framework.FilterNodes(nodes, func(node v1.Node) bool { if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) { return false } podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} - pods, err := c.Core().Pods(metav1.NamespaceAll).List(podOpts) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) if err != nil || len(pods.Items) <= 0 { return false } @@ -173,12 +173,12 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector.String() - obj, err := f.ClientSet.Core().Nodes().List(options) + obj, err := f.ClientSet.CoreV1().Nodes().List(options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = nodeSelector.String() - return f.ClientSet.Core().Nodes().Watch(options) + return f.ClientSet.CoreV1().Nodes().Watch(options) }, }, &v1.Node{}, @@ -245,11 +245,11 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] By("choose a node with at least one pod - we will block some network traffic on this node") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled + pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled Expect(err).NotTo(HaveOccurred()) nodeName := pods.Items[0].Spec.NodeName - node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) // This creates a temporary network partition, verifies that 'podNameToDisappear', @@ -287,7 +287,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] // verify that it is really on the requested node { - pod, err := c.Core().Pods(ns).Get(additionalPod, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) if pod.Spec.NodeName != node.Name { framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, 
node.Name) @@ -310,11 +310,11 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] By("choose a node with at least one pod - we will block some network traffic on this node") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled + pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled Expect(err).NotTo(HaveOccurred()) nodeName := pods.Items[0].Spec.NodeName - node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) // This creates a temporary network partition, verifies that 'podNameToDisappear', @@ -351,7 +351,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] framework.SkipUnlessProviderIs("gce", "gke") By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name) headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels) - _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService) + _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) framework.ExpectNoError(err) c = f.ClientSet ns = f.Namespace.Name @@ -392,7 +392,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps) pod := pst.GetPodList(ps).Items[0] - node, err := c.Core().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) // Blocks outgoing network traffic on 'node'. 
Then verifies that 'podNameToDisappear', @@ -432,11 +432,11 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] By("choose a node with at least one pod - we will block some network traffic on this node") options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled + pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled Expect(err).NotTo(HaveOccurred()) nodeName := pods.Items[0].Spec.NodeName - node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) // This creates a temporary network partition, verifies that the job has 'parallelism' number of @@ -482,7 +482,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] return false } podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} - pods, err := c.Core().Pods(metav1.NamespaceAll).List(podOpts) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) if err != nil || len(pods.Items) <= 0 { return false } @@ -496,7 +496,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] if err := framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil { framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) } - pods, err := c.Core().Pods(metav1.NamespaceAll).List(podOpts) + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) framework.ExpectNoError(err) podTolerationTimes := map[string]time.Duration{} // This test doesn't add tolerations by itself, but because they may be present in the cluster @@ -545,12 +545,12 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector.String() - obj, err := f.ClientSet.Core().Nodes().List(options) + obj, err := f.ClientSet.CoreV1().Nodes().List(options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = nodeSelector.String() - return f.ClientSet.Core().Nodes().Watch(options) + return f.ClientSet.CoreV1().Nodes().Watch(options) }, }, &v1.Node{}, @@ -601,7 +601,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow] sleepTime := maxTolerationTime + 20*time.Second By(fmt.Sprintf("Sleeping for %v and checking if all Pods were evicted", sleepTime)) time.Sleep(sleepTime) - pods, err = c.Core().Pods(v1.NamespaceAll).List(podOpts) + pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(podOpts) framework.ExpectNoError(err) seenRunning := []string{} for _, pod := range pods.Items { diff --git a/test/e2e/node/security_context.go b/test/e2e/node/security_context.go index 22062836c28..48c531ab923 100644 --- a/test/e2e/node/security_context.go +++ b/test/e2e/node/security_context.go @@ -167,7 +167,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) } pod.Spec.Containers[0].Command = []string{"sleep", "6000"} - client := f.ClientSet.Core().Pods(f.Namespace.Name) + client := f.ClientSet.CoreV1().Pods(f.Namespace.Name) pod, err := client.Create(pod) framework.ExpectNoError(err, "Error creating pod %v", pod) @@ -181,7 
+181,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) Expect(err).To(BeNil()) Expect(content).To(ContainSubstring(testContent)) - foundPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) // Confirm that the file can be accessed from a second diff --git a/test/e2e/pod_gc.go b/test/e2e/pod_gc.go index 11e0f9acfe1..6d9c52f4021 100644 --- a/test/e2e/pod_gc.go +++ b/test/e2e/pod_gc.go @@ -40,7 +40,7 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect pod, err := createTerminatingPod(f) pod.ResourceVersion = "" pod.Status.Phase = v1.PodFailed - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).UpdateStatus(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(pod) if err != nil { framework.Failf("err failing pod: %v", err) } @@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold)) pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) { - pods, err = f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { framework.Logf("Failed to list pod %v", err) return false, nil @@ -95,5 +95,5 @@ func createTerminatingPod(f *framework.Framework) (*v1.Pod, error) { SchedulerName: "please don't schedule my pods", }, } - return f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) } diff --git a/test/e2e/pre_stop.go b/test/e2e/pre_stop.go index 81233ee19ee..6b14d372e13 100644 --- a/test/e2e/pre_stop.go +++ b/test/e2e/pre_stop.go @@ -54,13 +54,13 @@ func testPreStop(c clientset.Interface, ns string) { }, } By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) - podDescr, err := c.Core().Pods(ns).Create(podDescr) + podDescr, err := c.CoreV1().Pods(ns).Create(podDescr) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) // At the end of the test, clean up by removing the pod. 
defer func() { By("Deleting the server pod") - c.Core().Pods(ns).Delete(podDescr.Name, nil) + c.CoreV1().Pods(ns).Delete(podDescr.Name, nil) }() By("Waiting for pods to come up.") @@ -69,7 +69,7 @@ func testPreStop(c clientset.Interface, ns string) { val := "{\"Source\": \"prestop\"}" - podOut, err := c.Core().Pods(ns).Get(podDescr.Name, metav1.GetOptions{}) + podOut, err := c.CoreV1().Pods(ns).Get(podDescr.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "getting pod info") preStopDescr := &v1.Pod{ @@ -97,7 +97,7 @@ func testPreStop(c clientset.Interface, ns string) { } By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns)) - preStopDescr, err = c.Core().Pods(ns).Create(preStopDescr) + preStopDescr, err = c.CoreV1().Pods(ns).Create(preStopDescr) framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) deletePreStop := true @@ -105,7 +105,7 @@ func testPreStop(c clientset.Interface, ns string) { defer func() { if deletePreStop { By("Deleting the tester pod") - c.Core().Pods(ns).Delete(preStopDescr.Name, nil) + c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil) } }() @@ -114,7 +114,7 @@ func testPreStop(c clientset.Interface, ns string) { // Delete the pod with the preStop handler. By("Deleting pre-stop pod") - if err := c.Core().Pods(ns).Delete(preStopDescr.Name, nil); err == nil { + if err := c.CoreV1().Pods(ns).Delete(preStopDescr.Name, nil); err == nil { deletePreStop = false } framework.ExpectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name)) @@ -131,7 +131,7 @@ func testPreStop(c clientset.Interface, ns string) { var body []byte if subResourceProxyAvailable { - body, err = c.Core().RESTClient().Get(). + body, err = c.CoreV1().RESTClient().Get(). Context(ctx). Namespace(ns). Resource("pods"). @@ -140,7 +140,7 @@ func testPreStop(c clientset.Interface, ns string) { Suffix("read"). DoRaw() } else { - body, err = c.Core().RESTClient().Get(). + body, err = c.CoreV1().RESTClient().Get(). Context(ctx). Prefix("proxy"). Namespace(ns). 
diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index 1b99328af86..3f906d88bd5 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -234,7 +234,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration { // Print some data about Pod to Node allocation By("Printing Pod to Node allocation data") - podList, err := dtc.ClientSets[0].Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) + podList, err := dtc.ClientSets[0].CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) framework.ExpectNoError(err) pausePodAllocation := make(map[string]int) systemPodAllocation := make(map[string][]string) @@ -615,12 +615,12 @@ var _ = SIGDescribe("Density", func() { &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String() - obj, err := c.Core().Pods(nsName).List(options) + obj, err := c.CoreV1().Pods(nsName).List(options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String() - return c.Core().Pods(nsName).Watch(options) + return c.CoreV1().Pods(nsName).Watch(options) }, }, &v1.Pod{}, @@ -704,7 +704,7 @@ var _ = SIGDescribe("Density", func() { "source": v1.DefaultSchedulerName, }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - schedEvents, err := c.Core().Events(nsName).List(options) + schedEvents, err := c.CoreV1().Events(nsName).List(options) framework.ExpectNoError(err) for k := range createTimes { for _, event := range schedEvents.Items { @@ -822,7 +822,7 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, }, } for attempt := 1; attempt <= MaxLatencyPodCreationTries; attempt++ { - _, err := c.Core().ReplicationControllers(ns).Create(rc) + _, err := c.CoreV1().ReplicationControllers(ns).Create(rc) if err == nil || apierrs.IsAlreadyExists(err) { break } diff --git a/test/e2e/scalability/load.go b/test/e2e/scalability/load.go index 09f28f387ed..2c3223b39b0 100644 --- a/test/e2e/scalability/load.go +++ b/test/e2e/scalability/load.go @@ -197,7 +197,7 @@ var _ = SIGDescribe("Load capacity", func() { services := generateServicesForConfigs(configs) createService := func(i int) { defer GinkgoRecover() - _, err := clientset.Core().Services(services[i].Namespace).Create(services[i]) + _, err := clientset.CoreV1().Services(services[i].Namespace).Create(services[i]) framework.ExpectNoError(err) } workqueue.Parallelize(serviceOperationsParallelism, len(services), createService) @@ -206,7 +206,7 @@ var _ = SIGDescribe("Load capacity", func() { framework.Logf("Starting to delete services...") deleteService := func(i int) { defer GinkgoRecover() - err := clientset.Core().Services(services[i].Namespace).Delete(services[i].Name, nil) + err := clientset.CoreV1().Services(services[i].Namespace).Delete(services[i].Name, nil) framework.ExpectNoError(err) } workqueue.Parallelize(serviceOperationsParallelism, len(services), deleteService) @@ -566,7 +566,7 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling LabelSelector: selector.String(), ResourceVersion: "0", } - _, err := config.GetClient().Core().Pods(config.GetNamespace()).List(options) + _, err := config.GetClient().CoreV1().Pods(config.GetNamespace()).List(options) framework.ExpectNoError(err, fmt.Sprintf("listing pods from 
rc %v", config.GetName())) } diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index cbb52185f17..267acb072f0 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -47,11 +47,11 @@ var _ = SIGDescribe("LimitRange", func() { min, max, defaultLimit, defaultRequest, maxLimitRequestRatio) - limitRange, err := f.ClientSet.Core().LimitRanges(f.Namespace.Name).Create(limitRange) + limitRange, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange) Expect(err).NotTo(HaveOccurred()) By("Fetching the LimitRange to ensure it has proper values") - limitRange, err = f.ClientSet.Core().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{}) + limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{}) expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit} actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default} err = equalResourceRequirement(expected, actual) @@ -59,11 +59,11 @@ var _ = SIGDescribe("LimitRange", func() { By("Creating a Pod with no resource requirements") pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring Pod has resource requirements applied from LimitRange") - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) for i := range pod.Spec.Containers { err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) @@ -76,11 +76,11 @@ var _ = SIGDescribe("LimitRange", func() { By("Creating a Pod with partial resource requirements") pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", "")) - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring Pod has merged resource requirements applied from LimitRange") - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) // This is an interesting case, so it's worth a comment // If you specify a Limit, and no Request, the Limit will default to the Request @@ -97,12 +97,12 @@ var _ = SIGDescribe("LimitRange", func() { By("Failing to create a Pod with less than min resources") pod = f.NewTestPod(podName, getResourceList("10m", "50Mi"), v1.ResourceList{}) - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).To(HaveOccurred()) By("Failing to create a Pod with more than max resources") pod = f.NewTestPod(podName, getResourceList("600m", "600Mi"), v1.ResourceList{}) - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).To(HaveOccurred()) }) diff --git a/test/e2e/scheduling/resource_quota.go b/test/e2e/scheduling/resource_quota.go index 74fe7099457..cc1772ab7b0 100644 --- a/test/e2e/scheduling/resource_quota.go +++ 
b/test/e2e/scheduling/resource_quota.go @@ -73,7 +73,7 @@ var _ = SIGDescribe("ResourceQuota", func() { By("Creating a Service") service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP) - service, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(service) + service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(service) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures service creation") @@ -84,7 +84,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting a Service") - err = f.ClientSet.Core().Services(f.Namespace.Name).Delete(service.Name, nil) + err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(service.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") @@ -97,7 +97,7 @@ var _ = SIGDescribe("ResourceQuota", func() { By("Discovering how many secrets are in namespace by default") found, unchanged := 0, 0 wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) { - secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(metav1.ListOptions{}) + secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) if len(secrets.Items) == found { // loop until the number of secrets has stabilized for 5 seconds @@ -127,7 +127,7 @@ var _ = SIGDescribe("ResourceQuota", func() { By("Creating a Secret") secret := newTestSecretForQuota("test-secret") - secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret) + secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures secret creation") @@ -139,7 +139,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting a secret") - err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secret.Name, nil) + err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") @@ -168,12 +168,12 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceMemory] = resource.MustParse("252Mi") pod := newTestPodForQuota(f, podName, requests, v1.ResourceList{}) pod.Initializers = &metav1.Initializers{Pending: []metav1.Initializer{{Name: "unhandled"}}} - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) // because no one is handling the initializer, server will return a 504 timeout if err != nil && !errors.IsTimeout(err) { framework.Failf("expect err to be timeout error, got %v", err) } - createdPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{}) + createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) By("Ensuring only pod count is charged") @@ -215,7 +215,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(createdPod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(createdPod.Name, metav1.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") @@ -233,12 +233,12 @@ var _ = SIGDescribe("ResourceQuota", func() { podName = "too-large-pod" pod = newTestPodForQuota(f, podName, 
requests, v1.ResourceList{}) pod.Initializers = &metav1.Initializers{Pending: []metav1.Initializer{{Name: "unhandled"}}} - _, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) // because no one is handling the initializer, server will return a 504 timeout if err != nil && !errors.IsTimeout(err) { framework.Failf("expect err to be timeout error, got %v", err) } - createdPod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{}) + createdPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) By("Ensuring only charges pod count") @@ -262,7 +262,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(createdPod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(createdPod.Name, metav1.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring ResourceQuota status doesn't change") @@ -293,7 +293,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceCPU] = resource.MustParse("500m") requests[v1.ResourceMemory] = resource.MustParse("252Mi") pod := newTestPodForQuota(f, podName, requests, v1.ResourceList{}) - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) podToUpdate := pod @@ -310,7 +310,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceCPU] = resource.MustParse("600m") requests[v1.ResourceMemory] = resource.MustParse("100Mi") pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{}) - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).To(HaveOccurred()) By("Ensuring a pod cannot update its resource requirements") @@ -319,7 +319,7 @@ var _ = SIGDescribe("ResourceQuota", func() { requests[v1.ResourceCPU] = resource.MustParse("100m") requests[v1.ResourceMemory] = resource.MustParse("100Mi") podToUpdate.Spec.Containers[0].Resources.Requests = requests - _, err = f.ClientSet.Core().Pods(f.Namespace.Name).Update(podToUpdate) + _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate) Expect(err).To(HaveOccurred()) By("Ensuring attempts to update pod resource requirements did not change quota usage") @@ -327,7 +327,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") @@ -354,7 +354,7 @@ var _ = SIGDescribe("ResourceQuota", func() { By("Creating a ConfigMap") configMap := newTestConfigMapForQuota("test-configmap") - configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap) + configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures configMap creation") @@ -365,7 +365,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting a ConfigMap") - err = 
f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil) + err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") @@ -390,7 +390,7 @@ var _ = SIGDescribe("ResourceQuota", func() { By("Creating a ReplicationController") replicationController := newTestReplicationControllerForQuota("test-rc", "nginx", 0) - replicationController, err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(replicationController) + replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(replicationController) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures replication controller creation") @@ -400,7 +400,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting a ReplicationController") - err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Delete(replicationController.Name, nil) + err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(replicationController.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") @@ -426,7 +426,7 @@ var _ = SIGDescribe("ResourceQuota", func() { By("Creating a PersistentVolumeClaim") pvc := newTestPersistentVolumeClaimForQuota("test-claim") - pvc, err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Create(pvc) + pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(pvc) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures persistent volume claim creation") @@ -437,7 +437,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting a PersistentVolumeClaim") - err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil) + err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") @@ -468,7 +468,7 @@ var _ = SIGDescribe("ResourceQuota", func() { By("Creating a PersistentVolumeClaim with storage class") pvc := newTestPersistentVolumeClaimForQuota("test-claim") pvc.Spec.StorageClassName = &classGold - pvc, err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Create(pvc) + pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(pvc) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status captures persistent volume claim creation") @@ -482,7 +482,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting a PersistentVolumeClaim") - err = f.ClientSet.Core().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil) + err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released usage") @@ -525,7 +525,7 @@ var _ = SIGDescribe("ResourceQuota", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod := newTestPodForQuota(f, podName, requests, limits) - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with not terminating scope captures the pod usage") @@ -547,7 +547,7 @@ var _ = SIGDescribe("ResourceQuota", func() { 
Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") @@ -564,7 +564,7 @@ var _ = SIGDescribe("ResourceQuota", func() { pod = newTestPodForQuota(f, podName, requests, limits) activeDeadlineSeconds := int64(3600) pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with terminating scope captures the pod usage") @@ -586,7 +586,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") @@ -620,7 +620,7 @@ var _ = SIGDescribe("ResourceQuota", func() { By("Creating a best-effort pod") pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{}) - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with best effort scope captures the pod usage") @@ -634,7 +634,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") @@ -650,7 +650,7 @@ var _ = SIGDescribe("ResourceQuota", func() { limits[v1.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceMemory] = resource.MustParse("400Mi") pod = newTestPodForQuota(f, "burstable-pod", requests, limits) - pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota with not best effort scope captures the pod usage") @@ -664,7 +664,7 @@ var _ = SIGDescribe("ResourceQuota", func() { Expect(err).NotTo(HaveOccurred()) By("Deleting the pod") - err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)) Expect(err).NotTo(HaveOccurred()) By("Ensuring resource quota status released the pod usage") @@ -826,18 +826,18 @@ func newTestSecretForQuota(name string) *v1.Secret { // createResourceQuota in the specified namespace func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) { - return c.Core().ResourceQuotas(namespace).Create(resourceQuota) + return c.CoreV1().ResourceQuotas(namespace).Create(resourceQuota) } // deleteResourceQuota with the specified name func deleteResourceQuota(c clientset.Interface, namespace, name string) error { - return c.Core().ResourceQuotas(namespace).Delete(name, nil) + return 
c.CoreV1().ResourceQuotas(namespace).Delete(name, nil) } // wait for resource quota status to show the expected used resources value func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.ResourceList) error { return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) { - resourceQuota, err := c.Core().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) + resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index 15a205c0f5c..653c2a0f970 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -70,7 +70,7 @@ var _ = SIGDescribe("EmptyDir wrapper volumes", func() { } var err error - if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { framework.Failf("unable to create test secret %s: %v", secret.Name, err) } @@ -126,11 +126,11 @@ var _ = SIGDescribe("EmptyDir wrapper volumes", func() { defer func() { By("Cleaning up the secret") - if err := f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil { framework.Failf("unable to delete secret %v: %v", secret.Name, err) } By("Cleaning up the git vol pod") - if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil { + if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err) } }() @@ -218,17 +218,17 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle }, } - if gitServerSvc, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(gitServerSvc); err != nil { + if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(gitServerSvc); err != nil { framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) } return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() { By("Cleaning up the git server pod") - if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { + if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) } By("Cleaning up the git server svc") - if err := f.ClientSet.Core().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil { + if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil { framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) } } @@ -268,7 +268,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { "data-1": "value-1", }, } - _, err := f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap) + _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap) framework.ExpectNoError(err) } return @@ -277,7 +277,7 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) { func deleteConfigMaps(f *framework.Framework, configMapNames []string) { By("Cleaning up the 
configMaps") for _, configMapName := range configMapNames { - err := f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil) + err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil) Expect(err).NotTo(HaveOccurred(), "unable to delete configMap %v", configMapName) } } @@ -368,7 +368,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume }, }, } - _, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rc) + _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc) Expect(err).NotTo(HaveOccurred(), "error creating replication controller") defer func() { diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index 602ec182f70..b7bfb583ad7 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -69,8 +69,8 @@ var _ = SIGDescribe("Pod Disks", func() { cs = f.ClientSet ns = f.Namespace.Name - podClient = cs.Core().Pods(ns) - nodeClient = cs.Core().Nodes() + podClient = cs.CoreV1().Pods(ns) + nodeClient = cs.CoreV1().Nodes() nodes = framework.GetReadySchedulableNodesOrDie(cs) Expect(len(nodes.Items)).To(BeNumerically(">=", minNodes), fmt.Sprintf("Requires at least %d nodes", minNodes)) host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name) diff --git a/test/e2e/storage/persistent_volumes-disruptive.go b/test/e2e/storage/persistent_volumes-disruptive.go index dd70136fa40..a36abd21af3 100644 --- a/test/e2e/storage/persistent_volumes-disruptive.go +++ b/test/e2e/storage/persistent_volumes-disruptive.go @@ -281,7 +281,7 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew } }() By(fmt.Sprintf("Deleting Pod %q", clientPod.Name)) - err = c.Core().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) By("Starting the kubelet and waiting for pod to delete.") kubeletCommand(kStart, c, clientPod) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 989e3b0b4b5..6744fc78cb5 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -329,13 +329,13 @@ var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [S // Create a persistent volume claim for local volume: the above volume will be bound. 
By("Creating a persistent volume claim") - claim, err := config.client.Core().PersistentVolumeClaims(config.ns).Create(newLocalClaim(config)) + claim, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(newLocalClaim(config)) Expect(err).NotTo(HaveOccurred()) err = framework.WaitForPersistentVolumeClaimPhase( v1.ClaimBound, config.client, claim.Namespace, claim.Name, framework.Poll, 1*time.Minute) Expect(err).NotTo(HaveOccurred()) - claim, err = config.client.Core().PersistentVolumeClaims(config.ns).Get(claim.Name, metav1.GetOptions{}) + claim, err = config.client.CoreV1().PersistentVolumeClaims(config.ns).Get(claim.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(claim.Spec.VolumeName).To(Equal(oldPV.Name)) @@ -344,7 +344,7 @@ var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [S writeCmd, _ := createWriteAndReadCmds(volumePath, testFile, testFileContent) err = framework.IssueSSHCommand(writeCmd, framework.TestContext.Provider, config.node0) Expect(err).NotTo(HaveOccurred()) - err = config.client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, &metav1.DeleteOptions{}) + err = config.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, &metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) By("Waiting for a new PersistentVolume to be re-created") @@ -388,7 +388,7 @@ func checkPodEvents(config *localTestConfig, podName string, ep *eventPatterns) "reason": ep.reason, }.AsSelector().String() options := metav1.ListOptions{FieldSelector: selector} - events, err := config.client.Core().Events(config.ns).List(options) + events, err := config.client.CoreV1().Events(config.ns).List(options) Expect(err).NotTo(HaveOccurred()) Expect(len(events.Items)).NotTo(Equal(0)) for _, p := range ep.pattern { @@ -482,7 +482,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum // podNode wraps RunKubectl to get node where pod is running func podNodeName(config *localTestConfig, pod *v1.Pod) (string, error) { - runtimePod, runtimePodErr := config.client.Core().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + runtimePod, runtimePodErr := config.client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) return runtimePod.Spec.NodeName, runtimePodErr } @@ -735,7 +735,7 @@ func cleanupLocalVolumeProvisioner(config *localTestConfig, volumePath string) { By("Cleaning up persistent volume") pv, err := findLocalPersistentVolume(config.client, volumePath) Expect(err).NotTo(HaveOccurred()) - err = config.client.Core().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{}) + err = config.client.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) } @@ -884,7 +884,7 @@ func waitForLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1 var pv *v1.PersistentVolume for start := time.Now(); time.Since(start) < 10*time.Minute && pv == nil; time.Sleep(5 * time.Second) { - pvs, err := c.Core().PersistentVolumes().List(metav1.ListOptions{}) + pvs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{}) if err != nil { return nil, err } @@ -910,7 +910,7 @@ func waitForLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1 // findLocalPersistentVolume finds persistent volume with 'spec.local.path' equals 'volumePath'. 
func findLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.PersistentVolume, error) { - pvs, err := c.Core().PersistentVolumes().List(metav1.ListOptions{}) + pvs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/test/e2e/ui/dashboard.go b/test/e2e/ui/dashboard.go index 9e416bebde8..ebf75fe561a 100644 --- a/test/e2e/ui/dashboard.go +++ b/test/e2e/ui/dashboard.go @@ -55,7 +55,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() { By("Checking to make sure we get a response from the kubernetes-dashboard.") err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) { var status int - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get()) + proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { framework.Logf("Get services proxy request failed: %v", errProxy) } @@ -87,7 +87,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() { By("Checking that the ApiServer /ui endpoint redirects to a valid server.") var status int - err = f.ClientSet.Core().RESTClient().Get(). + err = f.ClientSet.CoreV1().RESTClient().Get(). AbsPath("/ui"). Timeout(framework.SingleCallTimeout). Do(). diff --git a/test/e2e/upgrades/apparmor.go b/test/e2e/upgrades/apparmor.go index dac14f83fc9..b0f2bd2ab9d 100644 --- a/test/e2e/upgrades/apparmor.go +++ b/test/e2e/upgrades/apparmor.go @@ -99,7 +99,7 @@ func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) { func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) { By("Verifying nodes are AppArmor enabled") - nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err, "Failed to list nodes") for _, node := range nodes.Items { Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{ diff --git a/test/e2e/upgrades/configmaps.go b/test/e2e/upgrades/configmaps.go index af39c3b1b1f..4abbeb31353 100644 --- a/test/e2e/upgrades/configmaps.go +++ b/test/e2e/upgrades/configmaps.go @@ -56,7 +56,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) { By("Creating a ConfigMap") var err error - if t.configMap, err = f.ClientSet.Core().ConfigMaps(ns.Name).Create(t.configMap); err != nil { + if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil { framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err) } diff --git a/test/e2e/upgrades/kube_proxy_migration.go b/test/e2e/upgrades/kube_proxy_migration.go index 030cc41e18e..aad911144b2 100644 --- a/test/e2e/upgrades/kube_proxy_migration.go +++ b/test/e2e/upgrades/kube_proxy_migration.go @@ -210,7 +210,7 @@ func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error { func getKubeProxyStaticPods(c clientset.Interface) (*v1.PodList, error) { label := labels.SelectorFromSet(labels.Set(map[string]string{clusterComponentKey: kubeProxyLabelName})) listOpts := metav1.ListOptions{LabelSelector: label.String()} - return c.Core().Pods(metav1.NamespaceSystem).List(listOpts) + return c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts) } func getKubeProxyDaemonSet(c clientset.Interface) (*extensions.DaemonSetList, error) { diff --git a/test/e2e/upgrades/secrets.go b/test/e2e/upgrades/secrets.go index 807c47f6c1a..85c067e1983 100644 
--- a/test/e2e/upgrades/secrets.go +++ b/test/e2e/upgrades/secrets.go @@ -54,7 +54,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) { By("Creating a secret") var err error - if t.secret, err = f.ClientSet.Core().Secrets(ns.Name).Create(t.secret); err != nil { + if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil { framework.Failf("unable to create test secret %s: %v", t.secret.Name, err) } diff --git a/test/e2e/upgrades/sysctl.go b/test/e2e/upgrades/sysctl.go index 5ef0422ee3f..a8334322ec6 100644 --- a/test/e2e/upgrades/sysctl.go +++ b/test/e2e/upgrades/sysctl.go @@ -54,13 +54,13 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u switch upgrade { case MasterUpgrade: By("Checking the safe sysctl pod keeps running on master upgrade") - pod, err := f.ClientSet.Core().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(pod.Status.Phase).To(Equal(v1.PodRunning)) } By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade") - pod, err := f.ClientSet.Core().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { Expect(err).NotTo(HaveOccurred()) } diff --git a/test/e2e_node/benchmark_util.go b/test/e2e_node/benchmark_util.go index fc4fd9555f7..bcaf924830a 100644 --- a/test/e2e_node/benchmark_util.go +++ b/test/e2e_node/benchmark_util.go @@ -154,7 +154,7 @@ func getThroughputPerfData(batchLag time.Duration, e2eLags []framework.PodLatenc // name of the node, and the node capacities. func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[string]string { nodeName := framework.TestContext.NodeName - node, err := f.ClientSet.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) cpu, ok := node.Status.Capacity[v1.ResourceCPU] diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index bf22f9d5f4b..31b6e5b49f0 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -83,7 +83,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() { f.PodClientNS(kubeapi.NamespaceSystem).CreateSyncInNamespace(criticalPod, kubeapi.NamespaceSystem) // Check that non-critical pods other than the besteffort have been evicted - updatedPodList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}) + updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) framework.ExpectNoError(err) for _, p := range updatedPodList.Items { if p.Name == nonCriticalBestEffort.Name { @@ -108,7 +108,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() { }) func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList { - nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) // Assuming that there is only one node, because this is a node e2e test. 
Expect(len(nodeList.Items)).To(Equal(1)) diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index addf9bbe7b6..ab459dd240e 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -498,12 +498,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() - obj, err := f.ClientSet.Core().Pods(ns).List(options) + obj, err := f.ClientSet.CoreV1().Pods(ns).List(options) return runtime.Object(obj), err }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String() - return f.ClientSet.Core().Pods(ns).Watch(options) + return f.ClientSet.CoreV1().Pods(ns).Watch(options) }, }, &v1.Pod{}, diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index 530f0bcc686..4303842ebf9 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -316,7 +316,7 @@ func getRestartingContainerCommand(path string, containerNum int, restarts int32 } func verifyPodRestartCount(f *framework.Framework, podName string, expectedNumContainers int, expectedRestartCount int32) error { - updatedPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{}) + updatedPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e_node/gpus.go b/test/e2e_node/gpus.go index ee1f6e82d92..41c364db619 100644 --- a/test/e2e_node/gpus.go +++ b/test/e2e_node/gpus.go @@ -33,7 +33,7 @@ import ( ) func getGPUsAvailable(f *framework.Framework) int64 { - nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") var gpusAvailable int64 for _, node := range nodeList.Items { @@ -43,7 +43,7 @@ func getGPUsAvailable(f *framework.Framework) int64 { } func gpusExistOnAllNodes(f *framework.Framework) bool { - nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") for _, node := range nodeList.Items { if node.Name == "kubernetes-master" { @@ -107,7 +107,7 @@ var _ = framework.KubeDescribe("GPU [Serial]", func() { By("Checking the containers in the pod had restarted at-least twice successfully thereby ensuring GPUs are reused") const minContainerRestartCount = 2 Eventually(func() bool { - p, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(podSuccess.Name, metav1.GetOptions{}) + p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podSuccess.Name, metav1.GetOptions{}) if err != nil { framework.Logf("failed to get pod status: %v", err) return false diff --git a/test/e2e_node/memory_eviction_test.go b/test/e2e_node/memory_eviction_test.go index 1059b62b2ca..1c27d36bbf8 100644 --- a/test/e2e_node/memory_eviction_test.go +++ b/test/e2e_node/memory_eviction_test.go @@ -63,7 +63,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu // Wait for the memory pressure condition to disappear from the node status before continuing. 
By("waiting for the memory pressure condition on the node to disappear before ending the test.") Eventually(func() error { - nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return fmt.Errorf("tried to get node list but got error: %v", err) } @@ -169,15 +169,15 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu By("polling the Status.Phase of each pod and checking for violations of the eviction order.") Eventually(func() error { - gteed, gtErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(guaranteed.Name, metav1.GetOptions{}) + gteed, gtErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(guaranteed.Name, metav1.GetOptions{}) framework.ExpectNoError(gtErr, fmt.Sprintf("getting pod %s", guaranteed.Name)) gteedPh := gteed.Status.Phase - burst, buErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(burstable.Name, metav1.GetOptions{}) + burst, buErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(burstable.Name, metav1.GetOptions{}) framework.ExpectNoError(buErr, fmt.Sprintf("getting pod %s", burstable.Name)) burstPh := burst.Status.Phase - best, beErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(besteffort.Name, metav1.GetOptions{}) + best, beErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(besteffort.Name, metav1.GetOptions{}) framework.ExpectNoError(beErr, fmt.Sprintf("getting pod %s", besteffort.Name)) bestPh := best.Status.Phase @@ -193,7 +193,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu // see the eviction manager reporting a pressure condition for a while without the besteffort failing, // and we see that the manager did in fact evict the besteffort (this should be in the Kubelet log), we // will have more reason to believe the phase is out of date. 
- nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { glog.Errorf("tried to get node list but got error: %v", err) } diff --git a/test/e2e_node/mirror_pod_test.go b/test/e2e_node/mirror_pod_test.go index a56c8895905..5f2a4030223 100644 --- a/test/e2e_node/mirror_pod_test.go +++ b/test/e2e_node/mirror_pod_test.go @@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() { }) It("should be updated when static pod updated [Conformance]", func() { By("get mirror pod uid") - pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) uid := pod.UID @@ -74,19 +74,19 @@ var _ = framework.KubeDescribe("MirrorPod", func() { }, 2*time.Minute, time.Second*4).Should(BeNil()) By("check the mirror pod container image is updated") - pod, err = f.ClientSet.Core().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) + pod, err = f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) Expect(len(pod.Spec.Containers)).Should(Equal(1)) Expect(pod.Spec.Containers[0].Image).Should(Equal(image)) }) It("should be recreated when mirror pod gracefully deleted [Conformance]", func() { By("get mirror pod uid") - pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) uid := pod.UID By("delete the mirror pod with grace period 30s") - err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(30)) + err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(30)) Expect(err).ShouldNot(HaveOccurred()) By("wait for the mirror pod to be recreated") @@ -96,12 +96,12 @@ var _ = framework.KubeDescribe("MirrorPod", func() { }) It("should be recreated when mirror pod forcibly deleted [Conformance]", func() { By("get mirror pod uid") - pod, err := f.ClientSet.Core().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) uid := pod.UID By("delete the mirror pod with grace period 0s") - err = f.ClientSet.Core().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(0)) + err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(0)) Expect(err).ShouldNot(HaveOccurred()) By("wait for the mirror pod to be recreated") @@ -158,7 +158,7 @@ func deleteStaticPod(dir, name, namespace string) error { } func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error { - _, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{}) + _, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) if errors.IsNotFound(err) { return nil } @@ -166,7 +166,7 @@ func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) err } func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error { - pod, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{}) + pod, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) } @@ -177,7 +177,7 @@ func checkMirrorPodRunning(cl clientset.Interface, name, namespace string) error } func 
checkMirrorPodRecreatedAndRunnig(cl clientset.Interface, name, namespace string, oUID types.UID) error { - pod, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{}) + pod, err := cl.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err) } diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go index 9f903ab30c1..31d7432bfa5 100644 --- a/test/e2e_node/node_container_manager_test.go +++ b/test/e2e_node/node_container_manager_test.go @@ -176,7 +176,7 @@ func runTest(f *framework.Framework) error { return fmt.Errorf("Expected Node Allocatable Cgroup Does not exist") } // TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings. - nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{}) + nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return err } diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index dea4b7fe4b1..3c9b6453ec9 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { By("Create the test log file") Expect(err).NotTo(HaveOccurred()) By("Create config map for the node problem detector") - _, err = c.Core().ConfigMaps(ns).Create(&v1.ConfigMap{ + _, err = c.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: configName}, Data: map[string]string{path.Base(configFile): config}, }) @@ -331,20 +331,20 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { By(fmt.Sprintf("Wait for %d events generated", test.events)) Eventually(func() error { - return verifyEvents(c.Core().Events(eventNamespace), eventListOptions, test.events, tempReason, tempMessage) + return verifyEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.events, tempReason, tempMessage) }, pollTimeout, pollInterval).Should(Succeed()) By(fmt.Sprintf("Make sure only %d events generated", test.events)) Consistently(func() error { - return verifyEvents(c.Core().Events(eventNamespace), eventListOptions, test.events, tempReason, tempMessage) + return verifyEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.events, tempReason, tempMessage) }, pollConsistent, pollInterval).Should(Succeed()) By(fmt.Sprintf("Make sure node condition %q is set", condition)) Eventually(func() error { - return verifyNodeCondition(c.Core().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage) + return verifyNodeCondition(c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage) }, pollTimeout, pollInterval).Should(Succeed()) By(fmt.Sprintf("Make sure node condition %q is stable", condition)) Consistently(func() error { - return verifyNodeCondition(c.Core().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage) + return verifyNodeCondition(c.CoreV1().Nodes(), condition, test.conditionType, test.conditionReason, test.conditionMessage) }, pollConsistent, pollInterval).Should(Succeed()) } }) @@ -361,12 +361,12 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() { By("Wait for the node problem detector to disappear") Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed()) By("Delete the config map") - 
c.Core().ConfigMaps(ns).Delete(configName, nil) + c.CoreV1().ConfigMaps(ns).Delete(configName, nil) By("Clean up the events") - Expect(c.Core().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(Succeed()) + Expect(c.CoreV1().Events(eventNamespace).DeleteCollection(metav1.NewDeleteOptions(0), eventListOptions)).To(Succeed()) By("Clean up the node condition") patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition)) - c.Core().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do() + c.CoreV1().RESTClient().Patch(types.StrategicMergePatchType).Resource("nodes").Name(framework.TestContext.NodeName).SubResource("status").Body(patch).Do() }) }) }) diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index 5e3ab6b7b30..a263d4c2749 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -318,9 +318,9 @@ while true; do sleep 1; done if testCase.secret { secret.Name = "image-pull-secret-" + string(uuid.NewUUID()) By("create image pull secret") - _, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret) + _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret) Expect(err).NotTo(HaveOccurred()) - defer f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secret.Name, nil) + defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil) container.ImagePullSecrets = []string{secret.Name} } // checkContainerStatus checks whether the container status matches expectation. diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index e9fc1a653a8..de6f25d02e8 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -279,7 +279,7 @@ func decodeConfigz(resp *http.Response) (*kubeletconfig.KubeletConfiguration, er // creates a configmap containing kubeCfg in kube-system namespace func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*apiv1.ConfigMap, error) { cmap := newKubeletConfigMap("testcfg", internalKC) - cmap, err := f.ClientSet.Core().ConfigMaps("kube-system").Create(cmap) + cmap, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cmap) if err != nil { return nil, err } diff --git a/test/integration/apiserver/patch_test.go b/test/integration/apiserver/patch_test.go index 4cb6b726824..56bf90919c9 100644 --- a/test/integration/apiserver/patch_test.go +++ b/test/integration/apiserver/patch_test.go @@ -44,14 +44,14 @@ func TestPatchConflicts(t *testing.T) { defer framework.DeleteTestingNamespace(ns, s, t) // Create the object we're going to conflict on - clientSet.Core().Secrets(ns.Name).Create(&v1.Secret{ + clientSet.CoreV1().Secrets(ns.Name).Create(&v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test", // Populate annotations so the strategic patch descends, compares, and notices the $patch directive Annotations: map[string]string{"initial": "value"}, }, }) - client := clientSet.Core().RESTClient() + client := clientSet.CoreV1().RESTClient() successes := int32(0) diff --git a/test/integration/client/client_test.go b/test/integration/client/client_test.go index c59049387b6..928693f8485 100644 --- a/test/integration/client/client_test.go +++ b/test/integration/client/client_test.go @@ -789,20 +789,20 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s }, }, } - pod, err := 
c.Core().Pods(namespace).Create(&podBody) + pod, err := c.CoreV1().Pods(namespace).Create(&podBody) if err != nil { t.Fatalf("Failed creating selflinktest pod: %v", err) } - if err = c.Core().RESTClient().Get().RequestURI(pod.SelfLink).Do().Into(pod); err != nil { + if err = c.CoreV1().RESTClient().Get().RequestURI(pod.SelfLink).Do().Into(pod); err != nil { t.Errorf("Failed listing pod with supplied self link '%v': %v", pod.SelfLink, err) } - podList, err := c.Core().Pods(namespace).List(metav1.ListOptions{}) + podList, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{}) if err != nil { t.Errorf("Failed listing pods: %v", err) } - if err = c.Core().RESTClient().Get().RequestURI(podList.SelfLink).Do().Into(podList); err != nil { + if err = c.CoreV1().RESTClient().Get().RequestURI(podList.SelfLink).Do().Into(podList); err != nil { t.Errorf("Failed listing pods with supplied self link '%v': %v", podList.SelfLink, err) } @@ -813,7 +813,7 @@ func runSelfLinkTestOnNamespace(t *testing.T, c clientset.Interface, namespace s continue } found = true - err = c.Core().RESTClient().Get().RequestURI(item.SelfLink).Do().Into(pod) + err = c.CoreV1().RESTClient().Get().RequestURI(item.SelfLink).Do().Into(pod) if err != nil { t.Errorf("Failed listing pod with supplied self link '%v': %v", item.SelfLink, err) } diff --git a/test/integration/configmap/configmap_test.go b/test/integration/configmap/configmap_test.go index f115d627697..25600f7bc53 100644 --- a/test/integration/configmap/configmap_test.go +++ b/test/integration/configmap/configmap_test.go @@ -56,7 +56,7 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) }, } - if _, err := client.Core().ConfigMaps(cfg.Namespace).Create(&cfg); err != nil { + if _, err := client.CoreV1().ConfigMaps(cfg.Namespace).Create(&cfg); err != nil { t.Errorf("unable to create test configMap: %v", err) } defer deleteConfigMapOrErrorf(t, client, cfg.Namespace, cfg.Name) @@ -111,14 +111,14 @@ func DoTestConfigMap(t *testing.T, client clientset.Interface, ns *v1.Namespace) } pod.ObjectMeta.Name = "uses-configmap" - if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) } func deleteConfigMapOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.Core().ConfigMaps(ns).Delete(name, nil); err != nil { + if err := c.CoreV1().ConfigMaps(ns).Delete(name, nil); err != nil { t.Errorf("unable to delete ConfigMap %v: %v", name, err) } } diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go index 7d771d1a759..9b90d84c485 100644 --- a/test/integration/deployment/util.go +++ b/test/integration/deployment/util.go @@ -216,7 +216,7 @@ func (d *deploymentTester) markAllPodsReady() { var readyPods int32 err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { readyPods = 0 - pods, err := d.c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()}) + pods, err := d.c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { d.t.Logf("failed to list Deployment pods, will retry later: %v", err) return false, nil diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go index b0ded14413f..5a7ba8451d5 100644 --- a/test/integration/evictions/evictions_test.go +++ 
b/test/integration/evictions/evictions_test.go @@ -75,12 +75,12 @@ func TestConcurrentEvictionRequests(t *testing.T) { podName := fmt.Sprintf(podNameFormat, i) pod := newPod(podName) - if _, err := clientSet.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := clientSet.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } addPodConditionReady(pod) - if _, err := clientSet.Core().Pods(ns.Name).UpdateStatus(pod); err != nil { + if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil { t.Fatal(err) } } @@ -124,7 +124,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { // should not return here otherwise we would leak the pod } - _, err = clientSet.Core().Pods(ns.Name).Get(podName, metav1.GetOptions{}) + _, err = clientSet.CoreV1().Pods(ns.Name).Get(podName, metav1.GetOptions{}) switch { case errors.IsNotFound(err): atomic.AddUint32(&numberPodsEvicted, 1) @@ -138,7 +138,7 @@ func TestConcurrentEvictionRequests(t *testing.T) { } // delete pod which still exists due to error - e := clientSet.Core().Pods(ns.Name).Delete(podName, deleteOption) + e := clientSet.CoreV1().Pods(ns.Name).Delete(podName, deleteOption) if e != nil { errCh <- e } diff --git a/test/integration/framework/perf_utils.go b/test/integration/framework/perf_utils.go index 966a271dca9..e6d91e71498 100644 --- a/test/integration/framework/perf_utils.go +++ b/test/integration/framework/perf_utils.go @@ -73,7 +73,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error { }, } for i := 0; i < numNodes; i++ { - if _, err := p.client.Core().Nodes().Create(baseNode); err != nil { + if _, err := p.client.CoreV1().Nodes().Create(baseNode); err != nil { glog.Fatalf("Error creating node: %v", err) } } @@ -96,7 +96,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error { func (p *IntegrationTestNodePreparer) CleanupNodes() error { nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client) for i := range nodes.Items { - if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil { + if err := p.client.CoreV1().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil { glog.Errorf("Error while deleting Node: %v", err) } } diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go index 5bd613f5e34..030951e82b3 100644 --- a/test/integration/garbagecollector/garbage_collector_test.go +++ b/test/integration/garbagecollector/garbage_collector_test.go @@ -288,11 +288,11 @@ func setup(t *testing.T, workerCount int) *testContext { func createNamespaceOrDie(name string, c clientset.Interface, t *testing.T) *v1.Namespace { ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} - if _, err := c.Core().Namespaces().Create(ns); err != nil { + if _, err := c.CoreV1().Namespaces().Create(ns); err != nil { t.Fatalf("failed to create namespace: %v", err) } falseVar := false - _, err := c.Core().ServiceAccounts(ns.Name).Create(&v1.ServiceAccount{ + _, err := c.CoreV1().ServiceAccounts(ns.Name).Create(&v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{Name: "default"}, AutomountServiceAccountToken: &falseVar, }) @@ -305,7 +305,7 @@ func createNamespaceOrDie(name string, c clientset.Interface, t *testing.T) *v1. 
func deleteNamespaceOrDie(name string, c clientset.Interface, t *testing.T) { zero := int64(0) background := metav1.DeletePropagationBackground - err := c.Core().Namespaces().Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}) + err := c.CoreV1().Namespaces().Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &zero, PropagationPolicy: &background}) if err != nil { t.Fatalf("failed to delete namespace %q: %v", name, err) } @@ -321,8 +321,8 @@ func TestCascadingDeletion(t *testing.T) { ns := createNamespaceOrDie("gc-cascading-deletion", clientSet, t) defer deleteNamespaceOrDie(ns.Name, clientSet, t) - rcClient := clientSet.Core().ReplicationControllers(ns.Name) - podClient := clientSet.Core().Pods(ns.Name) + rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) + podClient := clientSet.CoreV1().Pods(ns.Name) toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name)) if err != nil { @@ -408,7 +408,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) { ns := createNamespaceOrDie("gc-non-existing-owner", clientSet, t) defer deleteNamespaceOrDie(ns.Name, clientSet, t) - podClient := clientSet.Core().Pods(ns.Name) + podClient := clientSet.CoreV1().Pods(ns.Name) pod := newPod(garbageCollectedPodName, ns.Name, []metav1.OwnerReference{{UID: "doesn't matter", Name: toBeDeletedRCName}}) _, err := podClient.Create(pod) @@ -432,8 +432,8 @@ func TestCreateWithNonExistentOwner(t *testing.T) { func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options *metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) { defer wg.Done() - rcClient := clientSet.Core().ReplicationControllers(namespace) - podClient := clientSet.Core().Pods(namespace) + rcClient := clientSet.CoreV1().ReplicationControllers(namespace) + podClient := clientSet.CoreV1().Pods(namespace) // create rc. rcName := "test.rc." + nameSuffix rc := newOwnerRC(rcName, namespace) @@ -468,8 +468,8 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet } func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) { - rcClient := clientSet.Core().ReplicationControllers(namespace) - podClient := clientSet.Core().Pods(namespace) + rcClient := clientSet.CoreV1().ReplicationControllers(namespace) + podClient := clientSet.CoreV1().Pods(namespace) pods, err := podClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) @@ -530,7 +530,7 @@ func TestStressingCascadingDeletion(t *testing.T) { t.Logf("number of remaining replication controllers and pods are as expected") // verify the remaining pods all have "orphan" in their names. 
- podClient := clientSet.Core().Pods(ns.Name) + podClient := clientSet.CoreV1().Pods(ns.Name) pods, err := podClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) @@ -561,8 +561,8 @@ func TestOrphaning(t *testing.T) { ns := createNamespaceOrDie("gc-orphaning", clientSet, t) defer deleteNamespaceOrDie(ns.Name, clientSet, t) - podClient := clientSet.Core().Pods(ns.Name) - rcClient := clientSet.Core().ReplicationControllers(ns.Name) + podClient := clientSet.CoreV1().Pods(ns.Name) + rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) // create the RC with the orphan finalizer set toBeDeletedRC := newOwnerRC(toBeDeletedRCName, ns.Name) toBeDeletedRC, err := rcClient.Create(toBeDeletedRC) @@ -631,8 +631,8 @@ func TestSolidOwnerDoesNotBlockWaitingOwner(t *testing.T) { ns := createNamespaceOrDie("gc-foreground1", clientSet, t) defer deleteNamespaceOrDie(ns.Name, clientSet, t) - podClient := clientSet.Core().Pods(ns.Name) - rcClient := clientSet.Core().ReplicationControllers(ns.Name) + podClient := clientSet.CoreV1().Pods(ns.Name) + rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) // create the RC with the orphan finalizer set toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name)) if err != nil { @@ -691,8 +691,8 @@ func TestNonBlockingOwnerRefDoesNotBlock(t *testing.T) { ns := createNamespaceOrDie("gc-foreground2", clientSet, t) defer deleteNamespaceOrDie(ns.Name, clientSet, t) - podClient := clientSet.Core().Pods(ns.Name) - rcClient := clientSet.Core().ReplicationControllers(ns.Name) + podClient := clientSet.CoreV1().Pods(ns.Name) + rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) // create the RC with the orphan finalizer set toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name)) if err != nil { @@ -756,8 +756,8 @@ func TestBlockingOwnerRefDoesBlock(t *testing.T) { ns := createNamespaceOrDie("foo", clientSet, t) defer deleteNamespaceOrDie(ns.Name, clientSet, t) - podClient := clientSet.Core().Pods(ns.Name) - rcClient := clientSet.Core().ReplicationControllers(ns.Name) + podClient := clientSet.CoreV1().Pods(ns.Name) + rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) // create the RC with the orphan finalizer set toBeDeletedRC, err := rcClient.Create(newOwnerRC(toBeDeletedRCName, ns.Name)) if err != nil { @@ -879,7 +879,7 @@ func TestMixedRelationships(t *testing.T) { ns := createNamespaceOrDie("crd-mixed", clientSet, t) - configMapClient := clientSet.Core().ConfigMaps(ns.Name) + configMapClient := clientSet.CoreV1().ConfigMaps(ns.Name) definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, clientPool, ns.Name) @@ -977,7 +977,7 @@ func TestCRDDeletionCascading(t *testing.T) { ns := createNamespaceOrDie("crd-mixed", clientSet, t) - configMapClient := clientSet.Core().ConfigMaps(ns.Name) + configMapClient := clientSet.CoreV1().ConfigMaps(ns.Name) definition, resourceClient := createRandomCustomResourceDefinition(t, apiExtensionClient, clientPool, ns.Name) diff --git a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go index fb28316a92f..7ad3489c7c9 100644 --- a/test/integration/replicaset/replicaset_test.go +++ b/test/integration/replicaset/replicaset_test.go @@ -115,7 +115,7 @@ func newMatchingPod(podName, namespace string) *v1.Pod { // communication with the API server fails. 
func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rsNum, podNum int) (bool, error) { rsClient := clientSet.Extensions().ReplicaSets(namespace) - podClient := clientSet.Core().Pods(namespace) + podClient := clientSet.CoreV1().Pods(namespace) pods, err := podClient.List(metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("Failed to list pods: %v", err) @@ -205,7 +205,7 @@ func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.R createdRSs = append(createdRSs, createdRS) } for _, pod := range pods { - createdPod, err := clientSet.Core().Pods(pod.Namespace).Create(pod) + createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(pod) if err != nil { t.Fatalf("Failed to create pod %s: %v", pod.Name, err) } @@ -297,7 +297,7 @@ func updateRS(t *testing.T, rsClient typedv1beta1.ReplicaSetInterface, rsName st // Verify ControllerRef of a RS pod that has incorrect attributes is automatically patched by the RS func testPodControllerRefPatch(t *testing.T, c clientset.Interface, pod *v1.Pod, ownerReference *metav1.OwnerReference, rs *v1beta1.ReplicaSet, expectedOwnerReferenceNum int) { ns := rs.Namespace - podClient := c.Core().Pods(ns) + podClient := c.CoreV1().Pods(ns) updatePod(t, podClient, pod.Name, func(pod *v1.Pod) { pod.OwnerReferences = []metav1.OwnerReference{*ownerReference} }) @@ -350,7 +350,7 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 } pod.Status.Conditions = append(pod.Status.Conditions, *condition) } - _, err := clientSet.Core().Pods(pod.Namespace).UpdateStatus(pod) + _, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) if err != nil { // When status fails to be updated, we continue to next pod continue @@ -460,7 +460,7 @@ func TestAdoption(t *testing.T) { defer framework.DeleteTestingNamespace(ns, s, t) rsClient := clientSet.Extensions().ReplicaSets(ns.Name) - podClient := clientSet.Core().Pods(ns.Name) + podClient := clientSet.CoreV1().Pods(ns.Name) const rsName = "rs" rs, err := rsClient.Create(newRS(rsName, ns.Name, 1)) if err != nil { @@ -590,7 +590,7 @@ func TestDeletingAndFailedPods(t *testing.T) { waitRSStable(t, c, rs) // Verify RS creates 2 pods - podClient := c.Core().Pods(ns.Name) + podClient := c.CoreV1().Pods(ns.Name) pods := getPods(t, podClient, labelMap()) if len(pods.Items) != 2 { t.Fatalf("len(pods) = %d, want 2", len(pods.Items)) @@ -602,7 +602,7 @@ func TestDeletingAndFailedPods(t *testing.T) { updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) { pod.Finalizers = []string{"fake.example.com/blockDeletion"} }) - if err := c.Core().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil { + if err := c.CoreV1().Pods(ns.Name).Delete(deletingPod.Name, &metav1.DeleteOptions{}); err != nil { t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err) } @@ -658,7 +658,7 @@ func TestOverlappingRSs(t *testing.T) { } // Expect 3 total Pods to be created - podClient := c.Core().Pods(ns.Name) + podClient := c.CoreV1().Pods(ns.Name) pods := getPods(t, podClient, labelMap()) if len(pods.Items) != 3 { t.Errorf("len(pods) = %d, want 3", len(pods.Items)) @@ -690,7 +690,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { waitRSStable(t, c, rs) // Orphaning: RS should remove OwnerReference from a pod when the pod's labels change to not match its labels - podClient := c.Core().Pods(ns.Name) + podClient := c.CoreV1().Pods(ns.Name) pods := getPods(t, podClient, labelMap()) if len(pods.Items) != 1 { 
t.Fatalf("len(pods) = %d, want 1", len(pods.Items)) @@ -766,7 +766,7 @@ func TestGeneralPodAdoption(t *testing.T) { rs = rss[0] waitRSStable(t, c, rs) - podClient := c.Core().Pods(ns.Name) + podClient := c.CoreV1().Pods(ns.Name) pods := getPods(t, podClient, labelMap()) if len(pods.Items) != 1 { t.Fatalf("len(pods) = %d, want 1", len(pods.Items)) @@ -804,7 +804,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) { t.Fatalf("Unexpected .Status.AvailableReplicas: Expected 0, saw %d", rs.Status.AvailableReplicas) } - podClient := c.Core().Pods(ns.Name) + podClient := c.CoreV1().Pods(ns.Name) pods := getPods(t, podClient, labelMap()) if len(pods.Items) != 3 { t.Fatalf("len(pods) = %d, want 3", len(pods.Items)) @@ -878,7 +878,7 @@ func TestExtraPodsAdoptionAndDeletion(t *testing.T) { // Verify the extra pod is deleted eventually by determining whether number of // all pods within namespace matches .spec.replicas of the RS (2 in this case) - podClient := c.Core().Pods(ns.Name) + podClient := c.CoreV1().Pods(ns.Name) if err := wait.PollImmediate(interval, timeout, func() (bool, error) { // All pods have labelMap as their labels pods := getPods(t, podClient, labelMap()) @@ -909,7 +909,7 @@ func TestFullyLabeledReplicas(t *testing.T) { }) // Set one of the pods to have extra labels - podClient := c.Core().Pods(ns.Name) + podClient := c.CoreV1().Pods(ns.Name) pods := getPods(t, podClient, labelMap()) if len(pods.Items) != 2 { t.Fatalf("len(pods) = %d, want 2", len(pods.Items)) diff --git a/test/integration/replicationcontroller/replicationcontroller_test.go b/test/integration/replicationcontroller/replicationcontroller_test.go index 7773da152b6..aac6bb5fe69 100644 --- a/test/integration/replicationcontroller/replicationcontroller_test.go +++ b/test/integration/replicationcontroller/replicationcontroller_test.go @@ -99,8 +99,8 @@ func newMatchingPod(podName, namespace string) *v1.Pod { // controllers and pods are rcNum and podNum. It returns error if the // communication with the API server fails. 
 func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespace string, rcNum, podNum int) (bool, error) {
-	rcClient := clientSet.Core().ReplicationControllers(namespace)
-	podClient := clientSet.Core().Pods(namespace)
+	rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
+	podClient := clientSet.CoreV1().Pods(namespace)
 	pods, err := podClient.List(metav1.ListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Failed to list pods: %v", err)
@@ -212,8 +212,8 @@ func TestAdoption(t *testing.T) {
 		ns := framework.CreateTestingNamespace(fmt.Sprintf("adoption-%d", i), s, t)
 		defer framework.DeleteTestingNamespace(ns, s, t)

-		rcClient := clientSet.Core().ReplicationControllers(ns.Name)
-		podClient := clientSet.Core().Pods(ns.Name)
+		rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
+		podClient := clientSet.CoreV1().Pods(ns.Name)
 		const rcName = "rc"
 		rc, err := rcClient.Create(newRC(rcName, ns.Name, 1))
 		if err != nil {
@@ -249,8 +249,8 @@ func TestAdoption(t *testing.T) {
 }

 func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.ReplicationController, pods []*v1.Pod, ns string) {
-	rcClient := clientSet.Core().ReplicationControllers(ns)
-	podClient := clientSet.Core().Pods(ns)
+	rcClient := clientSet.CoreV1().ReplicationControllers(ns)
+	podClient := clientSet.CoreV1().Pods(ns)
 	for _, rc := range rcs {
 		if _, err := rcClient.Create(rc); err != nil {
 			t.Fatalf("Failed to create replication controller %s: %v", rc.Name, err)
@@ -264,7 +264,7 @@ func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.Replic
 }

 func waitRCStable(t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController, ns string) {
-	rcClient := clientSet.Core().ReplicationControllers(ns)
+	rcClient := clientSet.CoreV1().ReplicationControllers(ns)
 	if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
 		updatedRC, err := rcClient.Get(rc.Name, metav1.GetOptions{})
 		if err != nil {
@@ -304,7 +304,7 @@ func TestUpdateSelectorToAdopt(t *testing.T) {

 	// change the rc's selector to match both pods
 	patch := `{"spec":{"selector":{"uniqueKey":null}}}`
-	rcClient := clientSet.Core().ReplicationControllers(ns.Name)
+	rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
 	rc, err := rcClient.Patch(rc.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch replication controller: %v", err)
@@ -342,7 +342,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {

 	// change the rc's selector to match both pods
 	patch := `{"spec":{"selector":{"uniqueKey":"1"},"template":{"metadata":{"labels":{"uniqueKey":"1"}}}}}`
-	rcClient := clientSet.Core().ReplicationControllers(ns.Name)
+	rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
 	rc, err := rcClient.Patch(rc.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch replication controller: %v", err)
@@ -354,7 +354,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	podClient := clientSet.Core().Pods(ns.Name)
+	podClient := clientSet.CoreV1().Pods(ns.Name)
 	pod2, err = podClient.Get(pod2.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get pod2: %v", err)
@@ -385,7 +385,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {

 	// change the rc's selector to match both pods
 	patch := `{"metadata":{"labels":{"name":null}}}`
-	podClient := clientSet.Core().Pods(ns.Name)
+	podClient := clientSet.CoreV1().Pods(ns.Name)
 	pod2, err := podClient.Patch(pod2.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch pod2: %v", err)
@@ -432,7 +432,7 @@ func TestUpdateLabelToBeAdopted(t *testing.T) {

 	// change the rc's selector to match both pods
 	patch := `{"metadata":{"labels":{"uniqueKey":"1"}}}`
-	podClient := clientSet.Core().Pods(ns.Name)
+	podClient := clientSet.CoreV1().Pods(ns.Name)
 	pod2, err := podClient.Patch(pod2.Name, types.StrategicMergePatchType, []byte(patch))
 	if err != nil {
 		t.Fatalf("Failed to patch pod2: %v", err)
diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go
index 08c0196f8cf..b24d952d6b0 100644
--- a/test/integration/scheduler/scheduler_test.go
+++ b/test/integration/scheduler/scheduler_test.go
@@ -568,11 +568,11 @@ func TestMultiScheduler(t *testing.T) {
 	//   - note: these two pods belong to default scheduler which no longer exists
 	podWithNoAnnotation2 := createPod("pod-with-no-annotation2", nil)
 	podWithAnnotationFitsDefault2 := createPod("pod-with-annotation-fits-default2", schedulerAnnotationFitsDefault)
-	testPodNoAnnotation2, err := clientSet.Core().Pods(ns.Name).Create(podWithNoAnnotation2)
+	testPodNoAnnotation2, err := clientSet.CoreV1().Pods(ns.Name).Create(podWithNoAnnotation2)
 	if err != nil {
 		t.Fatalf("Failed to create pod: %v", err)
 	}
-	testPodWithAnnotationFitsDefault2, err := clientSet.Core().Pods(ns.Name).Create(podWithAnnotationFitsDefault2)
+	testPodWithAnnotationFitsDefault2, err := clientSet.CoreV1().Pods(ns.Name).Create(podWithAnnotationFitsDefault2)
 	if err != nil {
 		t.Fatalf("Failed to create pod: %v", err)
 	}
diff --git a/test/integration/scheduler_perf/scheduler_test.go b/test/integration/scheduler_perf/scheduler_test.go
index 62e1f1bfd78..8da4b2212bf 100644
--- a/test/integration/scheduler_perf/scheduler_test.go
+++ b/test/integration/scheduler_perf/scheduler_test.go
@@ -220,11 +220,11 @@ func (na nodeAffinity) mutatePodTemplate(pod *v1.Pod) {
 // generateNodes generates nodes to be used for scheduling.
 func (inputConfig *schedulerPerfConfig) generateNodes(config *testConfig) {
 	for i := 0; i < inputConfig.NodeCount; i++ {
-		config.schedulerSupportFunctions.GetClient().Core().Nodes().Create(config.mutatedNodeTemplate)
+		config.schedulerSupportFunctions.GetClient().CoreV1().Nodes().Create(config.mutatedNodeTemplate)

 	}
 	for i := 0; i < config.numNodes-inputConfig.NodeCount; i++ {
-		config.schedulerSupportFunctions.GetClient().Core().Nodes().Create(baseNodeTemplate)
+		config.schedulerSupportFunctions.GetClient().CoreV1().Nodes().Create(baseNodeTemplate)

 	}
 }
diff --git a/test/integration/secrets/secrets_test.go b/test/integration/secrets/secrets_test.go
index d07505e0909..385d73eb16d 100644
--- a/test/integration/secrets/secrets_test.go
+++ b/test/integration/secrets/secrets_test.go
@@ -31,7 +31,7 @@ import (
 )

 func deleteSecretOrErrorf(t *testing.T, c clientset.Interface, ns, name string) {
-	if err := c.Core().Secrets(ns).Delete(name, nil); err != nil {
+	if err := c.CoreV1().Secrets(ns).Delete(name, nil); err != nil {
 		t.Errorf("unable to delete secret %v: %v", name, err)
 	}
 }
@@ -62,7 +62,7 @@ func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) {
 		},
 	}

-	if _, err := client.Core().Secrets(s.Namespace).Create(&s); err != nil {
+	if _, err := client.CoreV1().Secrets(s.Namespace).Create(&s); err != nil {
 		t.Errorf("unable to create test secret: %v", err)
 	}
 	defer deleteSecretOrErrorf(t, client, s.Namespace, s.Name)
@@ -102,14 +102,14 @@ func DoTestSecrets(t *testing.T, client clientset.Interface, ns *v1.Namespace) {

 	// Create a pod to consume secret.
 	pod.ObjectMeta.Name = "uses-secret"
-	if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
+	if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil {
 		t.Errorf("Failed to create pod: %v", err)
 	}
 	defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)

 	// Create a pod that consumes non-existent secret.
pod.ObjectMeta.Name = "uses-non-existent-secret" - if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil { + if _, err := client.CoreV1().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod: %v", err) } defer integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name) diff --git a/test/integration/storageclasses/storage_classes_test.go b/test/integration/storageclasses/storage_classes_test.go index 055b89155df..09f01fdad52 100644 --- a/test/integration/storageclasses/storage_classes_test.go +++ b/test/integration/storageclasses/storage_classes_test.go @@ -79,7 +79,7 @@ func DoTestStorageClasses(t *testing.T, client clientset.Interface, ns *v1.Names } pvc.ObjectMeta.Name = "uses-storageclass" - if _, err := client.Core().PersistentVolumeClaims(ns.Name).Create(pvc); err != nil { + if _, err := client.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc); err != nil { t.Errorf("Failed to create pvc: %v", err) } defer deletePersistentVolumeClaimOrErrorf(t, client, ns.Name, pvc.Name) @@ -92,7 +92,7 @@ func deleteStorageClassOrErrorf(t *testing.T, c clientset.Interface, ns, name st } func deletePersistentVolumeClaimOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.Core().PersistentVolumeClaims(ns).Delete(name, nil); err != nil { + if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(name, nil); err != nil { t.Errorf("unable to delete persistent volume claim %v: %v", name, err) } } diff --git a/test/integration/utils.go b/test/integration/utils.go index 629a58a2d87..c399ea33714 100644 --- a/test/integration/utils.go +++ b/test/integration/utils.go @@ -28,7 +28,7 @@ import ( ) func DeletePodOrErrorf(t *testing.T, c clientset.Interface, ns, name string) { - if err := c.Core().Pods(ns).Delete(name, nil); err != nil { + if err := c.CoreV1().Pods(ns).Delete(name, nil); err != nil { t.Errorf("unable to delete pod %v: %v", name, err) } } diff --git a/test/utils/density_utils.go b/test/utils/density_utils.go index c63b84e7672..c607e5c940e 100644 --- a/test/utils/density_utils.go +++ b/test/utils/density_utils.go @@ -42,7 +42,7 @@ func AddLabelsToNode(c clientset.Interface, nodeName string, labels map[string]s patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString) var err error for attempt := 0; attempt < retries; attempt++ { - _, err = c.Core().Nodes().Patch(nodeName, types.MergePatchType, []byte(patch)) + _, err = c.CoreV1().Nodes().Patch(nodeName, types.MergePatchType, []byte(patch)) if err != nil { if !apierrs.IsConflict(err) { return err @@ -61,7 +61,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri var node *v1.Node var err error for attempt := 0; attempt < retries; attempt++ { - node, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) if err != nil { return err } @@ -74,7 +74,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri } delete(node.Labels, labelKey) } - _, err = c.Core().Nodes().Update(node) + _, err = c.CoreV1().Nodes().Update(node) if err != nil { if !apierrs.IsConflict(err) { return err @@ -92,7 +92,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri // VerifyLabelsRemoved checks if Node for given nodeName does not have any of labels from labelKeys. // Return non-nil error if it does. 
 func VerifyLabelsRemoved(c clientset.Interface, nodeName string, labelKeys []string) error {
-	node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
+	node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
diff --git a/test/utils/deployment.go b/test/utils/deployment.go
index 896a610fea4..6526df29599 100644
--- a/test/utils/deployment.go
+++ b/test/utils/deployment.go
@@ -51,7 +51,7 @@ func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*
 func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, logf LogfFn) {
 	minReadySeconds := deployment.Spec.MinReadySeconds
 	podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
-		return c.Core().Pods(namespace).List(options)
+		return c.CoreV1().Pods(namespace).List(options)
 	}

 	podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
diff --git a/test/utils/pod_store.go b/test/utils/pod_store.go
index b7f4d37975b..5005a1f601f 100644
--- a/test/utils/pod_store.go
+++ b/test/utils/pod_store.go
@@ -39,13 +39,13 @@ func NewPodStore(c clientset.Interface, namespace string, label labels.Selector,
 		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 			options.LabelSelector = label.String()
 			options.FieldSelector = field.String()
-			obj, err := c.Core().Pods(namespace).List(options)
+			obj, err := c.CoreV1().Pods(namespace).List(options)
 			return runtime.Object(obj), err
 		},
 		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 			options.LabelSelector = label.String()
 			options.FieldSelector = field.String()
-			return c.Core().Pods(namespace).Watch(options)
+			return c.CoreV1().Pods(namespace).Watch(options)
 		},
 	}
 	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
diff --git a/test/utils/runners.go b/test/utils/runners.go
index e3dc589b1e9..564f960e952 100644
--- a/test/utils/runners.go
+++ b/test/utils/runners.go
@@ -61,7 +61,7 @@ func removePtr(replicas *int32) int32 {
 func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) {
 	// Wait until it's scheduled
-	p, err := c.Core().Pods(namespace).Get(name, metav1.GetOptions{ResourceVersion: "0"})
+	p, err := c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{ResourceVersion: "0"})
 	if err == nil && p.Spec.NodeName != "" {
 		return p, nil
 	}
@@ -69,7 +69,7 @@ func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, time
 	startTime := time.Now()
 	for startTime.Add(timeout).After(time.Now()) {
 		time.Sleep(pollingPeriod)
-		p, err := c.Core().Pods(namespace).Get(name, metav1.GetOptions{ResourceVersion: "0"})
+		p, err := c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{ResourceVersion: "0"})
 		if err == nil && p.Spec.NodeName != "" {
 			return p, nil
 		}
@@ -83,7 +83,7 @@ func RunPodAndGetNodeName(c clientset.Interface, pod *v1.Pod, timeout time.Durat
 	var err error
 	// Create a Pod
 	for i := 0; i < retries; i++ {
-		_, err = c.Core().Pods(namespace).Create(pod)
+		_, err = c.CoreV1().Pods(namespace).Create(pod)
 		if err == nil || apierrs.IsAlreadyExists(err) {
 			break
 		}
@@ -553,7 +553,7 @@ func (config *RCConfig) create() error {

 	config.applyTo(rc.Spec.Template)

-	_, err := config.Client.Core().ReplicationControllers(config.Namespace).Create(rc)
+	_, err := config.Client.CoreV1().ReplicationControllers(config.Namespace).Create(rc)
 	if err != nil {
 		return fmt.Errorf("Error creating replication controller: %v", err)
 	}
@@ -760,7 +760,7 @@ func (config *RCConfig) start() error {
 	if oldRunning != config.Replicas {
 		// List only pods from a given replication controller.
 		options := metav1.ListOptions{LabelSelector: label.String()}
-		if pods, err := config.Client.Core().Pods(metav1.NamespaceAll).List(options); err == nil {
+		if pods, err := config.Client.CoreV1().Pods(metav1.NamespaceAll).List(options); err == nil {
 			for _, pod := range pods.Items {
 				config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
@@ -789,7 +789,7 @@ func StartPods(c clientset.Interface, replicas int, namespace string, podNamePre
 		pod.ObjectMeta.Labels["name"] = podName
 		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
 		pod.Spec.Containers[0].Name = podName
-		_, err := c.Core().Pods(namespace).Create(&pod)
+		_, err := c.CoreV1().Pods(namespace).Create(&pod)
 		if err != nil {
 			return err
 		}
@@ -890,7 +890,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
 		return nil
 	}
 	for attempt := 0; attempt < retries; attempt++ {
-		if _, err = client.Core().Nodes().Patch(node.Name, types.MergePatchType, []byte(patch)); err == nil {
+		if _, err = client.CoreV1().Nodes().Patch(node.Name, types.MergePatchType, []byte(patch)); err == nil {
 			return nil
 		}
 		if !apierrs.IsConflict(err) {
@@ -903,7 +903,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo

 func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
 	for attempt := 0; attempt < retries; attempt++ {
-		node, err := client.Core().Nodes().Get(nodeName, metav1.GetOptions{})
+		node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 		if err != nil {
 			return fmt.Errorf("Skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
 		}
@@ -911,7 +911,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
 		if apiequality.Semantic.DeepEqual(node, updatedNode) {
 			return nil
 		}
-		if _, err = client.Core().Nodes().Update(updatedNode); err == nil {
+		if _, err = client.CoreV1().Nodes().Update(updatedNode); err == nil {
 			return nil
 		}
 		if !apierrs.IsConflict(err) {
@@ -988,7 +988,7 @@ func MakePodSpec() v1.PodSpec {
 func makeCreatePod(client clientset.Interface, namespace string, podTemplate *v1.Pod) error {
 	var err error
 	for attempt := 0; attempt < retries; attempt++ {
-		if _, err := client.Core().Pods(namespace).Create(podTemplate); err == nil {
+		if _, err := client.CoreV1().Pods(namespace).Create(podTemplate); err == nil {
 			return nil
 		}
 		glog.Errorf("Error while creating pod, maybe retry: %v", err)
@@ -1033,7 +1033,7 @@ func createController(client clientset.Interface, controllerName, namespace stri
 	}
 	var err error
 	for attempt := 0; attempt < retries; attempt++ {
-		if _, err := client.Core().ReplicationControllers(namespace).Create(rc); err == nil {
+		if _, err := client.CoreV1().ReplicationControllers(namespace).Create(rc); err == nil {
 			return nil
 		}
 		glog.Errorf("Error while creating rc, maybe retry: %v", err)
@@ -1093,7 +1093,7 @@ func (config *SecretConfig) Run() error {
 		secret.StringData[k] = v
 	}

-	_, err := config.Client.Core().Secrets(config.Namespace).Create(secret)
+	_, err := config.Client.CoreV1().Secrets(config.Namespace).Create(secret)
 	if err != nil {
 		return fmt.Errorf("Error creating secret: %v", err)
 	}
@@ -1102,7 +1102,7 @@ func (config *SecretConfig) Run() error {
 }

 func (config *SecretConfig) Stop() error {
-	if err := config.Client.Core().Secrets(config.Namespace).Delete(config.Name, &metav1.DeleteOptions{}); err != nil {
+	if err := config.Client.CoreV1().Secrets(config.Namespace).Delete(config.Name, &metav1.DeleteOptions{}); err != nil {
 		return fmt.Errorf("Error deleting secret: %v", err)
 	}
 	config.LogFunc("Deleted secret %v/%v", config.Namespace, config.Name)
@@ -1152,7 +1152,7 @@ func (config *ConfigMapConfig) Run() error {
 		configMap.Data[k] = v
 	}

-	_, err := config.Client.Core().ConfigMaps(config.Namespace).Create(configMap)
+	_, err := config.Client.CoreV1().ConfigMaps(config.Namespace).Create(configMap)
 	if err != nil {
 		return fmt.Errorf("Error creating configmap: %v", err)
 	}
@@ -1161,7 +1161,7 @@ func (config *ConfigMapConfig) Run() error {
 }

 func (config *ConfigMapConfig) Stop() error {
-	if err := config.Client.Core().ConfigMaps(config.Namespace).Delete(config.Name, &metav1.DeleteOptions{}); err != nil {
+	if err := config.Client.CoreV1().ConfigMaps(config.Namespace).Delete(config.Name, &metav1.DeleteOptions{}); err != nil {
 		return fmt.Errorf("Error deleting configmap: %v", err)
 	}
 	config.LogFunc("Deleted configmap %v/%v", config.Namespace, config.Name)
@@ -1240,7 +1240,7 @@ func (config *DaemonConfig) Run() error {
 	var nodes *v1.NodeList
 	for i := 0; i < retries; i++ {
 		// Wait for all daemons to be running
-		nodes, err = config.Client.Core().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
+		nodes, err = config.Client.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
 		if err == nil {
 			break
 		} else if i+1 == retries {