use core client with explicit version globally

Kevin 2017-10-25 23:54:32 +08:00
parent d945927077
commit 4c8539cece
190 changed files with 921 additions and 921 deletions
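
The change shown in the hunks below is mechanical: every call through the deprecated, unversioned group accessor Core() on the generated clientset is replaced with the explicitly versioned CoreV1() accessor, which returns the same CoreV1Interface. The following is a minimal sketch of the pattern, not taken from the commit; it assumes a client-go release contemporary with this change (list/get methods without a context.Context parameter), and listDefaultPods is a hypothetical helper used only for illustration.

package main

import (
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// listDefaultPods builds a clientset from a kubeconfig path and lists the pods
// in the "default" namespace through the versioned core client.
func listDefaultPods(kubeconfig string) error {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	// Before: pods, err := client.Core().Pods("default").List(metav1.ListOptions{})
	// After: the API version is explicit in the accessor; the returned interface is unchanged.
	pods, err := client.CoreV1().Pods("default").List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("found %d pods\n", len(pods.Items))
	return nil
}

func main() {
	if err := listDefaultPods(os.Getenv("KUBECONFIG")); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

The same substitution covers the other idioms that recur in the diff, for example event sinks (v1core.New(client.CoreV1().RESTClient()).Events("")) and rate-limiter metrics registration (client.CoreV1().RESTClient().GetRateLimiter()).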

View File

@ -87,11 +87,11 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me
replicaCalc := podautoscaler.NewReplicaCalculator(
metricsClient,
hpaClient.Core(),
hpaClient.CoreV1(),
ctx.Options.HorizontalPodAutoscalerTolerance,
)
go podautoscaler.NewHorizontalController(
hpaClientGoClient.Core(),
hpaClientGoClient.CoreV1(),
scaleClient,
hpaClient.Autoscaling(),
restMapper,

View File

@ -250,7 +250,7 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) {
api.Kind("ConfigMap"),
}
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
QuotaClient: resourceQuotaControllerClient.Core(),
QuotaClient: resourceQuotaControllerClient.CoreV1(),
ResourceQuotaInformer: ctx.InformerFactory.Core().V1().ResourceQuotas(),
ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.Options.ResourceQuotaSyncPeriod.Duration),
Registry: resourceQuotaRegistry,
@ -258,8 +258,8 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) {
ReplenishmentResyncPeriod: ResyncPeriod(&ctx.Options),
GroupKindsToReplenish: groupKindsToReplenish,
}
if resourceQuotaControllerClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", resourceQuotaControllerClient.Core().RESTClient().GetRateLimiter())
if resourceQuotaControllerClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", resourceQuotaControllerClient.CoreV1().RESTClient().GetRateLimiter())
}
go resourcequotacontroller.NewResourceQuotaController(

View File

@ -540,7 +540,7 @@ func (gce *GCECloud) Initialize(clientBuilder controller.ControllerClientBuilder
if gce.OnXPN() {
gce.eventBroadcaster = record.NewBroadcaster()
gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(gce.client.Core().RESTClient()).Events("")})
gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(gce.client.CoreV1().RESTClient()).Events("")})
gce.eventRecorder = gce.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gce-cloudprovider"})
}

View File

@ -101,7 +101,7 @@ func (gce *GCECloud) watchClusterID() {
},
}
listerWatcher := cache.NewListWatchFromClient(gce.ClusterID.client.Core().RESTClient(), "configmaps", UIDNamespace, fields.Everything())
listerWatcher := cache.NewListWatchFromClient(gce.ClusterID.client.CoreV1().RESTClient(), "configmaps", UIDNamespace, fields.Everything())
var controller cache.Controller
gce.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler)
@ -189,7 +189,7 @@ func (ci *ClusterID) getOrInitialize() error {
UIDProvider: newId,
}
if _, err := ci.client.Core().ConfigMaps(UIDNamespace).Create(cfg); err != nil {
if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(cfg); err != nil {
glog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err)
return err
}

View File

@ -99,19 +99,19 @@ func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions)
secretNamespace: options.TokenSecretNamespace,
syncQueue: workqueue.NewNamed("bootstrap_signer_queue"),
}
if cl.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.Core().RESTClient().GetRateLimiter())
if cl.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.CoreV1().RESTClient().GetRateLimiter())
}
configMapSelector := fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ConfigMapName})
e.configMaps, e.configMapsController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
lo.FieldSelector = configMapSelector.String()
return e.client.Core().ConfigMaps(options.ConfigMapNamespace).List(lo)
return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).List(lo)
},
WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
lo.FieldSelector = configMapSelector.String()
return e.client.Core().ConfigMaps(options.ConfigMapNamespace).Watch(lo)
return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).Watch(lo)
},
},
&v1.ConfigMap{},
@ -127,11 +127,11 @@ func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions)
&cache.ListWatch{
ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
lo.FieldSelector = secretSelector.String()
return e.client.Core().Secrets(e.secretNamespace).List(lo)
return e.client.CoreV1().Secrets(e.secretNamespace).List(lo)
},
WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
lo.FieldSelector = secretSelector.String()
return e.client.Core().Secrets(e.secretNamespace).Watch(lo)
return e.client.CoreV1().Secrets(e.secretNamespace).Watch(lo)
},
},
&v1.Secret{},
@ -227,7 +227,7 @@ func (e *BootstrapSigner) signConfigMap() {
}
func (e *BootstrapSigner) updateConfigMap(cm *v1.ConfigMap) {
_, err := e.client.Core().ConfigMaps(cm.Namespace).Update(cm)
_, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(cm)
if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {
glog.V(3).Infof("Error updating ConfigMap: %v", err)
}

View File

@ -71,8 +71,8 @@ func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) *Token
client: cl,
tokenSecretNamespace: options.TokenSecretNamespace,
}
if cl.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.Core().RESTClient().GetRateLimiter())
if cl.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.CoreV1().RESTClient().GetRateLimiter())
}
secretSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(bootstrapapi.SecretTypeBootstrapToken)})
@ -80,11 +80,11 @@ func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) *Token
&cache.ListWatch{
ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
lo.FieldSelector = secretSelector.String()
return e.client.Core().Secrets(e.tokenSecretNamespace).List(lo)
return e.client.CoreV1().Secrets(e.tokenSecretNamespace).List(lo)
},
WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
lo.FieldSelector = secretSelector.String()
return e.client.Core().Secrets(e.tokenSecretNamespace).Watch(lo)
return e.client.CoreV1().Secrets(e.tokenSecretNamespace).Watch(lo)
},
},
&v1.Secret{},
@ -118,7 +118,7 @@ func (tc *TokenCleaner) evalSecret(o interface{}) {
if len(secret.UID) > 0 {
options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}}
}
err := tc.client.Core().Secrets(secret.Namespace).Delete(secret.Name, options)
err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(secret.Name, options)
// NotFound isn't a real error (it's already been deleted)
// Conflict isn't a real error (the UID precondition failed)
if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {

View File

@ -57,7 +57,7 @@ func NewCertificateController(
// Send events to the apiserver
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
cc := &CertificateController{
kubeClient: kubeClient,

View File

@ -235,7 +235,7 @@ func (pvlc *PersistentVolumeLabelController) updateVolume(vol *v1.PersistentVolu
return err
}
_, err = pvlc.kubeClient.Core().PersistentVolumes().Patch(string(volName), types.StrategicMergePatchType, patchBytes)
_, err = pvlc.kubeClient.CoreV1().PersistentVolumes().Patch(string(volName), types.StrategicMergePatchType, patchBytes)
if err != nil {
return fmt.Errorf("failed to update PersistentVolume %s: %v", volName, err)
}

View File

@ -546,7 +546,7 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v
}
func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
_, err := r.KubeClient.Core().Pods(namespace).Patch(name, types.StrategicMergePatchType, data)
_, err := r.KubeClient.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, data)
return err
}
@ -589,7 +589,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT
if labels.Set(pod.Labels).AsSelectorPreValidated().Empty() {
return fmt.Errorf("unable to create pods, no labels")
}
if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil {
if newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(pod); err != nil {
r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err)
return err
} else {
@ -610,7 +610,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
return fmt.Errorf("object does not have ObjectMeta, %v", err)
}
glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil {
if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil {
r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
return fmt.Errorf("unable to delete pods: %v", err)
} else {
@ -925,10 +925,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
}
if err != nil {
return err
@ -982,10 +982,10 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
}
if err != nil {
return err
@ -1030,7 +1030,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
}
_, err = c.Core().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes)
_, err = c.CoreV1().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes)
return err
}

View File

@ -70,10 +70,10 @@ func NewCronJobController(kubeClient clientset.Interface) *CronJobController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.Core().RESTClient().GetRateLimiter())
metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
}
jm := &CronJobController{

View File

@ -134,10 +134,10 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().RESTClient().GetRateLimiter())
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
}
dsc := &DaemonSetsController{
kubeClient: kubeClient,

View File

@ -228,7 +228,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur
toUpdate.Labels = make(map[string]string)
}
toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
_, err = dsc.kubeClient.Core().Pods(ds.Namespace).Update(toUpdate)
_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate)
if err != nil {
return nil, err
}

View File

@ -101,10 +101,10 @@ func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, r
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")})
if client != nil && client.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter())
if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.CoreV1().RESTClient().GetRateLimiter())
}
dc := &DeploymentController{
client: client,

View File

@ -721,7 +721,7 @@ func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister cor
}
// Only label the pod that doesn't already have the new hash
if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
_, err := UpdatePodWithRetries(c.Core().Pods(namespace), podLister, pod.Namespace, pod.Name,
_, err := UpdatePodWithRetries(c.CoreV1().Pods(namespace), podLister, pod.Namespace, pod.Name,
func(podToUpdate *v1.Pod) error {
// Precondition: the pod doesn't contain the new hash in its label.
if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {

View File

@ -294,7 +294,7 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
if dc.kubeClient != nil {
glog.Infof("Sending events to api server.")
dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(dc.kubeClient.Core().RESTClient()).Events("")})
dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(dc.kubeClient.CoreV1().RESTClient()).Events("")})
} else {
glog.Infof("No api server defined - no events will be sent to API server.")
}

View File

@ -75,8 +75,8 @@ var (
// NewEndpointController returns a new *EndpointController.
func NewEndpointController(podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer,
endpointsInformer coreinformers.EndpointsInformer, client clientset.Interface) *EndpointController {
if client != nil && client.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().RESTClient().GetRateLimiter())
if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.CoreV1().RESTClient().GetRateLimiter())
}
e := &EndpointController{
client: client,
@ -395,7 +395,7 @@ func (e *EndpointController) syncService(key string) error {
// service is deleted. However, if we're down at the time when
// the service is deleted, we will miss that deletion, so this
// doesn't completely solve the problem. See #6877.
err = e.client.Core().Endpoints(namespace).Delete(name, nil)
err = e.client.CoreV1().Endpoints(namespace).Delete(name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}
@ -508,10 +508,10 @@ func (e *EndpointController) syncService(key string) error {
glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
if createEndpoints {
// No previous endpoints, create them
_, err = e.client.Core().Endpoints(service.Namespace).Create(newEndpoints)
_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(newEndpoints)
} else {
// Pre-existing
_, err = e.client.Core().Endpoints(service.Namespace).Update(newEndpoints)
_, err = e.client.CoreV1().Endpoints(service.Namespace).Update(newEndpoints)
}
if err != nil {
if createEndpoints && errors.IsForbidden(err) {

View File

@ -72,11 +72,11 @@ func NewNamespaceController(
// create the controller so we can inject the enqueue function
namespaceController := &NamespaceController{
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(kubeClient.Core().Namespaces(), clientPool, kubeClient.Core(), discoverResourcesFn, finalizerToken, true),
namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(kubeClient.CoreV1().Namespaces(), clientPool, kubeClient.CoreV1(), discoverResourcesFn, finalizerToken, true),
}
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.Core().RESTClient().GetRateLimiter())
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
}
// configure the namespace informer event handlers

View File

@ -52,7 +52,7 @@ func newAdapter(k8s clientset.Interface, cloud *gce.GCECloud) *adapter {
ret.recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"})
glog.V(0).Infof("Sending events to api server.")
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
Interface: v1core.New(k8s.Core().RESTClient()).Events(""),
Interface: v1core.New(k8s.CoreV1().RESTClient()).Events(""),
})
return ret
@ -86,7 +86,7 @@ func (a *adapter) AddAlias(ctx context.Context, nodeName string, cidrRange *net.
}
func (a *adapter) Node(ctx context.Context, name string) (*v1.Node, error) {
return a.k8s.Core().Nodes().Get(name, metav1.GetOptions{})
return a.k8s.CoreV1().Nodes().Get(name, metav1.GetOptions{})
}
func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error {
@ -101,7 +101,7 @@ func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRang
return err
}
_, err = a.k8s.Core().Nodes().Patch(node.Name, types.StrategicMergePatchType, bytes)
_, err = a.k8s.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, bytes)
return err
}

View File

@ -107,7 +107,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
// controller manager to restart.
if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) {
var err error
nodeList, err = kubeClient.Core().Nodes().List(metav1.ListOptions{
nodeList, err = kubeClient.CoreV1().Nodes().List(metav1.ListOptions{
FieldSelector: fields.Everything().String(),
LabelSelector: labels.Everything().String(),
})

View File

@ -71,7 +71,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")})
gceCloud, ok := cloud.(*gce.GCECloud)
if !ok {
@ -170,7 +170,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
podCIDR := data.cidr.String()
for rep := 0; rep < cidrUpdateRetries; rep++ {
// TODO: change it to using PATCH instead of full Node updates.
node, err = ca.client.Core().Nodes().Get(data.nodeName, metav1.GetOptions{})
node, err = ca.client.CoreV1().Nodes().Get(data.nodeName, metav1.GetOptions{})
if err != nil {
glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
continue
@ -189,7 +189,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
// See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
}
node.Spec.PodCIDR = podCIDR
if _, err = ca.client.Core().Nodes().Update(node); err == nil {
if _, err = ca.client.CoreV1().Nodes().Update(node); err == nil {
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
break
}

View File

@ -68,7 +68,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, s
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")})
ra := &rangeAllocator{
client: client,
@ -228,7 +228,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
podCIDR := data.cidr.String()
for rep := 0; rep < cidrUpdateRetries; rep++ {
// TODO: change it to using PATCH instead of full Node updates.
node, err = r.client.Core().Nodes().Get(data.nodeName, metav1.GetOptions{})
node, err = r.client.CoreV1().Nodes().Get(data.nodeName, metav1.GetOptions{})
if err != nil {
glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
continue
@ -245,7 +245,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
return nil
}
node.Spec.PodCIDR = podCIDR
if _, err = r.client.Core().Nodes().Update(node); err == nil {
if _, err = r.client.CoreV1().Nodes().Update(node); err == nil {
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
break
}

View File

@ -246,11 +246,11 @@ func NewNodeController(
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(
&v1core.EventSinkImpl{
Interface: v1core.New(kubeClient.Core().RESTClient()).Events(""),
Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""),
})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.Core().RESTClient().GetRateLimiter())
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
}
if allocateNodeCIDRs {
@ -648,7 +648,7 @@ func (nc *Controller) monitorNodeStatus() error {
return true, nil
}
name := node.Name
node, err = nc.kubeClient.Core().Nodes().Get(name, metav1.GetOptions{})
node, err = nc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
return false, err
@ -1055,7 +1055,7 @@ func (nc *Controller) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.Node
_, currentCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady)
if !apiequality.Semantic.DeepEqual(currentCondition, &observedReadyCondition) {
if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil {
if _, err = nc.kubeClient.CoreV1().Nodes().UpdateStatus(node); err != nil {
glog.Errorf("Error updating node %s: %v", node.Name, err)
return gracePeriod, observedReadyCondition, currentReadyCondition, err
}

View File

@ -88,7 +88,7 @@ func deletePodHandler(c clientset.Interface, emitEventFunc func(types.Namespaced
}
var err error
for i := 0; i < retries; i++ {
err = c.Core().Pods(ns).Delete(name, &metav1.DeleteOptions{})
err = c.CoreV1().Pods(ns).Delete(name, &metav1.DeleteOptions{})
if err == nil {
break
}
@ -110,12 +110,12 @@ func getNoExecuteTaints(taints []v1.Taint) []v1.Taint {
func getPodsAssignedToNode(c clientset.Interface, nodeName string) ([]v1.Pod, error) {
selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName})
pods, err := c.Core().Pods(v1.NamespaceAll).List(metav1.ListOptions{
pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
FieldSelector: selector.String(),
LabelSelector: labels.Everything().String(),
})
for i := 0; i < retries && err != nil; i++ {
pods, err = c.Core().Pods(v1.NamespaceAll).List(metav1.ListOptions{
pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
FieldSelector: selector.String(),
LabelSelector: labels.Everything().String(),
})
@ -156,7 +156,7 @@ func NewNoExecuteTaintManager(c clientset.Interface) *NoExecuteTaintManager {
eventBroadcaster.StartLogging(glog.Infof)
if c != nil {
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(c.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(c.CoreV1().RESTClient()).Events("")})
} else {
glog.Fatalf("kubeClient is nil when starting NodeController")
}

View File

@ -55,7 +55,7 @@ func DeletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
remaining := false
selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
options := metav1.ListOptions{FieldSelector: selector}
pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(options)
pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(options)
var updateErrList []error
if err != nil {
@ -93,7 +93,7 @@ func DeletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
glog.V(2).Infof("Starting deletion of pod %v/%v", pod.Namespace, pod.Name)
recorder.Eventf(&pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
return false, err
}
remaining = true
@ -118,7 +118,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
var updatedPod *v1.Pod
var err error
if updatedPod, err = kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod); err != nil {
if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod); err != nil {
return nil, err
}
return updatedPod, nil
@ -127,7 +127,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
// ForcefullyDeleteNode deletes the node immediately. The pods on the
// node are cleaned up by the podGC.
func ForcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error {
if err := kubeClient.Core().Nodes().Delete(nodeName, nil); err != nil {
if err := kubeClient.CoreV1().Nodes().Delete(nodeName, nil); err != nil {
return fmt.Errorf("unable to delete node %q: %v", nodeName, err)
}
return nil
@ -139,7 +139,7 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
nodeName := node.Name
glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(opts)
pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(opts)
if err != nil {
return err
}
@ -155,7 +155,7 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
if cond.Type == v1.PodReady {
pod.Status.Conditions[i].Status = v1.ConditionFalse
glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
_, err := kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod)
_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(&pod)
if err != nil {
glog.Warningf("Failed to update status for pod %q: %v", format.Pod(&pod), err)
errMsg = append(errMsg, fmt.Sprintf("%v", err))

View File

@ -54,8 +54,8 @@ type HeapsterMetricsClient struct {
func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) MetricsClient {
return &HeapsterMetricsClient{
services: client.Core().Services(namespace),
podsGetter: client.Core(),
services: client.CoreV1().Services(namespace),
podsGetter: client.CoreV1(),
heapsterScheme: scheme,
heapsterService: service,
heapsterPort: port,

View File

@ -52,15 +52,15 @@ type PodGCController struct {
}
func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer, terminatedPodThreshold int) *PodGCController {
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().RESTClient().GetRateLimiter())
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
}
gcc := &PodGCController{
kubeClient: kubeClient,
terminatedPodThreshold: terminatedPodThreshold,
deletePod: func(namespace, name string) error {
glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name)
return kubeClient.Core().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
return kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
},
}
@ -143,7 +143,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) {
glog.V(4).Infof("GC'ing orphaned")
// We want to get list of Nodes from the etcd, to make sure that it's as fresh as possible.
nodes, err := gcc.kubeClient.Core().Nodes().List(metav1.ListOptions{})
nodes, err := gcc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return
}

View File

@ -95,12 +95,12 @@ type ReplicaSetController struct {
// NewReplicaSetController configures a replica set controller with the specified event recorder
func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().RESTClient().GetRateLimiter())
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
}
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
rsc := &ReplicaSetController{
kubeClient: kubeClient,

View File

@ -90,13 +90,13 @@ type ReplicationManager struct {
// NewReplicationManager configures a replication manager with the specified event recorder
func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager {
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().RESTClient().GetRateLimiter())
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
}
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
rm := &ReplicationManager{
kubeClient: kubeClient,
@ -651,7 +651,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
newStatus := calculateStatus(rc, filteredPods, manageReplicasErr)
// Always updates status as pods come up or die.
updatedRC, err := updateReplicationControllerStatus(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), *rc, newStatus)
updatedRC, err := updateReplicationControllerStatus(rm.kubeClient.CoreV1().ReplicationControllers(rc.Namespace), *rc, newStatus)
if err != nil {
// Multiple things could lead to this update failing. Returning an error causes a requeue without forcing a hotloop
return err

View File

@ -103,7 +103,7 @@ func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration)
}
if rc.broadcaster != nil {
rc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(rc.kubeClient.Core().RESTClient()).Events("")})
rc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(rc.kubeClient.CoreV1().RESTClient()).Events("")})
}
// TODO: If we do just the full Resync every 5 minutes (default value)

View File

@ -113,11 +113,11 @@ func New(
) (*ServiceController, error) {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "service-controller"})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().RESTClient().GetRateLimiter())
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
}
s := &ServiceController{
@ -327,7 +327,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S
func (s *ServiceController) persistUpdate(service *v1.Service) error {
var err error
for i := 0; i < clientRetryCount; i++ {
_, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service)
_, err = s.kubeClient.CoreV1().Services(service.Namespace).UpdateStatus(service)
if err == nil {
return nil
}

View File

@ -67,8 +67,8 @@ func NewServiceAccountsController(saInformer coreinformers.ServiceAccountInforme
serviceAccountsToEnsure: options.ServiceAccounts,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"),
}
if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().RESTClient().GetRateLimiter())
if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.CoreV1().RESTClient().GetRateLimiter())
}
saInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -210,7 +210,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
// TODO eliminate this once the fake client can handle creation without NS
sa.Namespace = ns.Name
if _, err := c.client.Core().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) {
if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) {
createFailures = append(createFailures, err)
}
}

View File

@ -44,10 +44,10 @@ func NewGetterFromClient(c clientset.Interface) serviceaccount.ServiceAccountTok
return clientGetter{c}
}
func (c clientGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error) {
return c.client.Core().ServiceAccounts(namespace).Get(name, metav1.GetOptions{})
return c.client.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{})
}
func (c clientGetter) GetSecret(namespace, name string) (*v1.Secret, error) {
return c.client.Core().Secrets(namespace).Get(name, metav1.GetOptions{})
return c.client.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
}
// registryGetter implements ServiceAccountTokenGetter using a service account and secret registry

View File

@ -86,8 +86,8 @@ func NewTokensController(serviceAccounts informers.ServiceAccountInformer, secre
maxRetries: maxRetries,
}
if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_tokens_controller", cl.Core().RESTClient().GetRateLimiter())
if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_tokens_controller", cl.CoreV1().RESTClient().GetRateLimiter())
}
e.serviceAccounts = serviceAccounts.Lister()
@ -344,7 +344,7 @@ func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry
if len(uid) > 0 {
opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}
}
err := e.client.Core().Secrets(ns).Delete(name, opts)
err := e.client.CoreV1().Secrets(ns).Delete(name, opts)
// NotFound doesn't need a retry (it's already been deleted)
// Conflict doesn't need a retry (the UID precondition failed)
if err == nil || apierrors.IsNotFound(err) || apierrors.IsConflict(err) {
@ -366,7 +366,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
// We don't want to update the cache's copy of the service account
// so add the secret to a freshly retrieved copy of the service account
serviceAccounts := e.client.Core().ServiceAccounts(serviceAccount.Namespace)
serviceAccounts := e.client.CoreV1().ServiceAccounts(serviceAccount.Namespace)
liveServiceAccount, err := serviceAccounts.Get(serviceAccount.Name, metav1.GetOptions{})
if err != nil {
// Retry if we cannot fetch the live service account (for a NotFound error, either the live lookup or our cache are stale)
@ -405,7 +405,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
}
// Save the secret
createdToken, err := e.client.Core().Secrets(serviceAccount.Namespace).Create(secret)
createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(secret)
if err != nil {
// retriable error
return true, err
@ -455,7 +455,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
// we weren't able to use the token, try to clean it up.
glog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}}
if deleteErr := e.client.Core().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil {
if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil {
glog.Error(deleteErr) // if we fail, just log it
}
}
@ -513,7 +513,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou
// We don't want to update the cache's copy of the secret
// so add the token to a freshly retrieved copy of the secret
secrets := e.client.Core().Secrets(cachedSecret.Namespace)
secrets := e.client.CoreV1().Secrets(cachedSecret.Namespace)
liveSecret, err := secrets.Get(cachedSecret.Name, metav1.GetOptions{})
if err != nil {
// Retry for any error other than a NotFound
@ -577,7 +577,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou
func (e *TokensController) removeSecretReference(saNamespace string, saName string, saUID types.UID, secretName string) error {
// We don't want to update the cache's copy of the service account
// so remove the secret from a freshly retrieved copy of the service account
serviceAccounts := e.client.Core().ServiceAccounts(saNamespace)
serviceAccounts := e.client.CoreV1().ServiceAccounts(saNamespace)
serviceAccount, err := serviceAccounts.Get(saName, metav1.GetOptions{})
// Ignore NotFound errors when attempting to remove a reference
if apierrors.IsNotFound(err) {
@ -631,7 +631,7 @@ func (e *TokensController) getServiceAccount(ns string, name string, uid types.U
}
// Live lookup
sa, err = e.client.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
sa, err = e.client.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return nil, nil
}
@ -667,7 +667,7 @@ func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetc
}
// Live lookup
secret, err := e.client.Core().Secrets(ns).Get(name, metav1.GetOptions{})
secret, err := e.client.CoreV1().Secrets(ns).Get(name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return nil, nil
}

View File

@ -77,7 +77,7 @@ func (spc *realStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod
return err
}
// If we created the PVCs attempt to create the Pod
_, err := spc.client.Core().Pods(set.Namespace).Create(pod)
_, err := spc.client.CoreV1().Pods(set.Namespace).Create(pod)
// sink already exists errors
if apierrors.IsAlreadyExists(err) {
return err
@ -113,7 +113,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod
attemptedUpdate = true
// commit the update, retrying on conflicts
_, updateErr := spc.client.Core().Pods(set.Namespace).Update(pod)
_, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(pod)
if updateErr == nil {
return nil
}
@ -134,7 +134,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod
}
func (spc *realStatefulPodControl) DeleteStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error {
err := spc.client.Core().Pods(set.Namespace).Delete(pod.Name, nil)
err := spc.client.CoreV1().Pods(set.Namespace).Delete(pod.Name, nil)
spc.recordPodEvent("delete", set, pod, err)
return err
}
@ -182,7 +182,7 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef
_, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
switch {
case apierrors.IsNotFound(err):
_, err := spc.client.Core().PersistentVolumeClaims(claim.Namespace).Create(&claim)
_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(&claim)
if err != nil {
errs = append(errs, fmt.Errorf("Failed to create PVC %s: %s", claim.Name, err))
}

View File

@ -85,7 +85,7 @@ func NewStatefulSetController(
) *StatefulSetController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset-controller"})
ssc := &StatefulSetController{

View File

@ -263,7 +263,7 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey
if err != nil {
return err
}
_, err = ttlc.kubeClient.Core().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
if err != nil {
glog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err)
return err

View File

@ -134,7 +134,7 @@ func NewAttachDetachController(
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach-controller"})
adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)

View File

@ -239,7 +239,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
for _, newPod := range extraPods1 {
// Add a new pod between ASW and DSW populators
_, err = adc.kubeClient.Core().Pods(newPod.ObjectMeta.Namespace).Create(newPod)
_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(newPod)
if err != nil {
t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
}
@ -256,7 +256,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
for _, newPod := range extraPods2 {
// Add a new pod between DSW populator and reconciler run
_, err = adc.kubeClient.Core().Pods(newPod.ObjectMeta.Namespace).Create(newPod)
_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(newPod)
if err != nil {
t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
}

View File

@ -129,10 +129,10 @@ func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj
err)
}
_, err = nsu.kubeClient.Core().Nodes().PatchStatus(string(nodeName), patchBytes)
_, err = nsu.kubeClient.CoreV1().Nodes().PatchStatus(string(nodeName), patchBytes)
if err != nil {
return fmt.Errorf(
"failed to kubeClient.Core().Nodes().Patch for node %q. %v",
"failed to kubeClient.CoreV1().Nodes().Patch for node %q. %v",
nodeName,
err)
}

View File

@ -641,7 +641,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo
return claim, nil
}
newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err)
return newClaim, err
@ -697,7 +697,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV
volumeClone.Status.Phase = phase
volumeClone.Status.Message = message
newVol, err := ctrl.kubeClient.Core().PersistentVolumes().UpdateStatus(volumeClone)
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(volumeClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err)
return newVol, err
@ -775,7 +775,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV
// Save the volume only if something was changed
if dirty {
glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volume.Name)
newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone)
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volume.Name, claimToClaimKey(claim), err)
return newVol, err
@ -829,7 +829,7 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo
if dirty {
glog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim))
newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err)
return newClaim, err
@ -916,7 +916,7 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume
volumeClone.Spec.ClaimRef.UID = ""
}
newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone)
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone)
if err != nil {
glog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err)
return err
@ -977,7 +977,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})
// This method may have been waiting for a volume lock for some time.
// Previous recycleVolumeOperation might just have saved an updated version,
// so read current volume state now.
newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
if err != nil {
glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err)
return
@ -1056,7 +1056,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) e
// This method may have been waiting for a volume lock for some time.
// Previous deleteVolumeOperation might just have saved an updated version, so
// read current volume state now.
newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
if err != nil {
glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err)
return nil
@ -1100,7 +1100,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) e
glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name)
// Delete the volume
if err = ctrl.kubeClient.Core().PersistentVolumes().Delete(volume.Name, nil); err != nil {
if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil {
// Oops, could not delete the volume and therefore the controller will
// try to delete the volume again on next update. We _could_ maintain a
// cache of "recently deleted volumes" and avoid unnecessary deletion,
@ -1260,7 +1260,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
// yet.
pvName := ctrl.getProvisionedVolumeNameForClaim(claim)
volume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err == nil && volume != nil {
// Volume has been already provisioned, nothing to do.
glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim))
@ -1338,7 +1338,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
glog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name)
var newVol *v1.PersistentVolume
if newVol, err = ctrl.kubeClient.Core().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) {
if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) {
// Save succeeded.
if err != nil {
glog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim))

View File

@ -71,7 +71,7 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
if eventRecorder == nil {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(p.KubeClient.Core().RESTClient()).Events("")})
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(p.KubeClient.CoreV1().RESTClient()).Events("")})
eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
}
@ -425,7 +425,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent
// modify these, therefore create a copy.
claimClone := claim.DeepCopy()
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annStorageProvisioner, class.Provisioner)
newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
if err != nil {
return newClaim, err
}

View File

@ -31,7 +31,7 @@ import (
// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
func NewSourceApiserver(c clientset.Interface, nodeName types.NodeName, updates chan<- interface{}) {
lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName)))
lw := cache.NewListWatchFromClient(c.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName)))
newSourceApiserverFromLW(lw, updates)
}

View File

@ -64,7 +64,7 @@ func NewSimpleConfigMapManager(kubeClient clientset.Interface) Manager {
}
func (s *simpleConfigMapManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) {
return s.kubeClient.Core().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
}
func (s *simpleConfigMapManager) RegisterPod(pod *v1.Pod) {
@ -216,7 +216,7 @@ func (s *configMapStore) Get(namespace, name string) (*v1.ConfigMap, error) {
// etcd and apiserver (the cache is eventually consistent).
util.FromApiserverCache(&opts)
}
configMap, err := s.kubeClient.Core().ConfigMaps(namespace).Get(name, opts)
configMap, err := s.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, opts)
if err != nil && !apierrors.IsNotFound(err) && data.configMap == nil && data.err == nil {
// Couldn't fetch the latest configmap, but there is no cached data to return.
// Return the fetch result instead.

View File

@ -439,7 +439,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
serviceIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
if kubeDeps.KubeClient != nil {
serviceLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.Core().RESTClient(), "services", metav1.NamespaceAll, fields.Everything())
serviceLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.CoreV1().RESTClient(), "services", metav1.NamespaceAll, fields.Everything())
r := cache.NewReflector(serviceLW, &v1.Service{}, serviceIndexer, 0)
go r.Run(wait.NeverStop)
}
@ -448,7 +448,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
if kubeDeps.KubeClient != nil {
fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector()
nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.Core().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector)
nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.CoreV1().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector)
r := cache.NewReflector(nodeLW, &v1.Node{}, nodeIndexer, 0)
go r.Run(wait.NeverStop)
}

View File

@ -97,7 +97,7 @@ func (kl *Kubelet) registerWithAPIServer() {
// a different externalID value, it attempts to delete that node so that a
// later attempt can recreate it.
func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
_, err := kl.kubeClient.Core().Nodes().Create(node)
_, err := kl.kubeClient.CoreV1().Nodes().Create(node)
if err == nil {
return true
}
@ -107,7 +107,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
return false
}
existingNode, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName), metav1.GetOptions{})
existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(string(kl.nodeName), metav1.GetOptions{})
if err != nil {
glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
return false
@@ -146,7 +146,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
"Previously node %q had externalID %q; now it is %q; will delete and recreate.",
kl.nodeName, node.Spec.ExternalID, existingNode.Spec.ExternalID,
)
if err := kl.kubeClient.Core().Nodes().Delete(node.Name, nil); err != nil {
if err := kl.kubeClient.CoreV1().Nodes().Delete(node.Name, nil); err != nil {
glog.Errorf("Unable to register node %q with API server: error deleting old node: %v", kl.nodeName, err)
} else {
glog.Infof("Deleted old node object %q", kl.nodeName)

View File

@@ -1777,13 +1777,13 @@ func hasHostNamespace(pod *v1.Pod) bool {
func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
if err != nil {
glog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err)
continue
}
if pvc != nil {
referencedVolume, err := kl.kubeClient.Core().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
if err != nil {
glog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err)
continue

View File

@@ -47,12 +47,12 @@ func newSharedNodeInformer(client clientset.Interface, nodeName string,
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (kuberuntime.Object, error) {
return client.Core().Nodes().List(metav1.ListOptions{
return client.CoreV1().Nodes().List(metav1.ListOptions{
FieldSelector: fieldselector.String(),
})
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return client.Core().Nodes().Watch(metav1.ListOptions{
return client.CoreV1().Nodes().Watch(metav1.ListOptions{
FieldSelector: fieldselector.String(),
ResourceVersion: options.ResourceVersion,
})

View File

@@ -63,7 +63,7 @@ func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error {
}
hash := getPodHash(pod)
copyPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = hash
apiPod, err := mc.apiserverClient.Core().Pods(copyPod.Namespace).Create(&copyPod)
apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(&copyPod)
if err != nil && errors.IsAlreadyExists(err) {
// Check if the existing pod is the same as the pod we want to create.
if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash {
@@ -84,7 +84,7 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error {
}
glog.V(2).Infof("Deleting a mirror pod %q", podFullName)
// TODO(random-liu): Delete the mirror pod with uid precondition in mirror pod manager
if err := mc.apiserverClient.Core().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err)
}
return nil

View File

@@ -64,7 +64,7 @@ func NewSimpleSecretManager(kubeClient clientset.Interface) Manager {
}
func (s *simpleSecretManager) GetSecret(namespace, name string) (*v1.Secret, error) {
return s.kubeClient.Core().Secrets(namespace).Get(name, metav1.GetOptions{})
return s.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
}
func (s *simpleSecretManager) RegisterPod(pod *v1.Pod) {
@@ -216,7 +216,7 @@ func (s *secretStore) Get(namespace, name string) (*v1.Secret, error) {
// etcd and apiserver (the cache is eventually consistent).
util.FromApiserverCache(&opts)
}
secret, err := s.kubeClient.Core().Secrets(namespace).Get(name, opts)
secret, err := s.kubeClient.CoreV1().Secrets(namespace).Get(name, opts)
if err != nil && !apierrors.IsNotFound(err) && data.secret == nil && data.err == nil {
// Couldn't fetch the latest secret, but there is no cached data to return.
// Return the fetch result instead.

View File

@@ -441,7 +441,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
}
// TODO: make me easier to express from client code
pod, err := m.kubeClient.Core().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{})
pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{})
if errors.IsNotFound(err) {
glog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid)
// If the Pod is deleted the status will be cleared in
@@ -462,7 +462,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
}
pod.Status = status.status
// TODO: handle conflict as a retry, make that easier too.
newPod, err := m.kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod)
newPod, err := m.kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
if err != nil {
glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
return
@@ -477,7 +477,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
deleteOptions := metav1.NewDeleteOptions(0)
// Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace.
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID))
err = m.kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, deleteOptions)
err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions)
if err != nil {
glog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err)
return
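The UID precondition above is the standard guard against deleting a replacement object that happens to reuse the same name and namespace. The shape of such a call, as a hypothetical helper (same client-go vintage; types.UID comes from k8s.io/apimachinery/pkg/types):

func deleteExactPod(cs kubernetes.Interface, namespace, name string, uid types.UID) error {
	// Grace period 0, and only delete if the live object still carries this UID;
	// the apiserver rejects the request with a conflict if a newer pod now owns the name.
	opts := metav1.NewDeleteOptions(0)
	opts.Preconditions = metav1.NewUIDPreconditions(string(uid))
	return cs.CoreV1().Pods(namespace).Delete(name, opts)
}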

View File

@@ -396,7 +396,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
namespace string, claimName string) (string, types.UID, error) {
pvc, err :=
dswp.kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
if err != nil || pvc == nil {
return "", "", fmt.Errorf(
"failed to fetch PVC %s/%s from API server. err=%v",
@@ -425,7 +425,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVSpec(
name string,
pvcReadOnly bool,
expectedClaimUID types.UID) (*volume.Spec, string, error) {
pv, err := dswp.kubeClient.Core().PersistentVolumes().Get(name, metav1.GetOptions{})
pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{})
if err != nil || pv == nil {
return nil, "", fmt.Errorf(
"failed to fetch PV %q from API server. err=%v", name, err)

View File

@@ -495,7 +495,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error {
// Get the node status to retrieve volume device path information.
node, fetchErr := rc.kubeClient.Core().Nodes().Get(string(rc.nodeName), metav1.GetOptions{})
node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{})
if fetchErr != nil {
glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr)
} else {

View File

@@ -324,11 +324,11 @@ func delayClaimBecomesBound(
) {
time.Sleep(500 * time.Millisecond)
volumeClaim, _ :=
kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
volumeClaim.Status = v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
}
kubeClient.Core().PersistentVolumeClaims(namespace).Update(volumeClaim)
kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(volumeClaim)
return
}

View File

@@ -33,7 +33,7 @@ func listConfigMapsByNamespaceFuncUsingClient(kubeClient clientset.Interface) ge
// unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require
// structured objects.
return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().ConfigMaps(namespace).List(options)
itemList, err := kubeClient.CoreV1().ConfigMaps(namespace).List(options)
if err != nil {
return nil, err
}
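The next several files (persistent volume claims, pods, replication controllers, resource quotas, secrets, services) repeat this exact wrapper with a different typed call. A sketch of the full shape of one lister; the part below the hunk is an assumption about how the typed items get repackaged as runtime.Objects (runtime is k8s.io/apimachinery/pkg/runtime):

func listConfigMapsByNamespace(cs kubernetes.Interface) func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) {
	return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) {
		itemList, err := cs.CoreV1().ConfigMaps(namespace).List(options)
		if err != nil {
			return nil, err
		}
		// Assumed tail (not shown in the hunk): hand structured objects to the quota evaluator.
		results := make([]runtime.Object, 0, len(itemList.Items))
		for i := range itemList.Items {
			results = append(results, &itemList.Items[i])
		}
		return results, nil
	}
}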

View File

@@ -73,7 +73,7 @@ func listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient clientset.I
// unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require
// structured objects.
return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().PersistentVolumeClaims(namespace).List(options)
itemList, err := kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(options)
if err != nil {
return nil, err
}

View File

@@ -66,7 +66,7 @@ func listPodsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.
// unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require
// structured objects.
return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().Pods(namespace).List(options)
itemList, err := kubeClient.CoreV1().Pods(namespace).List(options)
if err != nil {
return nil, err
}

View File

@@ -33,7 +33,7 @@ func listReplicationControllersByNamespaceFuncUsingClient(kubeClient clientset.I
// unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require
// structured objects.
return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().ReplicationControllers(namespace).List(options)
itemList, err := kubeClient.CoreV1().ReplicationControllers(namespace).List(options)
if err != nil {
return nil, err
}

View File

@@ -33,7 +33,7 @@ func listResourceQuotasByNamespaceFuncUsingClient(kubeClient clientset.Interface
// unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require
// structured objects.
return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().ResourceQuotas(namespace).List(options)
itemList, err := kubeClient.CoreV1().ResourceQuotas(namespace).List(options)
if err != nil {
return nil, err
}

View File

@@ -33,7 +33,7 @@ func listSecretsByNamespaceFuncUsingClient(kubeClient clientset.Interface) gener
// unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require
// structured objects.
return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().Secrets(namespace).List(options)
itemList, err := kubeClient.CoreV1().Secrets(namespace).List(options)
if err != nil {
return nil, err
}

View File

@@ -48,7 +48,7 @@ func listServicesByNamespaceFuncUsingClient(kubeClient clientset.Interface) gene
// unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require
// structured objects.
return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) {
itemList, err := kubeClient.Core().Services(namespace).List(options)
itemList, err := kubeClient.CoreV1().Services(namespace).List(options)
if err != nil {
return nil, err
}

View File

@@ -146,7 +146,7 @@ func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.N
if err != nil {
return nil
}
_, err = c.Core().Nodes().PatchStatus(string(node), patch)
_, err = c.CoreV1().Nodes().PatchStatus(string(node), patch)
return err
}
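PatchStatus patches the node's status subresource. The patch built earlier in SetNodeCondition is not shown in this hunk; roughly, it is a merge patch carrying the single condition, which the apiserver merges into status.conditions by condition type. A hedged sketch (hypothetical helper; needs encoding/json and v1 "k8s.io/api/core/v1"):

func patchNodeCondition(cs kubernetes.Interface, nodeName string, condition v1.NodeCondition) error {
	// Approximate shape of the generated patch; the real helper is outside this hunk.
	patch, err := json.Marshal(map[string]interface{}{
		"status": map[string]interface{}{
			"conditions": []v1.NodeCondition{condition},
		},
	})
	if err != nil {
		return err
	}
	_, err = cs.CoreV1().Nodes().PatchStatus(nodeName, patch)
	return err
}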

View File

@@ -144,7 +144,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
// TODO: caching, currently it is overkill because it calls this function
// only when it creates dynamic PV
zones := make(sets.String)
nodes, err := kubeClient.Core().Nodes().List(metav1.ListOptions{})
nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
glog.V(2).Infof("Error listing nodes")
return zones, err
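The rest of getZonesFromNodes sits below the hunk; presumably it walks the listed nodes and collects each node's zone label into the set. A hedged sketch of that loop, assuming the beta failure-domain zone label of this release:

for _, node := range nodes.Items {
	// Label key assumed: the constant usually referenced as LabelZoneFailureDomain.
	if zone, ok := node.Labels["failure-domain.beta.kubernetes.io/zone"]; ok && zone != "" {
		zones.Insert(zone)
	}
}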

View File

@@ -149,7 +149,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu
if kubeClient == nil {
return nil, fmt.Errorf("failed to get kube client to initialize mounter")
}
ep, err := kubeClient.Core().Endpoints(podNs).Get(epName, metav1.GetOptions{})
ep, err := kubeClient.CoreV1().Endpoints(podNs).Get(epName, metav1.GetOptions{})
if err != nil {
glog.Errorf("failed to get endpoints %s[%v]", epName, err)
return nil, err
@@ -493,7 +493,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
if kubeClient == nil {
return fmt.Errorf("failed to get kube client when collecting gids")
}
pvList, err := kubeClient.Core().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
glog.Errorf("failed to get existing persistent volumes")
return err
@@ -814,7 +814,7 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS
if kubeClient == nil {
return nil, nil, fmt.Errorf("failed to get kube client when creating endpoint service")
}
_, err = kubeClient.Core().Endpoints(namespace).Create(endpoint)
_, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint)
if err != nil && errors.IsAlreadyExists(err) {
glog.V(1).Infof("endpoint [%s] already exist in namespace [%s]", endpoint, namespace)
err = nil
@@ -834,7 +834,7 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{Protocol: "TCP", Port: 1}}}}
_, err = kubeClient.Core().Services(namespace).Create(service)
_, err = kubeClient.CoreV1().Services(namespace).Create(service)
if err != nil && errors.IsAlreadyExists(err) {
glog.V(1).Infof("service [%s] already exist in namespace [%s]", service, namespace)
err = nil
@@ -851,7 +851,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi
if kubeClient == nil {
return fmt.Errorf("failed to get kube client when deleting endpoint service")
}
err = kubeClient.Core().Services(namespace).Delete(epServiceName, nil)
err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil)
if err != nil {
glog.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err)
return fmt.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err)

View File

@@ -200,7 +200,7 @@ func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.Vol
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
return nil, err

View File

@@ -147,7 +147,7 @@ func (f *fakeVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
func (f *fakeVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
return func(namespace, name string) (*v1.Secret, error) {
return f.kubeClient.Core().Secrets(namespace).Get(name, metav1.GetOptions{})
return f.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
}
}
@@ -157,7 +157,7 @@ func (f *fakeVolumeHost) GetExec(pluginName string) mount.Exec {
func (f *fakeVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
return func(namespace, name string) (*v1.ConfigMap, error) {
return f.kubeClient.Core().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
return f.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
}
}

View File

@@ -187,15 +187,15 @@ type realRecyclerClient struct {
}
func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
return c.client.Core().Pods(pod.Namespace).Create(pod)
return c.client.CoreV1().Pods(pod.Namespace).Create(pod)
}
func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
return c.client.Core().Pods(namespace).Get(name, metav1.GetOptions{})
return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
}
func (c *realRecyclerClient) DeletePod(name, namespace string) error {
return c.client.Core().Pods(namespace).Delete(name, nil)
return c.client.CoreV1().Pods(namespace).Delete(name, nil)
}
func (c *realRecyclerClient) Event(eventtype, message string) {
@@ -212,13 +212,13 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s
Watch: true,
}
podWatch, err := c.client.Core().Pods(namespace).Watch(options)
podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options)
if err != nil {
return nil, err
}
eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
eventWatch, err := c.client.Core().Events(namespace).Watch(metav1.ListOptions{
eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{
FieldSelector: eventSelector.String(),
Watch: true,
})
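WatchPod opens two watches here, one on the pod and one on its events; the remainder of the function (below the hunk) merges them into a single stream. A purely illustrative fan-in over the two watch.Interface result channels, not the repo's actual implementation:

out := make(chan watch.Event)
go func() {
	defer close(out)
	defer podWatch.Stop()
	defer eventWatch.Stop()
	for {
		select {
		case e, ok := <-podWatch.ResultChan():
			if !ok {
				return
			}
			out <- e
		case e, ok := <-eventWatch.ResultChan():
			if !ok {
				return
			}
			out <- e
		case <-stopChannel:
			return
		}
	}
}()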

View File

@@ -664,7 +664,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
}
// Fetch current node object
node, fetchErr := og.kubeClient.Core().Nodes().Get(string(nodeName), metav1.GetOptions{})
node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{})
if fetchErr != nil {
// On failure, return error. Caller will log and retry.
return volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume failed fetching node from API server", fetchErr)
@@ -698,7 +698,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
func (og *operationGenerator) verifyVolumeIsSafeToDetach(
volumeToDetach AttachedVolume) error {
// Fetch current node object
node, fetchErr := og.kubeClient.Core().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{})
node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{})
if fetchErr != nil {
if errors.IsNotFound(fetchErr) {
glog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", ""))

View File

@@ -147,7 +147,7 @@ func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interf
if kubeClient == nil {
return secret, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.Core().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{})
secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{})
if err != nil {
return secret, err
}
@@ -163,7 +163,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl
if kubeClient == nil {
return secret, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
if err != nil {
return secret, err
}

View File

@@ -165,7 +165,7 @@ func (l *lifecycle) Admit(a admission.Attributes) error {
// refuse to operate on non-existent namespaces
if !exists || forceLiveLookup {
// as a last resort, make a call directly to storage
namespace, err = l.client.Core().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{})
namespace, err = l.client.CoreV1().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{})
switch {
case errors.IsNotFound(err):
return err

View File

@@ -37,7 +37,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
It("should return chunks of results for list calls", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.Core().PodTemplates(ns)
client := c.CoreV1().PodTemplates(ns)
By("creating a large number of resources")
workqueue.Parallelize(20, numberOfTotalResources, func(i int) {

View File

@@ -103,7 +103,7 @@ func masterExec(cmd string) {
func checkExistingRCRecovers(f *framework.Framework) {
By("assert that the pre-existing replication controller recovers")
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
rcSelector := labels.Set{"name": "baz"}.AsSelector()
By("deleting pods from existing replication controller")

View File

@@ -219,8 +219,8 @@ func newGCPod(name string) *v1.Pod {
// controllers and pods are rcNum and podNum. It returns error if the
// communication with the API server fails.
func verifyRemainingReplicationControllersPods(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
pods, err := clientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
@@ -264,7 +264,7 @@ func verifyRemainingCronJobsJobsPods(f *framework.Framework, clientSet clientset
By(fmt.Sprintf("expected %d jobs, got %d jobs", jobNum, len(jobs.Items)))
}
pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
@@ -332,8 +332,8 @@ var _ = SIGDescribe("Garbage collector", func() {
f := framework.NewDefaultFramework("gc")
It("should delete pods created by rc when not orphaning", func() {
clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
// TODO: find better way to keep this label unique in the test
uniqLabels := map[string]string{"gctest": "delete_pods"}
@@ -385,8 +385,8 @@ var _ = SIGDescribe("Garbage collector", func() {
It("should orphan pods created by rc if delete options say so", func() {
clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
// TODO: find better way to keep this label unique in the test
uniqLabels := map[string]string{"gctest": "orphan_pods"}
@@ -454,8 +454,8 @@ var _ = SIGDescribe("Garbage collector", func() {
It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
// TODO: find better way to keep this label unique in the test
uniqLabels := map[string]string{"gctest": "orphan_pods_nil_option"}
@@ -619,8 +619,8 @@ var _ = SIGDescribe("Garbage collector", func() {
It("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() {
clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
// TODO: find better way to keep this label unique in the test
uniqLabels := map[string]string{"gctest": "delete_pods_foreground"}
@@ -703,8 +703,8 @@ var _ = SIGDescribe("Garbage collector", func() {
// TODO: this should be an integration test
It("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() {
clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
rc1Name := "simpletest-rc-to-be-deleted"
replicas := int32(estimateMaximumPods(clientSet, 10, 100))
halfReplicas := int(replicas / 2)
@@ -814,7 +814,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// TODO: should be an integration test
It("should not be blocked by dependency circle", func() {
clientSet := f.ClientSet
podClient := clientSet.Core().Pods(f.Namespace.Name)
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
pod1 := newGCPod("pod1")
pod1, err := podClient.Create(pod1)
Expect(err).NotTo(HaveOccurred())

View File

@@ -151,7 +151,7 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool)
var _ = SIGDescribe("Generated clientset", func() {
f := framework.NewDefaultFramework("clientset")
It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func() {
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
By("constructing the pod")
name := "pod" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())

View File

@@ -52,14 +52,14 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
ch := make(chan struct{})
go func() {
_, err := c.Core().Pods(ns).Create(newUninitializedPod(podName))
_, err := c.CoreV1().Pods(ns).Create(newUninitializedPod(podName))
Expect(err).NotTo(HaveOccurred())
close(ch)
}()
// wait to ensure the scheduler does not act on an uninitialized pod
err := wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) {
p, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
p, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, nil
@@ -71,23 +71,23 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
Expect(err).To(Equal(wait.ErrWaitTimeout))
// verify that we can update an initializing pod
pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pod.Annotations = map[string]string{"update-1": "test"}
pod, err = c.Core().Pods(ns).Update(pod)
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
// verify the list call filters out uninitialized pods
pods, err := c.Core().Pods(ns).List(metav1.ListOptions{IncludeUninitialized: true})
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{IncludeUninitialized: true})
Expect(err).NotTo(HaveOccurred())
Expect(pods.Items).To(HaveLen(1))
pods, err = c.Core().Pods(ns).List(metav1.ListOptions{})
pods, err = c.CoreV1().Pods(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(pods.Items).To(HaveLen(0))
// clear initializers
pod.Initializers = nil
pod, err = c.Core().Pods(ns).Update(pod)
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
// pod should now start running
@@ -98,12 +98,12 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
<-ch
// verify that we cannot start the pod initializing again
pod, err = c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pod.Initializers = &metav1.Initializers{
Pending: []metav1.Initializer{{Name: "Other"}},
}
_, err = c.Core().Pods(ns).Update(pod)
_, err = c.CoreV1().Pods(ns).Update(pod)
if !errors.IsInvalid(err) || !strings.Contains(err.Error(), "immutable") {
Fail(fmt.Sprintf("expected invalid error: %v", err))
}
@@ -145,7 +145,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
ch := make(chan struct{})
go func() {
defer close(ch)
_, err := c.Core().Pods(ns).Create(newInitPod(podName))
_, err := c.CoreV1().Pods(ns).Create(newInitPod(podName))
Expect(err).NotTo(HaveOccurred())
}()
@@ -153,7 +153,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
By("Waiting until the pod is visible to a client")
var pod *v1.Pod
err = wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) {
pod, err = c.Core().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true})
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true})
if errors.IsNotFound(err) {
return false, nil
}
@@ -170,7 +170,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
// pretend we are an initializer
By("Completing initialization")
pod.Initializers = nil
pod, err = c.Core().Pods(ns).Update(pod)
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
// ensure create call returns
@@ -185,7 +185,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
podName = "preinitialized-pod"
pod = newUninitializedPod(podName)
pod.Initializers.Pending = nil
pod, err = c.Core().Pods(ns).Create(pod)
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(pod.Initializers).To(BeNil())
@@ -197,7 +197,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
v1.MirrorPodAnnotationKey: "true",
}
pod.Spec.NodeName = "node-does-not-yet-exist"
pod, err = c.Core().Pods(ns).Create(pod)
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(pod.Initializers).To(BeNil())
Expect(pod.Annotations[v1.MirrorPodAnnotationKey]).To(Equal("true"))
@@ -259,7 +259,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
LabelSelector: selector.String(),
IncludeUninitialized: true,
}
pods, err := c.Core().Pods(ns).List(listOptions)
pods, err := c.CoreV1().Pods(ns).List(listOptions)
Expect(err).NotTo(HaveOccurred())
Expect(len(pods.Items)).Should(Equal(1))
})
@@ -349,7 +349,7 @@ func newInitPod(podName string) *v1.Pod {
// removeInitializersFromAllPods walks all pods and ensures they don't have the provided initializer,
// to guarantee completing the test doesn't block the entire cluster.
func removeInitializersFromAllPods(c clientset.Interface, initializerName string) {
pods, err := c.Core().Pods("").List(metav1.ListOptions{IncludeUninitialized: true})
pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{IncludeUninitialized: true})
if err != nil {
return
}
@@ -358,7 +358,7 @@ func removeInitializersFromAllPods(c clientset.Interface, initializerName string
continue
}
err := clientretry.RetryOnConflict(clientretry.DefaultRetry, func() error {
pod, err := c.Core().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{IncludeUninitialized: true})
pod, err := c.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{IncludeUninitialized: true})
if err != nil {
if errors.IsNotFound(err) {
return nil
@@ -382,7 +382,7 @@ func removeInitializersFromAllPods(c clientset.Interface, initializerName string
pod.Initializers = nil
}
framework.Logf("Found initializer on pod %s in ns %s", pod.Name, pod.Namespace)
_, err = c.Core().Pods(p.Namespace).Update(pod)
_, err = c.CoreV1().Pods(p.Namespace).Update(pod)
return err
})
if err != nil {

View File

@@ -62,7 +62,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
func() (bool, error) {
var cnt = 0
nsList, err := f.ClientSet.Core().Namespaces().List(metav1.ListOptions{})
nsList, err := f.ClientSet.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -83,7 +83,7 @@ func waitForPodInNamespace(c clientset.Interface, ns, podName string) *v1.Pod {
var pod *v1.Pod
var err error
err = wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) {
pod, err = c.Core().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true})
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true})
if errors.IsNotFound(err) {
return false, nil
}
@@ -119,7 +119,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
},
}
pod, err = f.ClientSet.Core().Pods(namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the pod to have running status")
@@ -141,21 +141,21 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
go func() {
_, err = f.ClientSet.Core().Pods(namespace.Name).Create(podB)
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(podB)
// This error is ok, because we will delete the pod before it completes initialization
framework.Logf("error from create uninitialized namespace: %v", err)
}()
podB = waitForPodInNamespace(f.ClientSet, namespace.Name, podB.Name)
By("Deleting the namespace")
err = f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil)
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) {
_, err = f.ClientSet.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
}
@@ -167,9 +167,9 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
By("Verifying there are no pods in the namespace")
_, err = f.ClientSet.Core().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
_, err = f.ClientSet.Core().Pods(namespace.Name).Get(podB.Name, metav1.GetOptions{IncludeUninitialized: true})
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(podB.Name, metav1.GetOptions{IncludeUninitialized: true})
Expect(err).To(HaveOccurred())
}
@@ -202,18 +202,18 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
}},
},
}
service, err = f.ClientSet.Core().Services(namespace.Name).Create(service)
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service)
Expect(err).NotTo(HaveOccurred())
By("Deleting the namespace")
err = f.ClientSet.Core().Namespaces().Delete(namespace.Name, nil)
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60)
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) {
_, err = f.ClientSet.Core().Namespaces().Get(namespace.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
}
@@ -225,7 +225,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
By("Verifying there is no service in the namespace")
_, err = f.ClientSet.Core().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
}

View File

@@ -43,11 +43,11 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
podName := "pod-1"
framework.Logf("Creating pod %s", podName)
_, err := c.Core().Pods(ns).Create(newTablePod(podName))
_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
Expect(err).NotTo(HaveOccurred())
table := &metav1alpha1.Table{}
err = c.Core().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Table: %#v", table)
@@ -67,7 +67,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
c := f.ClientSet
table := &metav1alpha1.Table{}
err := c.Core().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Table: %#v", table)
@@ -85,7 +85,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
c := f.ClientSet
table := &metav1alpha1.Table{}
err := c.Core().RESTClient().Get().Resource("services").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
err := c.CoreV1().RESTClient().Get().Resource("services").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
Expect(err).To(HaveOccurred())
Expect(err.(errors.APIStatus).Status().Code).To(Equal(int32(406)))
})

View File

@@ -436,7 +436,7 @@ func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reaso
if err != nil {
return fmt.Errorf("Error in getting cronjob %s/%s: %v", ns, cronJobName, err)
}
events, err := c.Core().Events(ns).Search(legacyscheme.Scheme, sj)
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj)
if err != nil {
return fmt.Errorf("Error in listing events: %s", err)
}

View File

@@ -164,7 +164,7 @@ func replacePods(pods []*v1.Pod, store cache.Store) {
// and a list of nodenames across which these containers restarted.
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
options := metav1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := c.Core().Pods(ns).List(options)
pods, err := c.CoreV1().Pods(ns).List(options)
framework.ExpectNoError(err)
failedContainers := 0
containerRestartNodes := sets.NewString()
@@ -215,12 +215,12 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labelSelector.String()
obj, err := f.ClientSet.Core().Pods(ns).List(options)
obj, err := f.ClientSet.CoreV1().Pods(ns).List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector.String()
return f.ClientSet.Core().Pods(ns).Watch(options)
return f.ClientSet.CoreV1().Pods(ns).Watch(options)
},
},
&v1.Pod{},

View File

@@ -85,7 +85,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
} else {
framework.Logf("unable to dump daemonsets: %v", err)
}
if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
framework.Logf("pods: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), pods))
} else {
framework.Logf("unable to dump pods: %v", err)
@@ -126,7 +126,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
By("Stop a daemon pod, check that the daemon pod is revived.")
podList := listDaemonPods(c, ns, label)
pod := podList.Items[0]
err = c.Core().Pods(ns).Delete(pod.Name, nil)
err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
@@ -243,7 +243,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
pod := podList.Items[0]
pod.ResourceVersion = ""
pod.Status.Phase = v1.PodFailed
_, err = c.Core().Pods(ns).UpdateStatus(&pod)
_, err = c.CoreV1().Pods(ns).UpdateStatus(&pod)
Expect(err).NotTo(HaveOccurred(), "error failing a daemon pod")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
@@ -549,7 +549,7 @@ func newDaemonSet(dsName, image string, label map[string]string) *extensions.Dae
func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(ns).List(options)
podList, err := c.CoreV1().Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(podList.Items)).To(BeNumerically(">", 0))
return podList
@@ -580,7 +580,7 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
}
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
nodeClient := c.Core().Nodes()
nodeClient := c.CoreV1().Nodes()
var newNode *v1.Node
var newLabels map[string]string
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
@@ -621,7 +621,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nodeNames []string) func() (bool, error) {
return func() (bool, error) {
podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
framework.Logf("could not get the pod list: %v", err)
return false, nil
@@ -660,7 +660,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nod
func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
return func() (bool, error) {
nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
nodeNames := make([]string, 0)
for _, node := range nodeList.Items {
@@ -717,7 +717,7 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error {
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
return func() (bool, error) {
podList, err := c.Core().Pods(ds.Namespace).List(metav1.ListOptions{})
podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}

View File

@@ -145,7 +145,7 @@ func failureTrap(c clientset.Interface, ns string) {
framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
}
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(rs.Namespace).List(options)
podList, err := c.CoreV1().Pods(rs.Namespace).List(options)
for _, pod := range podList.Items {
framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
}
@@ -191,7 +191,7 @@ func stopDeployment(c clientset.Interface, internalClient internalclientset.Inte
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
var pods *v1.PodList
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
pods, err = c.Core().Pods(ns).List(options)
pods, err = c.CoreV1().Pods(ns).List(options)
if err != nil {
return false, err
}
@@ -342,7 +342,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
deploymentName := "test-cleanup-deployment"
framework.Logf("Creating deployment %s", deploymentName)
pods, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
options := metav1.ListOptions{
@@ -350,7 +350,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}
stopCh := make(chan struct{})
defer close(stopCh)
w, err := c.Core().Pods(ns).Watch(options)
w, err := c.CoreV1().Pods(ns).Watch(options)
Expect(err).NotTo(HaveOccurred())
go func() {
// There should be only one pod being created, which is the pod with the redis image.
@@ -947,7 +947,7 @@ func testIterativeDeployments(f *framework.Framework) {
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
opts := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(ns).List(opts)
podList, err := c.CoreV1().Pods(ns).List(opts)
Expect(err).NotTo(HaveOccurred())
if len(podList.Items) == 0 {
framework.Logf("%02d: no deployment pods to delete", i)
@@ -959,7 +959,7 @@ func testIterativeDeployments(f *framework.Framework) {
}
name := podList.Items[p].Name
framework.Logf("%02d: deleting deployment pod %q", i, name)
err := c.Core().Pods(ns).Delete(name, nil)
err := c.CoreV1().Pods(ns).Delete(name, nil)
if err != nil && !errors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}

View File

@@ -103,7 +103,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
By(fmt.Sprintf("Creating replication controller %s", name))
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(newRC)
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC)
Expect(err).NotTo(HaveOccurred())
// Check that pods for the new RC were created.
@@ -121,7 +121,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
}
err = f.WaitForPodRunning(pod.Name)
if err != nil {
updatePod, getErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if getErr == nil {
err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
@@ -160,11 +160,11 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
quota := newPodQuota(name, "2")
_, err := c.Core().ResourceQuotas(namespace).Create(quota)
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
quota, err = c.Core().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -179,14 +179,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
rc := newRC(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
rc, err = c.Core().ReplicationControllers(namespace).Create(rc)
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
generation := rc.Generation
conditions := rc.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -215,7 +215,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
generation = rc.Generation
conditions = rc.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -258,12 +258,12 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rcSt)
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
Expect(err).NotTo(HaveOccurred())
By("Then the orphan pod is adopted")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
// The Pod p should either be adopted or deleted by the RC
if errors.IsNotFound(err) {
return true, nil
@@ -287,7 +287,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rcSt)
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
Expect(err).NotTo(HaveOccurred())
By("When the matched label of one of its pods change")
@@ -296,11 +296,11 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
p := pods.Items[0]
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pod.Labels = map[string]string{"name": "not-matching-name"}
_, err = f.ClientSet.Core().Pods(f.Namespace.Name).Update(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
if err != nil && errors.IsConflict(err) {
return false, nil
}
@@ -313,7 +313,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
By("Then the pod is released")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rc.UID {

View File

@@ -129,7 +129,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
}
err = f.WaitForPodRunning(pod.Name)
if err != nil {
updatePod, getErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if getErr == nil {
err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
@@ -168,11 +168,11 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
quota := newPodQuota(name, "2")
_, err := c.Core().ResourceQuotas(namespace).Create(quota)
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
quota, err = c.Core().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -272,7 +272,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
By("Then the orphan pod is adopted")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
// The Pod p should either be adopted or deleted by the ReplicaSet
if errors.IsNotFound(err) {
return true, nil
@@ -295,11 +295,11 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
p = &pods.Items[0]
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pod.Labels = map[string]string{"name": "not-matching-name"}
_, err = f.ClientSet.Core().Pods(f.Namespace.Name).Update(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
if err != nil && errors.IsConflict(err) {
return false, nil
}
@@ -312,7 +312,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
By("Then the pod is released")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rs.UID {

View File

@@ -77,7 +77,7 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Creating service " + headlessSvcName + " in namespace " + ns)
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
_, err := c.Core().Services(ns).Create(headlessService)
_, err := c.CoreV1().Services(ns).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
})
@@ -650,7 +650,7 @@ var _ = SIGDescribe("StatefulSet", func() {
It("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
psLabels := klabels.Set(labels)
By("Initializing watcher for selector " + psLabels.String())
watcher, err := f.ClientSet.Core().Pods(ns).Watch(metav1.ListOptions{
watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
LabelSelector: psLabels.AsSelector().String(),
})
Expect(err).NotTo(HaveOccurred())
@@ -692,7 +692,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Expect(err).NotTo(HaveOccurred())
By("Scale down will halt with unhealthy stateful pod")
watcher, err = f.ClientSet.Core().Pods(ns).Watch(metav1.ListOptions{
watcher, err = f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
LabelSelector: psLabels.AsSelector().String(),
})
Expect(err).NotTo(HaveOccurred())
@@ -785,7 +785,7 @@ var _ = SIGDescribe("StatefulSet", func() {
NodeName: node.Name,
},
}
pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)
By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name)
@@ -803,7 +803,7 @@ var _ = SIGDescribe("StatefulSet", func() {
var initialStatefulPodUID types.UID
By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
framework.ExpectNoError(err)
// we need to get UID from pod in any state and wait until stateful set controller will remove pod atleast once
_, err = watch.Until(framework.StatefulPodTimeout, w, func(event watch.Event) (bool, error) {
@@ -826,13 +826,13 @@ var _ = SIGDescribe("StatefulSet", func() {
}
By("Removing pod with conflicting port in namespace " + f.Namespace.Name)
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
// we may catch delete event, thats why we are waiting for running phase like this, and not with watch.Until
Eventually(func() error {
statefulPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{})
statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{})
if err != nil {
return err
}

View File

@@ -326,27 +326,27 @@ var _ = SIGDescribe("Advanced Audit [Feature:Audit]", func() {
},
}
_, err := f.ClientSet.Core().ConfigMaps(namespace).Create(configMap)
_, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Create(configMap)
framework.ExpectNoError(err, "failed to create audit-configmap")
_, err = f.ClientSet.Core().ConfigMaps(namespace).Get(configMap.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Get(configMap.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get audit-configmap")
configMapChan, err := f.ClientSet.Core().ConfigMaps(namespace).Watch(watchOptions)
configMapChan, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Watch(watchOptions)
framework.ExpectNoError(err, "failed to create watch for config maps")
for range configMapChan.ResultChan() {
}
_, err = f.ClientSet.Core().ConfigMaps(namespace).Update(configMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Update(configMap)
framework.ExpectNoError(err, "failed to update audit-configmap")
_, err = f.ClientSet.Core().ConfigMaps(namespace).Patch(configMap.Name, types.JSONPatchType, patch)
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Patch(configMap.Name, types.JSONPatchType, patch)
framework.ExpectNoError(err, "failed to patch configmap")
_, err = f.ClientSet.Core().ConfigMaps(namespace).List(metav1.ListOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list config maps")
err = f.ClientSet.Core().ConfigMaps(namespace).Delete(configMap.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().ConfigMaps(namespace).Delete(configMap.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete audit-configmap")
},
[]auditEvent{
@ -452,27 +452,27 @@ var _ = SIGDescribe("Advanced Audit [Feature:Audit]", func() {
"top-secret": []byte("foo-bar"),
},
}
_, err := f.ClientSet.Core().Secrets(namespace).Create(secret)
_, err := f.ClientSet.CoreV1().Secrets(namespace).Create(secret)
framework.ExpectNoError(err, "failed to create audit-secret")
_, err = f.ClientSet.Core().Secrets(namespace).Get(secret.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().Secrets(namespace).Get(secret.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get audit-secret")
secretChan, err := f.ClientSet.Core().Secrets(namespace).Watch(watchOptions)
secretChan, err := f.ClientSet.CoreV1().Secrets(namespace).Watch(watchOptions)
framework.ExpectNoError(err, "failed to create watch for secrets")
for range secretChan.ResultChan() {
}
_, err = f.ClientSet.Core().Secrets(namespace).Update(secret)
_, err = f.ClientSet.CoreV1().Secrets(namespace).Update(secret)
framework.ExpectNoError(err, "failed to update audit-secret")
_, err = f.ClientSet.Core().Secrets(namespace).Patch(secret.Name, types.JSONPatchType, patch)
_, err = f.ClientSet.CoreV1().Secrets(namespace).Patch(secret.Name, types.JSONPatchType, patch)
framework.ExpectNoError(err, "failed to patch secret")
_, err = f.ClientSet.Core().Secrets(namespace).List(metav1.ListOptions{})
_, err = f.ClientSet.CoreV1().Secrets(namespace).List(metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list secrets")
err = f.ClientSet.Core().Secrets(namespace).Delete(secret.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Secrets(namespace).Delete(secret.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete audit-secret")
},
[]auditEvent{
@ -651,7 +651,7 @@ func expectAuditLines(f *framework.Framework, expected []auditEvent) {
}
// Fetch the log stream.
stream, err := f.ClientSet.Core().RESTClient().Get().AbsPath("/logs/kube-apiserver-audit.log").Stream()
stream, err := f.ClientSet.CoreV1().RESTClient().Get().AbsPath("/logs/kube-apiserver-audit.log").Stream()
framework.ExpectNoError(err, "could not read audit log")
defer stream.Close()
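A hedged sketch of the raw REST read the audit test above performs: the request is built from the REST client behind CoreV1() and streamed from an absolute apiserver path. readAuditLogLines is an illustrative name, and the context-free Stream() call matches the same client-go era.

// Illustrative helper, not repository code: stream an absolute apiserver path.
package example

import (
	"bufio"

	"k8s.io/client-go/kubernetes"
)

// readAuditLogLines streams /logs/kube-apiserver-audit.log from the apiserver
// and returns its lines along with any request or scanner error.
func readAuditLogLines(cs kubernetes.Interface) ([]string, error) {
	stream, err := cs.CoreV1().RESTClient().Get().
		AbsPath("/logs/kube-apiserver-audit.log").
		Stream()
	if err != nil {
		return nil, err
	}
	defer stream.Close()

	var lines []string
	scanner := bufio.NewScanner(stream)
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}
	return lines, scanner.Err()
}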

View File

@ -48,7 +48,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
var secrets []v1.ObjectReference
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
By("waiting for a single token reference")
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
framework.Logf("default service account was not found")
return false, nil
@ -74,19 +74,19 @@ var _ = SIGDescribe("ServiceAccounts", func() {
{
By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets))
}
// delete the referenced secret
By("deleting the service account token")
framework.ExpectNoError(f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil))
framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil))
// wait for the referenced secret to be removed, and another one autocreated
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("waiting for a new token reference")
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if err != nil {
framework.Logf("error getting default service account: %v", err)
return false, err
@ -112,7 +112,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
{
By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets))
}
@ -120,17 +120,17 @@ var _ = SIGDescribe("ServiceAccounts", func() {
// delete the reference from the service account
By("deleting the reference to the service account token")
{
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
sa.Secrets = nil
_, updateErr := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Update(sa)
_, updateErr := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Update(sa)
framework.ExpectNoError(updateErr)
}
// wait for another one to be autocreated
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("waiting for a new token to be created and added")
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if err != nil {
framework.Logf("error getting default service account: %v", err)
return false, err
@ -152,7 +152,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
{
By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets))
}
@ -165,7 +165,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
// Standard get, update retry loop
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("getting the auto-created API token")
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
framework.Logf("default service account was not found")
return false, nil
@ -179,7 +179,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
return false, nil
}
for _, secretRef := range sa.Secrets {
secret, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
continue
@ -253,15 +253,15 @@ var _ = SIGDescribe("ServiceAccounts", func() {
falseValue := false
mountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount"}, AutomountServiceAccountToken: &trueValue}
nomountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nomount"}, AutomountServiceAccountToken: &falseValue}
mountSA, err = f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Create(mountSA)
mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(mountSA)
framework.ExpectNoError(err)
nomountSA, err = f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Create(nomountSA)
nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(nomountSA)
framework.ExpectNoError(err)
// Standard get, update retry loop
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("getting the auto-created API token")
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
framework.Logf("mount service account was not found")
return false, nil
@ -275,7 +275,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
return false, nil
}
for _, secretRef := range sa.Secrets {
secret, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
continue
@ -365,7 +365,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
AutomountServiceAccountToken: tc.AutomountPodSpec,
},
}
createdPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)
framework.Logf("created pod %s", tc.PodName)
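The service-account hunks above all follow one polling shape; here is a compact sketch of it under stated assumptions: the helper name is illustrative and the interval and timeout are arbitrary values, not the framework's constants.

// Illustrative helper, not repository code: poll for the default SA token ref.
package example

import (
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForDefaultTokenRef polls until the default service account in ns carries
// at least one secret reference, tolerating the window before auto-creation.
func waitForDefaultTokenRef(cs kubernetes.Interface, ns string) error {
	return wait.Poll(500*time.Millisecond, 2*time.Minute, func() (bool, error) {
		sa, err := cs.CoreV1().ServiceAccounts(ns).Get("default", metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // not created yet, keep polling
		}
		if err != nil {
			return false, err
		}
		return len(sa.Secrets) > 0, nil
	})
}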

View File

@ -115,7 +115,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
makeSchedulableLoop:
@ -255,7 +255,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
// annotate all nodes with no-scale-down
ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled"
nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{
FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String(),
@ -475,7 +475,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e
return err
}
_, err = f.ClientSet.Core().Nodes().Patch(string(node.Name), types.StrategicMergePatchType, patchBytes)
_, err = f.ClientSet.CoreV1().Nodes().Patch(string(node.Name), types.StrategicMergePatchType, patchBytes)
if err != nil {
return err
}

View File

@ -128,7 +128,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
expectedNodes += size
}
framework.ExpectNoError(framework.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
@ -159,7 +159,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
EventsLoop:
for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
By("Waiting for NotTriggerScaleUp event")
events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(metav1.ListOptions{})
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {
@ -458,7 +458,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
registeredNodes := sets.NewString()
for nodeName := range newNodesSet {
node, err := f.ClientSet.Core().Nodes().Get(nodeName, metav1.GetOptions{})
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err == nil && node != nil {
registeredNodes.Insert(nodeName)
} else {
@ -609,7 +609,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
By("Make remaining nodes unschedulable")
nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
@ -685,7 +685,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ngNodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
Expect(len(ngNodes) == 1).To(BeTrue())
node, err := f.ClientSet.Core().Nodes().Get(ngNodes[0], metav1.GetOptions{})
node, err := f.ClientSet.CoreV1().Nodes().Get(ngNodes[0], metav1.GetOptions{})
By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
framework.ExpectNoError(err)
@ -718,7 +718,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
By("Block network connectivity to some nodes to simulate unhealthy cluster")
nodesToBreakCount := int(math.Floor(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
@ -762,7 +762,7 @@ func execCmd(args ...string) *exec.Cmd {
func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace string, podsPerNode, pdbSize int, verifyFunction func(int)) {
increasedSize := manuallyIncreaseClusterSize(f, migSizes)
nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
@ -984,7 +984,7 @@ func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, time
// WaitForClusterSizeFuncWithUnready waits until the cluster size matches the given function and assumes some unready nodes.
func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration, expectedUnready int) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
@ -1011,7 +1011,7 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int)
func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface, tolerateUnreadyCount int) error {
var notready []string
for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
pods, err := c.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
pods, err := c.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return fmt.Errorf("failed to get pods: %v", err)
}
@ -1051,7 +1051,7 @@ func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interf
}
func getAnyNode(c clientset.Interface) *v1.Node {
nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
@ -1086,10 +1086,10 @@ func drainNode(f *framework.Framework, node *v1.Node) {
By("Manually drain the single node")
podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
pods, err := f.ClientSet.Core().Pods(metav1.NamespaceAll).List(podOpts)
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
framework.ExpectNoError(err)
for _, pod := range pods.Items {
err = f.ClientSet.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}
}
@ -1097,7 +1097,7 @@ func drainNode(f *framework.Framework, node *v1.Node) {
func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
By(fmt.Sprintf("Taint node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.Core().Nodes().Get(node.Name, metav1.GetOptions{})
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return err
}
@ -1111,7 +1111,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
Value: "DisabledForTest",
Effect: v1.TaintEffectNoSchedule,
})
_, err = c.Core().Nodes().Update(freshNode)
_, err = c.CoreV1().Nodes().Update(freshNode)
if err == nil {
return nil
}
@ -1134,7 +1134,7 @@ func (CriticalAddonsOnlyError) Error() string {
func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error {
By(fmt.Sprintf("Remove taint from node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.Core().Nodes().Get(node.Name, metav1.GetOptions{})
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return err
}
@ -1152,7 +1152,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd
return nil
}
freshNode.Spec.Taints = newTaints
_, err = c.Core().Nodes().Update(freshNode)
_, err = c.CoreV1().Nodes().Update(freshNode)
if err == nil {
return nil
}
@ -1181,7 +1181,7 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id
if err != nil {
return err
}
_, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
@ -1205,7 +1205,7 @@ func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods in
if err != nil {
return err
}
_, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
@ -1287,7 +1287,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
if err != nil {
return err
}
rc, err := f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
@ -1301,7 +1301,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
// (we retry 409 errors in case rc reference got out of sync)
for j := 0; j < 3; j++ {
*rc.Spec.Replicas = int32((i + 1) * podsPerNode)
rc, err = f.ClientSet.Core().ReplicationControllers(namespace).Update(rc)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(rc)
if err == nil {
break
}
@ -1309,14 +1309,14 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
return err
}
glog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
rc, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
}
}
err = wait.PollImmediate(5*time.Second, podTimeout, func() (bool, error) {
rc, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil || rc.Status.ReadyReplicas < int32((i+1)*podsPerNode) {
return false, nil
}
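The autoscaling tests above repeatedly list only schedulable nodes; a small sketch of that one call follows, with an illustrative helper name and the context-free List signature of this client-go era.

// Illustrative helper, not repository code: list nodes that are schedulable.
package example

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// listSchedulableNodes returns every node whose spec.unschedulable is false.
func listSchedulableNodes(cs kubernetes.Interface) ([]v1.Node, error) {
	nodes, err := cs.CoreV1().Nodes().List(metav1.ListOptions{
		FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
	})
	if err != nil {
		return nil, err
	}
	return nodes.Items, nil
}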

View File

@ -117,7 +117,7 @@ func createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) er
if err != nil {
return err
}
_, err = cs.Core().Pods(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100))
_, err = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100))
if err != nil {
return err
}
@ -127,7 +127,7 @@ func createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) er
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface) {
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterDeployment, &metav1.DeleteOptions{})
_ = cs.Core().Pods(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterPod, &metav1.DeleteOptions{})
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterPod, &metav1.DeleteOptions{})
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(dummyDeploymentName, &metav1.DeleteOptions{})
}

View File

@ -248,7 +248,7 @@ func getScheduableCores(nodes []v1.Node) int64 {
}
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
cm, err := c.Core().ConfigMaps(metav1.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
if err != nil {
return nil, err
}
@ -256,7 +256,7 @@ func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
}
func deleteDNSScalingConfigMap(c clientset.Interface) error {
if err := c.Core().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
return err
}
framework.Logf("DNS autoscaling ConfigMap deleted.")
@ -282,7 +282,7 @@ func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap {
}
func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error {
_, err := c.Core().ConfigMaps(metav1.NamespaceSystem).Update(configMap)
_, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(configMap)
if err != nil {
return err
}
@ -308,7 +308,7 @@ func getDNSReplicas(c clientset.Interface) (int, error) {
func deleteDNSAutoscalerPod(c clientset.Interface) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(metav1.NamespaceSystem).List(listOpts)
pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
if err != nil {
return err
}
@ -317,7 +317,7 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error {
}
podName := pods.Items[0].Name
if err := c.Core().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
return err
}
framework.Logf("DNS autoscaling pod %v deleted.", podName)
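A sketch of the ConfigMap round trip the DNS-autoscaling helpers above perform: fetch from kube-system, adjust the data, and write it back through CoreV1(). The ConfigMap name and the "linear" data key are assumptions about the kube-dns-autoscaler addon, not something this diff asserts; the helper name is illustrative.

// Illustrative helper, not repository code: update the DNS autoscaler ConfigMap.
package example

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// updateDNSAutoscalerParams overwrites the autoscaler's "linear" parameters.
// "kube-dns-autoscaler" and the "linear" key are assumed addon conventions.
func updateDNSAutoscalerParams(cs kubernetes.Interface, linearParams string) (*v1.ConfigMap, error) {
	cm, err := cs.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get("kube-dns-autoscaler", metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if cm.Data == nil {
		cm.Data = map[string]string{}
	}
	cm.Data["linear"] = linearParams
	return cs.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(cm)
}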

View File

@ -160,7 +160,7 @@ profile %s flags=(attach_disconnected) {
profileName: profile,
},
}
_, err := f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(cm)
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
framework.ExpectNoError(err, "Failed to create apparmor-profiles ConfigMap")
}
@ -228,7 +228,7 @@ func createAppArmorProfileLoader(f *framework.Framework) {
},
},
}
_, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(loader)
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(loader)
framework.ExpectNoError(err, "Failed to create apparmor-loader ReplicationController")
// Wait for loader to be ready.

View File

@ -243,7 +243,7 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
defer cancel()
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
@ -270,7 +270,7 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
defer cancel()
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
@ -297,7 +297,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
defer cancel()
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
@ -321,7 +321,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
func (rc *ResourceConsumer) GetReplicas() int {
switch rc.kind {
case KindRC:
replicationController, err := rc.clientSet.Core().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if replicationController == nil {
framework.Failf(rcIsNil)
@ -404,9 +404,9 @@ func (rc *ResourceConsumer) CleanUp() {
kind, err := kindOf(rc.kind)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, kind, rc.nsName, rc.name))
framework.ExpectNoError(rc.clientSet.Core().Services(rc.nsName).Delete(rc.name, nil))
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil))
framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
framework.ExpectNoError(rc.clientSet.Core().Services(rc.nsName).Delete(rc.controllerName, nil))
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil))
}
func kindOf(kind string) (schema.GroupKind, error) {
@ -424,7 +424,7 @@ func kindOf(kind string) (schema.GroupKind, error) {
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
_, err := c.Core().Services(ns).Create(&v1.Service{
_, err := c.CoreV1().Services(ns).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
@ -478,7 +478,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli
By(fmt.Sprintf("Running controller"))
controllerName := name + "-ctrl"
_, err = c.Core().Services(ns).Create(&v1.Service{
_, err = c.CoreV1().Services(ns).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: controllerName,
},

View File

@ -34,7 +34,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
configMap := newConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@ -77,7 +77,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
configMap := newEnvFromConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}

View File

@ -92,7 +92,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@ -142,7 +142,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(configMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
By("waiting to observe update in volume")
@ -196,12 +196,12 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
@ -305,18 +305,18 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
@ -339,7 +339,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@ -427,7 +427,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@ -507,7 +507,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}

View File

@ -48,14 +48,14 @@ func ObserveNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodeP
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector.String()
ls, err := f.ClientSet.Core().Nodes().List(options)
ls, err := f.ClientSet.CoreV1().Nodes().List(options)
return ls, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
// Signal parent goroutine that watching has begun.
defer informerStartedGuard.Do(func() { close(informerStartedChan) })
options.FieldSelector = nodeSelector.String()
w, err := f.ClientSet.Core().Nodes().Watch(options)
w, err := f.ClientSet.CoreV1().Nodes().Watch(options)
return w, err
},
},
@ -105,13 +105,13 @@ func ObserveEventAfterAction(f *framework.Framework, eventPredicate func(*v1.Eve
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
ls, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options)
ls, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options)
return ls, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
// Signal parent goroutine that watching has begun.
defer informerStartedGuard.Do(func() { close(informerStartedChan) })
w, err := f.ClientSet.Core().Events(f.Namespace.Name).Watch(options)
w, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Watch(options)
return w, err
},
},
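For completeness, a sketch of the list/watch plumbing in the observer helpers above, wiring cache.NewInformer to the CoreV1() Events client. The resync period and the event handler are placeholders, and the function name is illustrative.

// Illustrative helper, not repository code: informer over namespace events.
package example

import (
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchNamespaceEvents runs an informer over v1 Events in ns until stopCh closes.
func watchNamespaceEvents(cs kubernetes.Interface, ns string, stopCh <-chan struct{}) {
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				return cs.CoreV1().Events(ns).List(options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return cs.CoreV1().Events(ns).Watch(options)
			},
		},
		&v1.Event{},
		30*time.Second, // placeholder resync period
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if e, ok := obj.(*v1.Event); ok {
					fmt.Printf("event: %s %s\n", e.Reason, e.Message)
				}
			},
		},
	)
	controller.Run(stopCh)
}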

View File

@ -427,7 +427,7 @@ var _ = framework.KubeDescribe("Pods", func() {
},
},
}
_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(svc)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc)
Expect(err).NotTo(HaveOccurred(), "failed to create service")
// Make a client pod that verifies that it has the service environment variables.
@ -491,7 +491,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("submitting the pod to kubernetes")
pod = podClient.CreateSync(pod)
req := f.ClientSet.Core().RESTClient().Get().
req := f.ClientSet.CoreV1().RESTClient().Get().
Namespace(f.Namespace.Name).
Resource("pods").
Name(pod.Name).
@ -561,7 +561,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("submitting the pod to kubernetes")
podClient.CreateSync(pod)
req := f.ClientSet.Core().RESTClient().Get().
req := f.ClientSet.CoreV1().RESTClient().Get().
Namespace(f.Namespace.Name).
Resource("pods").
Name(pod.Name).

View File

@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("Projected", func() {
secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.Core().Secrets(namespace2.Name).Create(secret2); err != nil {
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
@ -125,7 +125,7 @@ var _ = framework.KubeDescribe("Projected", func() {
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@ -250,12 +250,12 @@ var _ = framework.KubeDescribe("Projected", func() {
By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
@ -377,18 +377,18 @@ var _ = framework.KubeDescribe("Projected", func() {
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Update(updateSecret)
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
@ -496,7 +496,7 @@ var _ = framework.KubeDescribe("Projected", func() {
By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@ -552,7 +552,7 @@ var _ = framework.KubeDescribe("Projected", func() {
By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(configMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
By("waiting to observe update in volume")
@ -612,12 +612,12 @@ var _ = framework.KubeDescribe("Projected", func() {
By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
@ -739,18 +739,18 @@ var _ = framework.KubeDescribe("Projected", func() {
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
@ -778,7 +778,7 @@ var _ = framework.KubeDescribe("Projected", func() {
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@ -1114,11 +1114,11 @@ var _ = framework.KubeDescribe("Projected", func() {
}
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@ -1155,7 +1155,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@ -1236,7 +1236,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@ -1318,7 +1318,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@ -1403,7 +1403,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup in
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}

View File

@ -41,7 +41,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@ -89,7 +89,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
secret := newEnvFromSecret(f.Namespace.Name, name)
By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}

View File

@ -98,7 +98,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.Core().Secrets(namespace2.Name).Create(secret2); err != nil {
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
@ -123,7 +123,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@ -233,12 +233,12 @@ var _ = Describe("[sig-storage] Secrets", func() {
By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
@ -336,18 +336,18 @@ var _ = Describe("[sig-storage] Secrets", func() {
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Update(updateSecret)
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
@ -383,7 +383,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@ -455,7 +455,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}

View File

@ -171,7 +171,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
})
By("Creating a pod with one valid and two invalid sysctls")
client := f.ClientSet.Core().Pods(f.Namespace.Name)
client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
_, err := client.Create(pod)
Expect(err).NotTo(BeNil())

View File

@ -86,14 +86,14 @@ func svcByName(name string, port int) *v1.Service {
func NewSVCByName(c clientset.Interface, ns, name string) error {
const testPort = 9376
_, err := c.Core().Services(ns).Create(svcByName(name, testPort))
_, err := c.CoreV1().Services(ns).Create(svcByName(name, testPort))
return err
}
// NewRCByName creates a replication controller with a selector by name of name.
func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) {
By(fmt.Sprintf("creating replication controller %s", name))
return c.Core().ReplicationControllers(ns).Create(framework.RcByNamePort(
return c.CoreV1().ReplicationControllers(ns).Create(framework.RcByNamePort(
name, replicas, framework.ServeHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
}
@ -101,7 +101,7 @@ func RestartNodes(c clientset.Interface, nodeNames []string) error {
// List old boot IDs.
oldBootIDs := make(map[string]string)
for _, name := range nodeNames {
node, err := c.Core().Nodes().Get(name, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting node info before reboot: %s", err)
}
@ -123,7 +123,7 @@ func RestartNodes(c clientset.Interface, nodeNames []string) error {
// Wait for their boot IDs to change.
for _, name := range nodeNames {
if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
node, err := c.Core().Nodes().Get(name, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error getting node info after reboot: %s", err)
}

Some files were not shown because too many files have changed in this diff.