commit abe6321296

    Merge pull request #87952 from mikedanese/opts

    add *Options to Create, Update, and Patch in generated clientsets
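The change is mechanical but touches every call site: the generated clientset methods Create, Update, and Patch (and their *Status variants) now take an explicit options struct instead of defaulting it internally. A minimal sketch of the new calling convention, assuming the post-merge client-go API; the fake clientset and the pod object here are illustrative, not part of this commit:

    package main

    import (
        "context"
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func main() {
        client := fake.NewSimpleClientset()
        pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}

        // Previously: Create(context.TODO(), pod).
        // Now the zero-value options struct is passed explicitly when no options are needed.
        if _, err := client.CoreV1().Pods("default").Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
            panic(err)
        }
        fmt.Println("created pod with explicit CreateOptions")
    }

The hunks below apply exactly this transformation across kubeadm, the controllers, and the tests.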
@@ -347,7 +347,7 @@ func createDNSService(dnsService *v1.Service, serviceBytes []byte, client client
     }

     // Can't use a generic apiclient helper func here as we have to tolerate more than AlreadyExists.
-    if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(context.TODO(), dnsService); err != nil {
+    if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(context.TODO(), dnsService, metav1.CreateOptions{}); err != nil {
         // Ignore if the Service is invalid with this error message:
         // Service "kube-dns" is invalid: spec.clusterIP: Invalid value: "10.96.0.10": provided IP is already allocated

@@ -355,7 +355,7 @@ func createDNSService(dnsService *v1.Service, serviceBytes []byte, client client
         return errors.Wrap(err, "unable to create a new DNS service")
     }

-    if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(context.TODO(), dnsService); err != nil {
+    if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(context.TODO(), dnsService, metav1.UpdateOptions{}); err != nil {
         return errors.Wrap(err, "unable to create/update the DNS service")
     }
 }
@@ -383,7 +383,7 @@ func migrateCoreDNSCorefile(client clientset.Interface, cm *v1.ConfigMap, corefi
             "Corefile": corefile,
             "Corefile-backup": corefile,
         },
-    }); err != nil {
+    }, metav1.UpdateOptions{}); err != nil {
         return errors.Wrap(err, "unable to update the CoreDNS ConfigMap with backup Corefile")
     }
     if err := patchCoreDNSDeployment(client, "Corefile-backup"); err != nil {
@@ -405,7 +405,7 @@ func migrateCoreDNSCorefile(client clientset.Interface, cm *v1.ConfigMap, corefi
             "Corefile": updatedCorefile,
             "Corefile-backup": corefile,
         },
-    }); err != nil {
+    }, metav1.UpdateOptions{}); err != nil {
         return errors.Wrap(err, "unable to update the CoreDNS ConfigMap")
     }
     fmt.Println("[addons]: Migrating CoreDNS Corefile")
@@ -452,7 +452,7 @@ func patchCoreDNSDeployment(client clientset.Interface, coreDNSCorefileName stri
     }
     patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"volumes":[{"name": "config-volume", "configMap":{"name": "coredns", "items":[{"key": "%s", "path": "%s"}]}}]}}}}`, coreDNSCorefileName, coreDNSCorefileName)

-    if _, err := client.AppsV1().Deployments(dnsDeployment.ObjectMeta.Namespace).Patch(context.TODO(), dnsDeployment.Name, types.StrategicMergePatchType, []byte(patch)); err != nil {
+    if _, err := client.AppsV1().Deployments(dnsDeployment.ObjectMeta.Namespace).Patch(context.TODO(), dnsDeployment.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}); err != nil {
         return errors.Wrap(err, "unable to patch the CoreDNS deployment")
     }
     return nil
@@ -741,7 +741,7 @@ func createClientAndCoreDNSManifest(t *testing.T, corefile, coreDNSVersion strin
         Data: map[string]string{
             "Corefile": corefile,
         },
-    })
+    }, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("error creating ConfigMap: %v", err)
     }
@@ -768,7 +768,7 @@ func createClientAndCoreDNSManifest(t *testing.T, corefile, coreDNSVersion strin
                 },
             },
         },
-    })
+    }, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("error creating deployment: %v", err)
     }
@@ -91,7 +91,7 @@ func (r *APIRenewer) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Sign
         },
     }

-    req, err := r.client.CertificateSigningRequests().Create(context.TODO(), k8sCSR)
+    req, err := r.client.CertificateSigningRequests().Create(context.TODO(), k8sCSR, metav1.CreateOptions{})
     if err != nil {
         return nil, nil, errors.Wrap(err, "couldn't create certificate signing request")
     }
@@ -157,7 +157,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
     // Create the Job, but retry in case it is being currently deleted
     klog.V(2).Infof("Creating Job %q in the namespace %q", jobName, ns)
     err := wait.PollImmediate(time.Second*1, timeout, func() (bool, error) {
-        if _, err := client.BatchV1().Jobs(ns).Create(context.TODO(), job); err != nil {
+        if _, err := client.BatchV1().Jobs(ns).Create(context.TODO(), job, metav1.CreateOptions{}); err != nil {
             klog.V(2).Infof("Could not create Job %q in the namespace %q, retrying: %v", jobName, ns, err)
             lastError = err
             return false, nil
@@ -44,12 +44,12 @@ type ConfigMapMutator func(*v1.ConfigMap) error

 // CreateOrUpdateConfigMap creates a ConfigMap if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateConfigMap(client clientset.Interface, cm *v1.ConfigMap) error {
-    if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil {
+    if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil {
         if !apierrors.IsAlreadyExists(err) {
             return errors.Wrap(err, "unable to create ConfigMap")
         }

-        if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), cm); err != nil {
+        if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}); err != nil {
             return errors.Wrap(err, "unable to update ConfigMap")
         }
     }
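The kubeadm create-or-update helpers in this file all follow the same shape. A hedged caller-side sketch of that idiom under the new signatures (the ConfigMap name and data are illustrative; client is any clientset.Interface, and apierrors is k8s.io/apimachinery/pkg/api/errors):

    cm := &v1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "demo-config"},
        Data:       map[string]string{"key": "value"},
    }
    if _, err := client.CoreV1().ConfigMaps(cm.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil {
        if !apierrors.IsAlreadyExists(err) {
            return err // anything other than AlreadyExists is fatal
        }
        // The resource exists: fall through to an in-place Update, again with zero-value options.
        if _, err := client.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}); err != nil {
            return err
        }
    }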
@@ -68,7 +68,7 @@ func CreateOrMutateConfigMap(client clientset.Interface, cm *v1.ConfigMap, mutat
         Factor: 1.0,
         Jitter: 0.1,
     }, func() (bool, error) {
-        if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil {
+        if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil {
             lastError = err
             if apierrors.IsAlreadyExists(err) {
                 lastError = MutateConfigMap(client, metav1.ObjectMeta{Namespace: cm.ObjectMeta.Namespace, Name: cm.ObjectMeta.Name}, mutator)
@@ -102,7 +102,7 @@ func MutateConfigMap(client clientset.Interface, meta metav1.ObjectMeta, mutator
         if err = mutator(configMap); err != nil {
             return errors.Wrap(err, "unable to mutate ConfigMap")
         }
-        _, err = client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Update(context.TODO(), configMap)
+        _, err = client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
         return err
     })
 }
@@ -113,7 +113,7 @@ func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, confi
         if !apierrors.IsNotFound(err) {
             return nil
         }
-        if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil {
+        if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil {
             if !apierrors.IsAlreadyExists(err) {
                 return errors.Wrap(err, "unable to create ConfigMap")
             }
@@ -124,12 +124,12 @@ func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, confi

 // CreateOrUpdateSecret creates a Secret if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error {
-    if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(context.TODO(), secret); err != nil {
+    if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
         if !apierrors.IsAlreadyExists(err) {
             return errors.Wrap(err, "unable to create secret")
         }

-        if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(context.TODO(), secret); err != nil {
+        if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
             return errors.Wrap(err, "unable to update secret")
         }
     }
@@ -138,7 +138,7 @@ func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error {

 // CreateOrUpdateServiceAccount creates a ServiceAccount if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAccount) error {
-    if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(context.TODO(), sa); err != nil {
+    if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(context.TODO(), sa, metav1.CreateOptions{}); err != nil {
         // Note: We don't run .Update here afterwards as that's probably not required
         // Only thing that could be updated is annotations/labels in .metadata, but we don't use that currently
         if !apierrors.IsAlreadyExists(err) {
@@ -150,12 +150,12 @@ func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAcco

 // CreateOrUpdateDeployment creates a Deployment if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deployment) error {
-    if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy); err != nil {
+    if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy, metav1.CreateOptions{}); err != nil {
         if !apierrors.IsAlreadyExists(err) {
             return errors.Wrap(err, "unable to create deployment")
         }

-        if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(context.TODO(), deploy); err != nil {
+        if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{}); err != nil {
             return errors.Wrap(err, "unable to update deployment")
         }
     }
@@ -168,7 +168,7 @@ func CreateOrRetainDeployment(client clientset.Interface, deploy *apps.Deploymen
         if !apierrors.IsNotFound(err) {
             return nil
         }
-        if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy); err != nil {
+        if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy, metav1.CreateOptions{}); err != nil {
             if !apierrors.IsAlreadyExists(err) {
                 return errors.Wrap(err, "unable to create deployment")
             }
@@ -179,12 +179,12 @@ func CreateOrRetainDeployment(client clientset.Interface, deploy *apps.Deploymen

 // CreateOrUpdateDaemonSet creates a DaemonSet if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) error {
-    if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(context.TODO(), ds); err != nil {
+    if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(context.TODO(), ds, metav1.CreateOptions{}); err != nil {
         if !apierrors.IsAlreadyExists(err) {
             return errors.Wrap(err, "unable to create daemonset")
         }

-        if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(context.TODO(), ds); err != nil {
+        if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(context.TODO(), ds, metav1.UpdateOptions{}); err != nil {
             return errors.Wrap(err, "unable to update daemonset")
         }
     }
@@ -211,12 +211,12 @@ func DeleteDeploymentForeground(client clientset.Interface, namespace, name stri

 // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error {
-    if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(context.TODO(), role); err != nil {
+    if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(context.TODO(), role, metav1.CreateOptions{}); err != nil {
         if !apierrors.IsAlreadyExists(err) {
             return errors.Wrap(err, "unable to create RBAC role")
         }

-        if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(context.TODO(), role); err != nil {
+        if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(context.TODO(), role, metav1.UpdateOptions{}); err != nil {
             return errors.Wrap(err, "unable to update RBAC role")
         }
     }
@@ -225,12 +225,12 @@ func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error {

 // CreateOrUpdateRoleBinding creates a RoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.RoleBinding) error {
-    if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(context.TODO(), roleBinding); err != nil {
+    if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(context.TODO(), roleBinding, metav1.CreateOptions{}); err != nil {
         if !apierrors.IsAlreadyExists(err) {
             return errors.Wrap(err, "unable to create RBAC rolebinding")
         }

-        if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(context.TODO(), roleBinding); err != nil {
+        if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(context.TODO(), roleBinding, metav1.UpdateOptions{}); err != nil {
             return errors.Wrap(err, "unable to update RBAC rolebinding")
         }
     }
@@ -239,12 +239,12 @@ func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.Rol

 // CreateOrUpdateClusterRole creates a ClusterRole if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.ClusterRole) error {
-    if _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole); err != nil {
+    if _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{}); err != nil {
         if !apierrors.IsAlreadyExists(err) {
             return errors.Wrap(err, "unable to create RBAC clusterrole")
         }

-        if _, err := client.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole); err != nil {
+        if _, err := client.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}); err != nil {
             return errors.Wrap(err, "unable to update RBAC clusterrole")
         }
     }
@@ -253,12 +253,12 @@ func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.Clu

 // CreateOrUpdateClusterRoleBinding creates a ClusterRoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBinding *rbac.ClusterRoleBinding) error {
-    if _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding); err != nil {
+    if _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}); err != nil {
         if !apierrors.IsAlreadyExists(err) {
             return errors.Wrap(err, "unable to create RBAC clusterrolebinding")
         }

-        if _, err := client.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterRoleBinding); err != nil {
+        if _, err := client.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterRoleBinding, metav1.UpdateOptions{}); err != nil {
             return errors.Wrap(err, "unable to update RBAC clusterrolebinding")
         }
     }
@@ -302,7 +302,7 @@ func PatchNodeOnce(client clientset.Interface, nodeName string, patchFn func(*v1
         return false, errors.Wrap(err, "failed to create two way merge patch")
     }

-    if _, err := client.CoreV1().Nodes().Patch(context.TODO(), n.Name, types.StrategicMergePatchType, patchBytes); err != nil {
+    if _, err := client.CoreV1().Nodes().Patch(context.TODO(), n.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
         // TODO also check for timeouts
         if apierrors.IsConflict(err) {
             fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)")
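Patch gains a metav1.PatchOptions argument in the same position, after the patch payload. A minimal sketch of a strategic-merge patch under the new signature (the node name and label payload are illustrative; types is k8s.io/apimachinery/pkg/types):

    patch := []byte(`{"metadata":{"labels":{"example.io/role":"worker"}}}`)
    if _, err := client.CoreV1().Nodes().Patch(context.TODO(), "node-1", types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil {
        return fmt.Errorf("failed to patch node: %v", err)
    }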
@@ -69,7 +69,7 @@ func TestPatchNodeNonErrorCases(t *testing.T) {
     for _, tc := range testcases {
         t.Run(tc.name, func(t *testing.T) {
             client := fake.NewSimpleClientset()
-            _, err := client.CoreV1().Nodes().Create(context.TODO(), &tc.node)
+            _, err := client.CoreV1().Nodes().Create(context.TODO(), &tc.node, metav1.CreateOptions{})
             if err != nil {
                 t.Fatalf("failed to create node to fake client: %v", err)
             }
@@ -122,7 +122,7 @@ func createClientAndConfigMap(t *testing.T) *fake.Clientset {
         Data: map[string]string{
             "key": "some-value",
         },
-    })
+    }, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("error creating ConfigMap: %v", err)
     }
@@ -343,7 +343,7 @@ func TestGetNodeRegistration(t *testing.T) {
             client := clientsetfake.NewSimpleClientset()

             if rt.node != nil {
-                _, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node)
+                _, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node, metav1.CreateOptions{})
                 if err != nil {
                     t.Errorf("couldn't create Node")
                     return
@@ -619,7 +619,7 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {
             client := clientsetfake.NewSimpleClientset()

             if rt.node != nil {
-                _, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node)
+                _, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node, metav1.CreateOptions{})
                 if err != nil {
                     t.Errorf("couldn't create Node")
                     return
@@ -103,7 +103,7 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) {
         testPod("nsA", "pod-1"),
     )

-    _, err := tc.CoreV1().Namespaces().Create(context.TODO(), testNamespace("nsB"))
+    _, err := tc.CoreV1().Namespaces().Create(context.TODO(), testNamespace("nsB"), metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Namespaces.Create: %s", err)
     }
@@ -116,7 +116,7 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) {
         t.Fatalf("Expected %d namespaces to match, got %d", expected, actual)
     }

-    _, err = tc.CoreV1().Pods("nsB").Create(context.TODO(), testPod("", "pod-1"))
+    _, err = tc.CoreV1().Pods("nsB").Create(context.TODO(), testPod("", "pod-1"), metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Pods.Create nsB/pod-1: %s", err)
     }
@@ -132,17 +132,17 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) {
         t.Fatalf("Expected to find pod nsB/pod-1t, got %s/%s", podB1.Namespace, podB1.Name)
     }

-    _, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("", "pod-1"))
+    _, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("", "pod-1"), metav1.CreateOptions{})
     if err == nil {
         t.Fatalf("Expected Pods.Create to fail with already exists error")
     }

-    _, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-1"))
+    _, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-1"), metav1.UpdateOptions{})
     if err != nil {
         t.Fatalf("Pods.Update nsA/pod-1: %s", err)
     }

-    _, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("nsB", "pod-2"))
+    _, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("nsB", "pod-2"), metav1.CreateOptions{})
     if err == nil {
         t.Fatalf("Expected Pods.Create to fail with bad request from namespace mismtach")
     }
@@ -150,7 +150,7 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) {
         t.Fatalf("Expected Pods.Create error to provide object and request namespaces, got %q", err)
     }

-    _, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-3"))
+    _, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-3"), metav1.UpdateOptions{})
     if err == nil {
         t.Fatalf("Expected Pods.Update nsA/pod-3 to fail with not found error")
     }
@@ -27,6 +27,7 @@ import (

     v1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
@@ -243,7 +244,7 @@ func (e *Signer) signConfigMap() {
 }

 func (e *Signer) updateConfigMap(cm *v1.ConfigMap) {
-    _, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm)
+    _, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
     if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {
         klog.V(3).Infof("Error updating ConfigMap: %v", err)
     }
@@ -30,6 +30,7 @@ go_library(
         "//pkg/controller/certificates:go_default_library",
         "//staging/src/k8s.io/api/authorization/v1:go_default_library",
         "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
     ],
@@ -26,6 +26,7 @@ import (

     authorization "k8s.io/api/authorization/v1"
     capi "k8s.io/api/certificates/v1beta1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
     clientset "k8s.io/client-go/kubernetes"
     capihelper "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
@@ -130,7 +131,7 @@ func (a *sarApprover) authorize(csr *capi.CertificateSigningRequest, rattrs auth
             ResourceAttributes: &rattrs,
         },
     }
-    sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar)
+    sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
     if err != nil {
         return false, err
     }
@@ -186,7 +186,7 @@ func (c *Publisher) syncNamespace(ns string) error {
             Data: map[string]string{
                 "ca.crt": string(c.rootCA),
             },
-        })
+        }, metav1.CreateOptions{})
         return err
     case err != nil:
         return err
@@ -202,7 +202,7 @@ func (c *Publisher) syncNamespace(ns string) error {

     cm.Data = data

-    _, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm)
+    _, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm, metav1.UpdateOptions{})
     return err
 }

@@ -36,6 +36,7 @@ go_library(
         "//pkg/controller/certificates:go_default_library",
         "//pkg/controller/certificates/authority:go_default_library",
         "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/server/dynamiccertificates:go_default_library",
         "//staging/src/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
@@ -24,6 +24,7 @@ import (
     "time"

     capi "k8s.io/api/certificates/v1beta1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apiserver/pkg/server/dynamiccertificates"
     certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
     clientset "k8s.io/client-go/kubernetes"
@@ -95,7 +96,7 @@ func (s *signer) handle(csr *capi.CertificateSigningRequest) error {
     if err != nil {
         return fmt.Errorf("error auto signing csr: %v", err)
     }
-    _, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr)
+    _, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr, metav1.UpdateOptions{})
     if err != nil {
         return fmt.Errorf("error updating signature for csr: %v", err)
     }
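Status writers change in lockstep: UpdateStatus now takes metav1.UpdateOptions as well. A hedged sketch mirroring the CSR update in the hunk above (csr is assumed to be a populated *capi.CertificateSigningRequest and client a clientset.Interface):

    // Persist the newly signed certificate via the status subresource,
    // passing the zero-value UpdateOptions required by the new signature.
    if _, err := client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr, metav1.UpdateOptions{}); err != nil {
        return fmt.Errorf("error updating signature for csr: %v", err)
    }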
@@ -186,7 +186,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount,

     // Try token review first
     tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}}
-    if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(context.TODO(), tokenReview); err == nil {
+    if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(context.TODO(), tokenReview, metav1.CreateOptions{}); err == nil {
         if !tokenResult.Status.Authenticated {
             klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Namespace, sa.Name)
             return nil, false, nil
@@ -26,6 +26,7 @@ import (
     "golang.org/x/oauth2"

     v1authenticationapi "k8s.io/api/authentication/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/apimachinery/pkg/util/wait"
     apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
@@ -179,7 +180,7 @@ func (ts *tokenSourceImpl) Token() (*oauth2.Token, error) {
             Spec: v1authenticationapi.TokenRequestSpec{
                 ExpirationSeconds: utilpointer.Int64Ptr(ts.expirationSeconds),
             },
-        })
+        }, metav1.CreateOptions{})
         if inErr != nil {
             klog.Warningf("get token failed: %v", inErr)
             return false, nil
@@ -385,7 +385,7 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod
         modify(curNode)
     }

-    _, err = cnc.kubeClient.CoreV1().Nodes().Update(context.TODO(), curNode)
+    _, err = cnc.kubeClient.CoreV1().Nodes().Update(context.TODO(), curNode, metav1.UpdateOptions{})
     if err != nil {
         return err
     }
@@ -127,7 +127,7 @@ func (c *ClusterRoleAggregationController) syncClusterRole(key string) error {
     for _, rule := range newPolicyRules {
         clusterRole.Rules = append(clusterRole.Rules, *rule.DeepCopy())
     }
-    _, err = c.clusterRoleClient.ClusterRoles().Update(context.TODO(), clusterRole)
+    _, err = c.clusterRoleClient.ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{})

     return err
 }
@@ -420,7 +420,7 @@ type RealRSControl struct {
 var _ RSControlInterface = &RealRSControl{}

 func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
-    _, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data)
+    _, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
     return err
 }

@@ -440,7 +440,7 @@ type RealControllerRevisionControl struct {
 var _ ControllerRevisionControlInterface = &RealControllerRevisionControl{}

 func (r RealControllerRevisionControl) PatchControllerRevision(namespace, name string, data []byte) error {
-    _, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data)
+    _, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
     return err
 }

@@ -537,7 +537,7 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v
 }

 func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
-    _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data)
+    _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
     return err
 }

@@ -577,7 +577,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT
     if len(labels.Set(pod.Labels)) == 0 {
         return fmt.Errorf("unable to create pods, no labels")
     }
-    newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod)
+    newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
     if err != nil {
         // only send an event if the namespace isn't terminating
         if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
@@ -1119,7 +1119,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
         return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
     }

-    _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes)
+    _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
     return err
 }

@@ -1178,7 +1178,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la
     if err != nil {
         return fmt.Errorf("failed to create a two-way merge patch: %v", err)
     }
-    if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes); err != nil {
+    if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
         return fmt.Errorf("failed to patch the node: %v", err)
     }
     return nil
@@ -1197,13 +1197,13 @@ func getOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, nam
     // Create the namespace if we can't verify it exists.
     // Tolerate errors, since we don't know whether this component has namespace creation permissions.
     if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) {
-        if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}); err != nil && !apierrors.IsAlreadyExists(err) {
+        if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
             klog.Warningf("create non-exist namespace %s failed:%v", namespace, err)
         }
     }

     // Create the service account
-    sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}})
+    sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, metav1.CreateOptions{})
     if apierrors.IsAlreadyExists(err) {
         // If we're racing to init and someone else already created it, re-fetch
         return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{})
@@ -45,7 +45,7 @@ type realSJControl struct {
 var _ sjControlInterface = &realSJControl{}

 func (c *realSJControl) UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
-    return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(context.TODO(), sj)
+    return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(context.TODO(), sj, metav1.UpdateOptions{})
 }

 // fakeSJControl is the default implementation of sjControlInterface.
@@ -107,15 +107,15 @@ func (r realJobControl) GetJob(namespace, name string) (*batchv1.Job, error) {
 }

 func (r realJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
-    return r.KubeClient.BatchV1().Jobs(namespace).Update(context.TODO(), job)
+    return r.KubeClient.BatchV1().Jobs(namespace).Update(context.TODO(), job, metav1.UpdateOptions{})
 }

 func (r realJobControl) PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error) {
-    return r.KubeClient.BatchV1().Jobs(namespace).Patch(context.TODO(), name, pt, data, subresources...)
+    return r.KubeClient.BatchV1().Jobs(namespace).Patch(context.TODO(), name, pt, data, metav1.PatchOptions{}, subresources...)
 }

 func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
-    return r.KubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job)
+    return r.KubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{})
 }

 func (r realJobControl) DeleteJob(namespace string, name string) error {
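Because the options struct is positional, methods that also accept variadic subresources place it immediately before that list, as PatchJob above shows. A hedged sketch of patching a Job's status subresource (the job name and payload are illustrative):

    _, err := client.BatchV1().Jobs("default").Patch(context.TODO(), "demo-job",
        types.MergePatchType, []byte(`{"status":{"active":0}}`),
        metav1.PatchOptions{}, "status")
    if err != nil {
        return fmt.Errorf("failed to patch job status: %v", err)
    }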
@@ -1032,7 +1032,7 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
         toUpdate.Status.NumberAvailable = int32(numberAvailable)
         toUpdate.Status.NumberUnavailable = int32(numberUnavailable)

-        if _, updateErr = dsClient.UpdateStatus(context.TODO(), toUpdate); updateErr == nil {
+        if _, updateErr = dsClient.UpdateStatus(context.TODO(), toUpdate, metav1.UpdateOptions{}); updateErr == nil {
             return nil
         }

@@ -95,7 +95,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
         if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
             toUpdate := history.DeepCopy()
             toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
-            history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate)
+            history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
             if err != nil {
                 return nil, nil, err
             }
@@ -130,7 +130,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
         if cur.Revision < currRevision {
             toUpdate := cur.DeepCopy()
             toUpdate.Revision = currRevision
-            _, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate)
+            _, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
             if err != nil {
                 return nil, nil, err
             }
@@ -220,7 +220,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor
                 toUpdate.Labels = make(map[string]string)
             }
             toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
-            _, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(context.TODO(), toUpdate)
+            _, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
             if err != nil {
                 return nil, err
             }
@@ -323,7 +323,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
         Revision: revision,
     }

-    history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), history)
+    history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), history, metav1.CreateOptions{})
     if outerErr := err; errors.IsAlreadyExists(outerErr) {
         // TODO: Is it okay to get from historyLister?
         existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
@@ -353,7 +353,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
             currDS.Status.CollisionCount = new(int32)
         }
         *currDS.Status.CollisionCount++
-        _, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(context.TODO(), currDS)
+        _, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(context.TODO(), currDS, metav1.UpdateOptions{})
         if updateErr != nil {
             return nil, updateErr
         }
@@ -589,7 +589,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
         dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
         if d.Status.ObservedGeneration < d.Generation {
             d.Status.ObservedGeneration = d.Generation
-            dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
+            dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
         }
         return nil
     }
@@ -22,6 +22,7 @@ import (
     "reflect"
     "time"

+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/klog"

     apps "k8s.io/api/apps/v1"
@@ -113,7 +114,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new

     newDeployment := d
     newDeployment.Status = newStatus
-    _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment)
+    _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment, metav1.UpdateOptions{})
     return err
 }

@@ -21,6 +21,7 @@ import (
     "fmt"
     "strconv"

+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/klog"

     apps "k8s.io/api/apps/v1"
@@ -114,7 +115,7 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess
 func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
     klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
     setRollbackTo(d, nil)
-    _, err := dc.client.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d)
+    _, err := dc.client.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d, metav1.UpdateOptions{})
     return err
 }

@@ -98,7 +98,7 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
     }

     var err error
-    d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
+    d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
     return err
 }

@@ -155,7 +155,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
         minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
         if annotationsUpdated || minReadySecondsNeedsUpdate {
             rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
-            return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy)
+            return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{})
         }

         // Should use the revision in existingNewRS's annotation, since it set by before
@@ -173,7 +173,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old

         if needsUpdate {
             var err error
-            if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d); err != nil {
+            if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}); err != nil {
                 return nil, err
             }
         }
@@ -220,7 +220,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
     // hash collisions. If there is any other error, we need to report it in the status of
     // the Deployment.
     alreadyExists := false
-    createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS)
+    createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS, metav1.CreateOptions{})
     switch {
     // We may end up hitting this due to a slow cache or a fast resync of the Deployment.
     case errors.IsAlreadyExists(err):
@@ -252,7 +252,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
         *d.Status.CollisionCount++
         // Update the collisionCount for the Deployment and let it requeue by returning the original
         // error.
-        _, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
+        _, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
         if dErr == nil {
             klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
         }
@@ -268,7 +268,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
         // We don't really care about this error at this point, since we have a bigger issue to report.
         // TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
         // these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
-        _, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
+        _, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
         }
         dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -285,7 +285,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
|
|||||||
needsUpdate = true
|
needsUpdate = true
|
||||||
}
|
}
|
||||||
if needsUpdate {
|
if needsUpdate {
|
||||||
_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
|
_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
|
||||||
}
|
}
|
||||||
return createdRS, err
|
return createdRS, err
|
||||||
}
|
}
|
||||||
@ -420,7 +420,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in
|
|||||||
rsCopy := rs.DeepCopy()
|
rsCopy := rs.DeepCopy()
|
||||||
*(rsCopy.Spec.Replicas) = newScale
|
*(rsCopy.Spec.Replicas) = newScale
|
||||||
deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
|
deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
|
||||||
rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy)
|
rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{})
|
||||||
if err == nil && sizeNeedsUpdate {
|
if err == nil && sizeNeedsUpdate {
|
||||||
scaled = true
|
scaled = true
|
||||||
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
|
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
|
||||||
@ -478,7 +478,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet,
|
|||||||
|
|
||||||
newDeployment := d
|
newDeployment := d
|
||||||
newDeployment.Status = newStatus
|
newDeployment.Status = newStatus
|
||||||
_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment)
|
_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment, metav1.UpdateOptions{})
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
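Every hunk above follows the same mechanical migration: the generated clientset's mutating verbs gain a trailing options argument. A minimal sketch of the new call shape outside any controller (the function name and variables are illustrative, not from this diff):

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// updateDeploymentStatus shows the new write-call shape: an explicit,
// usually empty, options literal after the object being written.
func updateDeploymentStatus(c clientset.Interface, d *appsv1.Deployment) error {
	_, err := c.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
	return err
}

Passing metav1.UpdateOptions{} preserves the previous behavior; fields such as DryRun or FieldManager can now be set per call instead of being baked into the client.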
|
@@ -792,6 +792,6 @@ func (dc *DisruptionController) updatePdbStatus(pdb *policy.PodDisruptionBudget,
 func (dc *DisruptionController) writePdbStatus(pdb *policy.PodDisruptionBudget) error {
 	// If this update fails, don't retry it. Allow the failure to get handled &
 	// retried in `processNextWorkItem()`.
-	_, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(context.TODO(), pdb)
+	_, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(context.TODO(), pdb, metav1.UpdateOptions{})
 	return err
 }
@@ -1054,14 +1054,14 @@ func TestUpdatePDBStatusRetries(t *testing.T) {
 
 	// Create a PDB and 3 pods that match it.
 	pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
-	pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb)
+	pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb, metav1.CreateOptions{})
 	if err != nil {
 		t.Fatalf("Failed to create PDB: %v", err)
 	}
 	podNames := []string{"moe", "larry", "curly"}
 	for _, name := range podNames {
 		pod, _ := newPod(t, name)
-		_, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod)
+		_, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Failed to create pod: %v", err)
 		}
@@ -513,10 +513,10 @@ func (e *EndpointController) syncService(key string) error {
 	klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
 	if createEndpoints {
 		// No previous endpoints, create them
-		_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(context.TODO(), newEndpoints)
+		_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(context.TODO(), newEndpoints, metav1.CreateOptions{})
 	} else {
 		// Pre-existing
-		_, err = e.client.CoreV1().Endpoints(service.Namespace).Update(context.TODO(), newEndpoints)
+		_, err = e.client.CoreV1().Endpoints(service.Namespace).Update(context.TODO(), newEndpoints, metav1.UpdateOptions{})
 	}
 	if err != nil {
 		if createEndpoints && errors.IsForbidden(err) {
@@ -250,7 +250,7 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Expected no error adding EndpointSlice: %v", err)
 	}
-	_, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(context.TODO(), endpointSlice)
+	_, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
 	if err != nil {
 		t.Fatalf("Expected no error creating EndpointSlice: %v", err)
 	}
@@ -306,7 +306,7 @@ func TestSyncServiceFull(t *testing.T) {
 		},
 	}
 	esController.serviceStore.Add(service)
-	_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service)
+	_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
 	assert.Nil(t, err, "Expected no error creating service")
 
 	// run through full sync service loop
@@ -369,7 +369,7 @@ func createService(t *testing.T, esController *endpointSliceController, namespac
 		},
 	}
 	esController.serviceStore.Add(service)
-	_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service)
+	_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
 	assert.Nil(t, err, "Expected no error creating service")
 	return service
 }
 
@@ -206,7 +206,7 @@ func (r *reconciler) finalize(
 
 	for _, endpointSlice := range slicesToCreate {
 		addTriggerTimeAnnotation(endpointSlice, triggerTime)
-		_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice)
+		_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
 		if err != nil {
 			// If the namespace is terminating, creates will continue to fail. Simply drop the item.
 			if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
@@ -221,7 +221,7 @@ func (r *reconciler) finalize(
 
 	for _, endpointSlice := range slicesToUpdate {
 		addTriggerTimeAnnotation(endpointSlice, triggerTime)
-		_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice)
+		_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice, metav1.UpdateOptions{})
 		if err != nil {
 			errs = append(errs, fmt.Errorf("Error updating %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err))
 		} else {

@@ -204,7 +204,7 @@ func TestReconcile1EndpointSlice(t *testing.T) {
 	svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace)
 	endpointSlice1 := newEmptyEndpointSlice(1, namespace, endpointMeta, svc)
 
-	_, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice1)
+	_, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice1, metav1.CreateOptions{})
 	assert.Nil(t, createErr, "Expected no error creating endpoint slice")
 
 	numActionsBefore := len(client.Actions())
@@ -828,7 +828,7 @@ func portsAndAddressTypeEqual(slice1, slice2 discovery.EndpointSlice) bool {
 func createEndpointSlices(t *testing.T, client *fake.Clientset, namespace string, endpointSlices []*discovery.EndpointSlice) {
 	t.Helper()
 	for _, endpointSlice := range endpointSlices {
-		_, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice)
+		_, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Expected no error creating Endpoint Slice, got: %v", err)
 		}
@@ -249,7 +249,7 @@ func (rh *realHistory) CreateControllerRevision(parent metav1.Object, revision *
 	// Update the revisions name
 	clone.Name = ControllerRevisionName(parent.GetName(), hash)
 	ns := parent.GetNamespace()
-	created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(context.TODO(), clone)
+	created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(context.TODO(), clone, metav1.CreateOptions{})
 	if errors.IsAlreadyExists(err) {
 		exists, err := rh.client.AppsV1().ControllerRevisions(ns).Get(context.TODO(), clone.Name, metav1.GetOptions{})
 		if err != nil {
@@ -272,7 +272,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio
 		return nil
 	}
 	clone.Revision = newRevision
-	updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(context.TODO(), clone)
+	updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(context.TODO(), clone, metav1.UpdateOptions{})
 	if updateErr == nil {
 		return nil
 	}
@@ -328,14 +328,14 @@ func (rh *realHistory) AdoptControllerRevision(parent metav1.Object, parentKind
 	}
 	// Use strategic merge patch to add an owner reference indicating a controller ref
 	return rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Patch(context.TODO(), revision.GetName(),
-		types.StrategicMergePatchType, patchBytes)
+		types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 }
 
 func (rh *realHistory) ReleaseControllerRevision(parent metav1.Object, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) {
 	// Use strategic merge patch to add an owner reference indicating a controller ref
 	released, err := rh.client.AppsV1().ControllerRevisions(revision.GetNamespace()).Patch(context.TODO(), revision.GetName(),
 		types.StrategicMergePatchType,
-		[]byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, parent.GetUID(), revision.UID)))
+		[]byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, parent.GetUID(), revision.UID)), metav1.PatchOptions{})
 
 	if err != nil {
 		if errors.IsNotFound(err) {
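Patch is the one verb where the new argument is not simply appended: metav1.PatchOptions{} is inserted before the variadic subresources, so any trailing "status" argument shifts one position to the right. A hedged sketch of the resulting call shape (the function name is illustrative):

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
)

// patchRevision sketches the new Patch signature: the options struct comes
// before the variadic subresource list.
func patchRevision(c clientset.Interface, ns, name string, patch []byte) (*appsv1.ControllerRevision, error) {
	return c.AppsV1().ControllerRevisions(ns).Patch(context.TODO(), name,
		types.StrategicMergePatchType, patch, metav1.PatchOptions{})
}

A status patch would pass "status" as a final argument after the options struct, as the service-controller hunk further below does.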
|
@@ -261,7 +261,7 @@ func TestRealHistory_CreateControllerRevision(t *testing.T) {
 
 	var collisionCount int32
 	for _, item := range test.existing {
-		_, err := client.AppsV1().ControllerRevisions(item.parent.GetNamespace()).Create(context.TODO(), item.revision)
+		_, err := client.AppsV1().ControllerRevisions(item.parent.GetNamespace()).Create(context.TODO(), item.revision, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatal(err)
 		}

@@ -832,7 +832,7 @@ func (jm *JobController) updateJobStatus(job *batch.Job) error {
 			break
 		}
 		newJob.Status = job.Status
-		if _, err = jobClient.UpdateStatus(context.TODO(), newJob); err == nil {
+		if _, err = jobClient.UpdateStatus(context.TODO(), newJob, metav1.UpdateOptions{}); err == nil {
 			break
 		}
 	}

@@ -269,7 +269,7 @@ func (d *namespacedResourcesDeleter) updateNamespaceStatusFunc(namespace *v1.Nam
 	newNamespace.ObjectMeta = namespace.ObjectMeta
 	newNamespace.Status = *namespace.Status.DeepCopy()
 	newNamespace.Status.Phase = v1.NamespaceTerminating
-	return d.nsClient.UpdateStatus(context.TODO(), &newNamespace)
+	return d.nsClient.UpdateStatus(context.TODO(), &newNamespace, metav1.UpdateOptions{})
 }
 
 // finalized returns true if the namespace.Spec.Finalizers is an empty list
@@ -551,7 +551,7 @@ func (d *namespacedResourcesDeleter) deleteAllContent(ns *v1.Namespace) (int64,
 	// we need to reflect that information. Recall that additional finalizers can be set on namespaces, so this finalizer may clear itself and
 	// NOT remove the resource instance.
 	if hasChanged := conditionUpdater.Update(ns); hasChanged {
-		if _, err = d.nsClient.UpdateStatus(context.TODO(), ns); err != nil {
+		if _, err = d.nsClient.UpdateStatus(context.TODO(), ns, metav1.UpdateOptions{}); err != nil {
 			utilruntime.HandleError(fmt.Errorf("couldn't update status condition for namespace %q: %v", namespace, err))
 		}
 	}

@@ -103,7 +103,7 @@ func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRang
 		return err
 	}
 
-	_, err = a.k8s.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, bytes)
+	_, err = a.k8s.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, bytes, metav1.PatchOptions{})
 	return err
 }
 
@@ -1149,7 +1149,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
 		_, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
 
 		if !apiequality.Semantic.DeepEqual(currentReadyCondition, &observedReadyCondition) {
			if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), node); err != nil {
+			if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{}); err != nil {
 				klog.Errorf("Error updating node %s: %v", node.Name, err)
 				return gracePeriod, observedReadyCondition, currentReadyCondition, err
 			}
@@ -2759,7 +2759,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 
 	// Make node3 healthy again.
 	node2.Status = healthyNodeNewStatus
-	_, err = fakeNodeHandler.UpdateStatus(context.TODO(), node2)
+	_, err = fakeNodeHandler.UpdateStatus(context.TODO(), node2, metav1.UpdateOptions{})
 	if err != nil {
 		t.Errorf(err.Error())
 		return
@@ -2905,12 +2905,12 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
 
 	node0.Status = newNodeStatus
 	node1.Status = healthyNodeNewStatus
-	_, err = fakeNodeHandler.UpdateStatus(context.TODO(), node0)
+	_, err = fakeNodeHandler.UpdateStatus(context.TODO(), node0, metav1.UpdateOptions{})
 	if err != nil {
 		t.Errorf(err.Error())
 		return
 	}
-	_, err = fakeNodeHandler.UpdateStatus(context.TODO(), node1)
+	_, err = fakeNodeHandler.UpdateStatus(context.TODO(), node1, metav1.UpdateOptions{})
 	if err != nil {
 		t.Errorf(err.Error())
 		return
@@ -3120,7 +3120,7 @@ func TestTaintsNodeByCondition(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		fakeNodeHandler.Update(context.TODO(), test.Node)
+		fakeNodeHandler.Update(context.TODO(), test.Node, metav1.UpdateOptions{})
 		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -3331,7 +3331,7 @@ func TestReconcileNodeLabels(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		fakeNodeHandler.Update(context.TODO(), test.Node)
+		fakeNodeHandler.Update(context.TODO(), test.Node, metav1.UpdateOptions{})
 		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
@@ -3355,7 +3355,6 @@ func TestReconcileNodeLabels(t *testing.T) {
 			if actualValue != expectedValue {
 				t.Errorf("%s: label %q: expected value %q, got value %q", test.Name, key, expectedValue, actualValue)
 			}
-
 		}
 	}
 }

@@ -1113,7 +1113,7 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto
 	}
 	hpav1 := hpaRaw.(*autoscalingv1.HorizontalPodAutoscaler)
 
-	_, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(context.TODO(), hpav1)
+	_, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(context.TODO(), hpav1, metav1.UpdateOptions{})
 	if err != nil {
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
 		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)

@@ -346,7 +346,7 @@ func TestGCOrphaned(t *testing.T) {
 
 			// Execute planned nodes changes
 			for _, node := range test.addedClientNodes {
-				client.CoreV1().Nodes().Create(context.TODO(), node)
+				client.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
 			}
 			for _, node := range test.deletedClientNodes {
 				client.CoreV1().Nodes().Delete(context.TODO(), node.Name, &metav1.DeleteOptions{})

@@ -1159,7 +1159,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 	}
 
 	oldRS := newReplicaSet(1, map[string]string{"foo": "bar"})
-	oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(context.TODO(), oldRS)
+	oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(context.TODO(), oldRS, metav1.CreateOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1240,7 +1240,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 
 	newRS := oldRS.DeepCopy()
 	newRS.UID = uuid.NewUUID()
-	newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(context.TODO(), newRS)
+	newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(context.TODO(), newRS, metav1.CreateOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -64,7 +64,7 @@ func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSe
 			fmt.Sprintf("sequence No: %v->%v", rs.Status.ObservedGeneration, newStatus.ObservedGeneration))
 
 		rs.Status = newStatus
-		updatedRS, updateErr = c.UpdateStatus(context.TODO(), rs)
+		updatedRS, updateErr = c.UpdateStatus(context.TODO(), rs, metav1.UpdateOptions{})
 		if updateErr == nil {
 			return updatedRS, nil
 		}

@@ -203,16 +203,22 @@ type conversionClient struct {
 	v1client.ReplicationControllerInterface
 }
 
-func (c conversionClient) Create(ctx context.Context, rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
-	return convertCall(ctx, c.ReplicationControllerInterface.Create, rs)
+func (c conversionClient) Create(ctx context.Context, rs *apps.ReplicaSet, opts metav1.CreateOptions) (*apps.ReplicaSet, error) {
+	return convertCall(func(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
+		return c.ReplicationControllerInterface.Create(ctx, rc, opts)
+	}, rs)
 }
 
-func (c conversionClient) Update(ctx context.Context, rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
-	return convertCall(ctx, c.ReplicationControllerInterface.Update, rs)
+func (c conversionClient) Update(ctx context.Context, rs *apps.ReplicaSet, opts metav1.UpdateOptions) (*apps.ReplicaSet, error) {
+	return convertCall(func(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
+		return c.ReplicationControllerInterface.Update(ctx, rc, opts)
+	}, rs)
 }
 
-func (c conversionClient) UpdateStatus(ctx context.Context, rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
-	return convertCall(ctx, c.ReplicationControllerInterface.UpdateStatus, rs)
+func (c conversionClient) UpdateStatus(ctx context.Context, rs *apps.ReplicaSet, opts metav1.UpdateOptions) (*apps.ReplicaSet, error) {
+	return convertCall(func(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
+		return c.ReplicationControllerInterface.UpdateStatus(ctx, rc, opts)
+	}, rs)
 }
 
 func (c conversionClient) Get(ctx context.Context, name string, options metav1.GetOptions) (*apps.ReplicaSet, error) {
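conversionClient can no longer hand convertCall a bare method value: Create, Update, and UpdateStatus now take different option types, so no single function signature covers all three verbs. The diff's fix is to have convertCall accept a one-argument function and let each verb capture ctx and its own options in a closure. A self-contained toy sketch of that refactoring (every name here is invented for illustration, none is from the repo):

package main

import "context"

type widget struct{ name string }

type createOptions struct{}
type updateOptions struct{}

// store stands in for the wrapped client whose verbs take different option types.
type store struct{}

func (store) create(_ context.Context, w widget, _ createOptions) (widget, error) { return w, nil }
func (store) update(_ context.Context, w widget, _ updateOptions) (widget, error) { return w, nil }

// callVia is the convertCall analogue: it only sees a uniform one-argument
// function, so it no longer needs ctx or options parameters itself.
func callVia(fn func(widget) (widget, error), w widget) (widget, error) {
	return fn(w)
}

func main() {
	s, ctx := store{}, context.TODO()
	// Each verb adapts its own signature by closing over ctx and opts.
	callVia(func(w widget) (widget, error) { return s.create(ctx, w, createOptions{}) }, widget{"a"})
	callVia(func(w widget) (widget, error) { return s.update(ctx, w, updateOptions{}) }, widget{"b"})
}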
@@ -236,7 +242,7 @@ func (c conversionClient) Watch(ctx context.Context, opts metav1.ListOptions) (w
 	return nil, errors.New("Watch() is not implemented for conversionClient")
 }
 
-func (c conversionClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.ReplicaSet, err error) {
+func (c conversionClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apps.ReplicaSet, err error) {
 	// This is not used by RSC.
 	return nil, errors.New("Patch() is not implemented for conversionClient")
 }
@@ -246,7 +252,7 @@ func (c conversionClient) GetScale(ctx context.Context, name string, options met
 	return nil, errors.New("GetScale() is not implemented for conversionClient")
 }
 
-func (c conversionClient) UpdateScale(ctx context.Context, name string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
+func (c conversionClient) UpdateScale(ctx context.Context, name string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
 	// This is not used by RSC.
 	return nil, errors.New("UpdateScale() is not implemented for conversionClient")
 }
@@ -275,12 +281,12 @@ func convertList(rcList *v1.ReplicationControllerList) (*apps.ReplicaSetList, er
 	return rsList, nil
 }
 
-func convertCall(ctx context.Context, fn func(context.Context, *v1.ReplicationController) (*v1.ReplicationController, error), rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
+func convertCall(fn func(*v1.ReplicationController) (*v1.ReplicationController, error), rs *apps.ReplicaSet) (*apps.ReplicaSet, error) {
 	rc, err := convertRStoRC(rs)
 	if err != nil {
 		return nil, err
 	}
-	result, err := fn(ctx, rc)
+	result, err := fn(rc)
 	if err != nil {
 		return nil, err
 	}

@@ -356,7 +356,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ
 
 	// there was a change observed by this controller that requires we update quota
 	if dirty {
-		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage)
+		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage, metav1.UpdateOptions{})
 		if err != nil {
 			errors = append(errors, err)
 		}

@@ -12,6 +12,7 @@ go_library(
     deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",

@@ -310,7 +310,7 @@ func TestSyncLoadBalancerIfNeeded(t *testing.T) {
 			controller, cloud, client := newController()
 			cloud.Exists = tc.lbExists
 			key := fmt.Sprintf("%s/%s", tc.service.Namespace, tc.service.Name)
-			if _, err := client.CoreV1().Services(tc.service.Namespace).Create(context.TODO(), tc.service); err != nil {
+			if _, err := client.CoreV1().Services(tc.service.Namespace).Create(context.TODO(), tc.service, metav1.CreateOptions{}); err != nil {
 				t.Fatalf("Failed to prepare service %s for testing: %v", key, err)
 			}
 			client.ClearActions()
@@ -603,7 +603,7 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) {
 
 	for _, tc := range testCases {
 		newSvc := tc.updateFn(tc.svc)
-		if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
+		if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
 			t.Fatalf("Failed to prepare service %s for testing: %v", tc.key, err)
 		}
 		obtErr := controller.processServiceCreateOrUpdate(newSvc, tc.key)
@@ -1222,7 +1222,7 @@ func TestAddFinalizer(t *testing.T) {
 			s := &Controller{
 				kubeClient: c,
 			}
-			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
+			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
 				t.Fatalf("Failed to prepare service for testing: %v", err)
 			}
 			if err := s.addFinalizer(tc.svc); err != nil {
@@ -1276,7 +1276,7 @@ func TestRemoveFinalizer(t *testing.T) {
 			s := &Controller{
 				kubeClient: c,
 			}
-			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
+			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
 				t.Fatalf("Failed to prepare service for testing: %v", err)
 			}
 			if err := s.removeFinalizer(tc.svc); err != nil {
@@ -1376,7 +1376,7 @@ func TestPatchStatus(t *testing.T) {
 			s := &Controller{
 				kubeClient: c,
 			}
-			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
+			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
 				t.Fatalf("Failed to prepare service for testing: %v", err)
 			}
 			if err := s.patchStatus(tc.svc, &tc.svc.Status.LoadBalancer, tc.newStatus); err != nil {

@@ -22,6 +22,7 @@ import (
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -38,7 +39,7 @@ func patch(c v1core.CoreV1Interface, oldSvc *v1.Service, newSvc *v1.Service) (*v
 		return nil, err
 	}
 
-	return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status")
+	return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
 }
 
 func getPatchBytes(oldSvc *v1.Service, newSvc *v1.Service) ([]byte, error) {
@@ -45,7 +45,7 @@ func TestPatch(t *testing.T) {
 	// Issue a separate update and verify patch doesn't fail after this.
 	svcToUpdate := svcOrigin.DeepCopy()
 	addAnnotations(svcToUpdate)
-	if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate); err != nil {
+	if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate, metav1.UpdateOptions{}); err != nil {
 		t.Fatalf("Failed to update service: %v", err)
 	}
 
@@ -213,7 +213,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
 		// TODO eliminate this once the fake client can handle creation without NS
 		sa.Namespace = ns.Name
 
-		if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &sa); err != nil && !apierrors.IsAlreadyExists(err) {
+		if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &sa, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
 			// we can safely ignore terminating namespace errors
 			if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
 				createFailures = append(createFailures, err)
@@ -407,7 +407,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
 	}
 
 	// Save the secret
-	createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(context.TODO(), secret)
+	createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
 	if err != nil {
 		// if the namespace is being terminated, create will fail no matter what
 		if apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
@@ -449,7 +449,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
 
 	// Try to add a reference to the token
 	liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, v1.ObjectReference{Name: secret.Name})
-	if _, err := serviceAccounts.Update(context.TODO(), liveServiceAccount); err != nil {
+	if _, err := serviceAccounts.Update(context.TODO(), liveServiceAccount, metav1.UpdateOptions{}); err != nil {
 		return err
 	}
 
@@ -567,7 +567,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou
 	liveSecret.Annotations[v1.ServiceAccountUIDKey] = string(serviceAccount.UID)
 
 	// Save the secret
-	_, err = secrets.Update(context.TODO(), liveSecret)
+	_, err = secrets.Update(context.TODO(), liveSecret, metav1.UpdateOptions{})
 	if apierrors.IsConflict(err) || apierrors.IsNotFound(err) {
 		// if we got a Conflict error, the secret was updated by someone else, and we'll get an update notification later
 		// if we got a NotFound error, the secret no longer exists, and we don't need to populate a token
@@ -611,7 +611,7 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri
 		}
 	}
 	serviceAccount.Secrets = secrets
-	_, err = serviceAccounts.Update(context.TODO(), serviceAccount)
+	_, err = serviceAccounts.Update(context.TODO(), serviceAccount, metav1.UpdateOptions{})
 	// Ignore NotFound errors when attempting to remove a reference
 	if apierrors.IsNotFound(err) {
 		return nil

@@ -24,6 +24,7 @@ import (
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	errorutils "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientset "k8s.io/client-go/kubernetes"
@@ -78,7 +79,7 @@ func (spc *realStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod
 		return err
 	}
 	// If we created the PVCs attempt to create the Pod
-	_, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod)
+	_, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
 	// sink already exists errors
 	if apierrors.IsAlreadyExists(err) {
 		return err
@@ -114,7 +115,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod
 
 		attemptedUpdate = true
 		// commit the update, retrying on conflicts
-		_, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(context.TODO(), pod)
+		_, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{})
 		if updateErr == nil {
 			return nil
 		}
@@ -183,7 +184,7 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef
 		_, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
 		switch {
 		case apierrors.IsNotFound(err):
-			_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), &claim)
+			_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), &claim, metav1.CreateOptions{})
 			if err != nil {
 				errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err))
 			}

@@ -21,6 +21,7 @@ import (
 	"fmt"
 
 	apps "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientset "k8s.io/client-go/kubernetes"
 	appslisters "k8s.io/client-go/listers/apps/v1"
@@ -54,7 +55,7 @@ func (ssu *realStatefulSetStatusUpdater) UpdateStatefulSetStatus(
 	// don't wait due to limited number of clients, but backoff after the default number of steps
 	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
 		set.Status = *status
-		_, updateErr := ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(context.TODO(), set)
+		_, updateErr := ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(context.TODO(), set, metav1.UpdateOptions{})
 		if updateErr == nil {
 			return nil
 		}
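The StatefulSet status updater above keeps its retry-on-conflict loop; only the innermost call changes. A minimal sketch of that idiom with the new signature (the function and variable names are illustrative, not from this diff):

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updateSetStatus retries the write on conflict; the options literal is the
// only part this migration touches.
func updateSetStatus(c clientset.Interface, set *appsv1.StatefulSet, status *appsv1.StatefulSetStatus) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		set.Status = *status
		_, err := c.AppsV1().StatefulSets(set.Namespace).UpdateStatus(context.TODO(), set, metav1.UpdateOptions{})
		return err
	})
}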
|
@@ -111,7 +111,7 @@ func (m *FakeLegacyHandler) Nodes() v1core.NodeInterface {
 }
 
 // Create adds a new Node to the fake store.
-func (m *FakeNodeHandler) Create(_ context.Context, node *v1.Node) (*v1.Node, error) {
+func (m *FakeNodeHandler) Create(_ context.Context, node *v1.Node, _ metav1.CreateOptions) (*v1.Node, error) {
 	m.lock.Lock()
 	defer func() {
 		m.RequestCount++
@@ -202,7 +202,7 @@ func (m *FakeNodeHandler) DeleteCollection(_ context.Context, opt *metav1.Delete
 }
 
 // Update updates a Node in the fake store.
-func (m *FakeNodeHandler) Update(_ context.Context, node *v1.Node) (*v1.Node, error) {
+func (m *FakeNodeHandler) Update(_ context.Context, node *v1.Node, _ metav1.UpdateOptions) (*v1.Node, error) {
 	m.lock.Lock()
 	defer func() {
 		m.RequestCount++
@@ -221,7 +221,7 @@ func (m *FakeNodeHandler) Update(_ context.Context, node *v1.Node) (*v1.Node, er
 }
 
 // UpdateStatus updates a status of a Node in the fake store.
-func (m *FakeNodeHandler) UpdateStatus(_ context.Context, node *v1.Node) (*v1.Node, error) {
+func (m *FakeNodeHandler) UpdateStatus(_ context.Context, node *v1.Node, _ metav1.UpdateOptions) (*v1.Node, error) {
 	m.lock.Lock()
 	defer func() {
 		m.RequestCount++
@@ -266,7 +266,7 @@ func (m *FakeNodeHandler) UpdateStatus(_ context.Context, node *v1.Node) (*v1.No
 // PatchStatus patches a status of a Node in the fake store.
 func (m *FakeNodeHandler) PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error) {
 	m.RequestCount++
-	return m.Patch(ctx, nodeName, types.StrategicMergePatchType, data, "status")
+	return m.Patch(ctx, nodeName, types.StrategicMergePatchType, data, metav1.PatchOptions{}, "status")
 }
 
 // Watch watches Nodes in a fake store.
@@ -275,7 +275,7 @@ func (m *FakeNodeHandler) Watch(_ context.Context, opts metav1.ListOptions) (wat
 }
 
 // Patch patches a Node in the fake store.
-func (m *FakeNodeHandler) Patch(_ context.Context, name string, pt types.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
+func (m *FakeNodeHandler) Patch(_ context.Context, name string, pt types.PatchType, data []byte, _ metav1.PatchOptions, subresources ...string) (*v1.Node, error) {
 	m.lock.Lock()
 	defer func() {
 		m.RequestCount++
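Hand-written test doubles such as FakeNodeHandler have to change in lock-step with the generated interfaces even when they ignore the new parameter, and the blank identifier keeps that explicit. A small sketch of the same pattern for a hypothetical pod fake (invented for illustration, not part of this diff):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// fakePods records created pods; it accepts the new options argument and
// deliberately discards it, mirroring FakeNodeHandler above.
type fakePods struct {
	created []*corev1.Pod
}

func (f *fakePods) Create(_ context.Context, pod *corev1.Pod, _ metav1.CreateOptions) (*corev1.Pod, error) {
	f.created = append(f.created, pod)
	return pod, nil
}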
|
@ -14,6 +14,7 @@ go_library(
|
|||||||
"//pkg/controller:go_default_library",
|
"//pkg/controller:go_default_library",
|
||||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||||
"//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
|
"//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
|
||||||
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||||
|
@ -36,6 +36,7 @@ import (
|
|||||||
|
|
||||||
"k8s.io/api/core/v1"
|
"k8s.io/api/core/v1"
|
||||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/apimachinery/pkg/util/json"
|
"k8s.io/apimachinery/pkg/util/json"
|
||||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||||
@ -264,7 +265,7 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes)
|
_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err)
|
klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err)
|
||||||
return err
|
return err
|
||||||
|
@ -110,7 +110,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
|
|||||||
|
|
||||||
var updatedPod *v1.Pod
|
var updatedPod *v1.Pod
|
||||||
var err error
|
var err error
|
||||||
if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod); err != nil {
|
if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return updatedPod, nil
|
return updatedPod, nil
|
||||||
@ -137,7 +137,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
|
klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
|
||||||
_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod)
|
_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if apierrors.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
// NotFound error means that pod was already deleted.
|
// NotFound error means that pod was already deleted.
|
||||||
|
@ -270,7 +270,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 
 	for _, newPod := range extraPods1 {
 		// Add a new pod between ASW and DSW ppoulators
-		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod)
+		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
 		}
@ -287,7 +287,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 
 	for _, newPod := range extraPods2 {
 		// Add a new pod between DSW ppoulator and reconciler run
-		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod)
+		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
 		}
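Test code migrates identically, and the fake clientset implements the same interfaces, so it needs the options too. A small sketch, assuming a client-go release that includes this change:

    package example

    import (
    	"context"
    	"testing"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes/fake"
    )

    // TestCreateWithOptions seeds a fake clientset using the new
    // three-argument Create.
    func TestCreateWithOptions(t *testing.T) {
    	client := fake.NewSimpleClientset()
    	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "default"}}
    	if _, err := client.CoreV1().Pods(pod.Namespace).Create(
    		context.TODO(), pod, metav1.CreateOptions{}); err != nil {
    		t.Fatalf("failed to create pod: %v", err)
    	}
    }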
@ -203,7 +203,7 @@ func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSi
 		return fmt.Errorf("Error Creating two way merge patch for PV %q with error : %v", pvClone.Name, err)
 	}
 
-	_, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, commontypes.StrategicMergePatchType, patchBytes)
+	_, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, commontypes.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 
 	if updateErr != nil {
 		klog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr)
@ -754,7 +754,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo
 		return claim, nil
 	}
 
-	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(context.TODO(), claimClone)
+	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(context.TODO(), claimClone, metav1.UpdateOptions{})
 	if err != nil {
 		klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err)
 		return newClaim, err
@ -810,7 +810,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV
 	volumeClone.Status.Phase = phase
 	volumeClone.Status.Message = message
 
-	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), volumeClone)
+	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), volumeClone, metav1.UpdateOptions{})
 	if err != nil {
 		klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err)
 		return newVol, err
@ -872,7 +872,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV
 func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, updateCache bool) (*v1.PersistentVolume, error) {
 	claimKey := claimrefToClaimKey(volumeClone.Spec.ClaimRef)
 	klog.V(2).Infof("claim %q bound to volume %q", claimKey, volumeClone.Name)
-	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone)
+	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{})
 	if err != nil {
 		klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimKey, err)
 		return newVol, err
@ -924,7 +924,7 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo
 
 	if dirty {
 		klog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim))
-		newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone)
+		newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
 		if err != nil {
 			klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err)
 			return newClaim, err
@ -1011,7 +1011,7 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume
 		volumeClone.Spec.ClaimRef.UID = ""
 	}
 
-	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone)
+	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{})
 	if err != nil {
 		klog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err)
 		return err
@ -1515,7 +1515,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
 	for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
 		klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name)
 		var newVol *v1.PersistentVolume
-		if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume); err == nil || apierrors.IsAlreadyExists(err) {
+		if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume, metav1.CreateOptions{}); err == nil || apierrors.IsAlreadyExists(err) {
 			// Save succeeded.
 			if err != nil {
 				klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim))
@ -1631,7 +1631,7 @@ func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.Persist
 	newClaim := claim.DeepCopy()
 	delete(newClaim.Annotations, pvutil.AnnSelectedNode)
 	// Try to update the PVC object
-	if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(context.TODO(), newClaim); err != nil {
+	if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(context.TODO(), newClaim, metav1.UpdateOptions{}); err != nil {
 		klog.V(4).Infof("Failed to delete annotation 'pvutil.AnnSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err)
 		return
 	}
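The provisioner's save loop is the one spot where the surrounding logic matters: Create calls that deliberately tolerate AlreadyExists keep that behavior, and only the options argument is new. A hedged sketch of the same retry-and-tolerate shape, with the retry count and object construction elided:

    package example

    import (
    	"context"

    	v1 "k8s.io/api/core/v1"
    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // savePV retries Create and treats AlreadyExists as success, the same
    // tolerance the PV controller shows above.
    func savePV(client kubernetes.Interface, pv *v1.PersistentVolume, retries int) error {
    	var err error
    	for i := 0; i < retries; i++ {
    		_, err = client.CoreV1().PersistentVolumes().Create(
    			context.TODO(), pv, metav1.CreateOptions{})
    		if err == nil || apierrors.IsAlreadyExists(err) {
    			return nil // saved, or already saved by a previous attempt
    		}
    	}
    	return err
    }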
@ -322,7 +322,7 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(claim *v
 	if !modified {
 		return claimClone, nil
 	}
-	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone)
+	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
 	}
@ -339,7 +339,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotations(volume
 	if !modified {
 		return volumeClone, nil
 	}
-	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone)
+	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
 	}
@ -546,7 +546,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent
 	claimClone := claim.DeepCopy()
 	metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, pvutil.AnnStorageProvisioner, provisionerName)
 	updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, pvutil.AnnStorageProvisioner)
-	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone)
+	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
 	if err != nil {
 		return newClaim, err
 	}
@ -189,7 +189,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
 	}
 	claimClone := pvc.DeepCopy()
 	claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer)
-	_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone)
+	_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
 	if err != nil {
 		klog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
 		return err
@ -201,7 +201,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
 func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error {
 	claimClone := pvc.DeepCopy()
 	claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)
-	_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone)
+	_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
 	if err != nil {
 		klog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
 		return err
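The finalizer helpers show the write discipline these controllers share: never mutate the informer's cached object, DeepCopy first, then Update with the explicit options. A hedged sketch of that shape; the finalizer string is illustrative, not one defined by this commit:

    package example

    import (
    	"context"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // addProtectionFinalizer clones before writing so the shared cache
    // copy stays untouched, then updates with the new UpdateOptions.
    func addProtectionFinalizer(client kubernetes.Interface, pvc *v1.PersistentVolumeClaim) error {
    	clone := pvc.DeepCopy()
    	clone.ObjectMeta.Finalizers = append(clone.ObjectMeta.Finalizers, "example.io/protection")
    	_, err := client.CoreV1().PersistentVolumeClaims(clone.Namespace).Update(
    		context.TODO(), clone, metav1.UpdateOptions{})
    	return err
    }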
@ -11,6 +11,7 @@ go_library(
         "//pkg/volume/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
@ -23,6 +23,7 @@ import (
 
 	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	coreinformers "k8s.io/client-go/informers/core/v1"
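Because the options structs live in apimachinery, each touched file gains the same dependency pair: the metav1 Go import above and the matching "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library" Bazel dep. Any write call then has the types in scope, as in this minimal sketch:

    package example

    import (
    	"context"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // touchConfigMap exists only to show that metav1 must now be imported
    // wherever a clientset write happens, since the options come from it.
    func touchConfigMap(client kubernetes.Interface, cm *v1.ConfigMap) error {
    	_, err := client.CoreV1().ConfigMaps(cm.Namespace).Update(
    		context.TODO(), cm, metav1.UpdateOptions{})
    	return err
    }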
@ -162,7 +163,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
 	}
 	pvClone := pv.DeepCopy()
 	pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
-	_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone)
+	_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone, metav1.UpdateOptions{})
 	if err != nil {
 		klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err)
 		return err
@ -174,7 +175,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
 func (c *Controller) removeFinalizer(pv *v1.PersistentVolume) error {
 	pvClone := pv.DeepCopy()
 	pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)
-	_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone)
+	_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone, metav1.UpdateOptions{})
 	if err != nil {
 		klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err)
 		return err
@ -424,7 +424,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
 		// TODO: does it hurt if we make an api call and nothing needs to be updated?
 		claimKey := claimToClaimKey(binding.pvc)
 		klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name)
-		newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv)
+		newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv, metav1.UpdateOptions{})
 		if err != nil {
 			klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err)
 			return err
@ -439,7 +439,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
 	// PV controller is expect to signal back by removing related annotations if actual provisioning fails
 	for i, claim = range claimsToProvision {
 		klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
-		newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim)
+		newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{})
 		if err != nil {
 			return err
 		}
@ -295,7 +295,7 @@ func (env *testEnv) initVolumes(cachedPVs []*v1.PersistentVolume, apiPVs []*v1.P
 
 func (env *testEnv) updateVolumes(t *testing.T, pvs []*v1.PersistentVolume, waitCache bool) {
 	for _, pv := range pvs {
-		if _, err := env.client.CoreV1().PersistentVolumes().Update(context.TODO(), pv); err != nil {
+		if _, err := env.client.CoreV1().PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}); err != nil {
 			t.Fatalf("failed to update PV %q", pv.Name)
 		}
 	}
@ -321,7 +321,7 @@ func (env *testEnv) updateVolumes(t *testing.T, pvs []*v1.PersistentVolume, wait
 
 func (env *testEnv) updateClaims(t *testing.T, pvcs []*v1.PersistentVolumeClaim, waitCache bool) {
 	for _, pvc := range pvcs {
-		if _, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc); err != nil {
+		if _, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc, metav1.UpdateOptions{}); err != nil {
 			t.Fatalf("failed to update PVC %q", getPVCName(pvc))
 		}
 	}
@ -1769,7 +1769,7 @@ func TestBindPodVolumes(t *testing.T) {
 			newPVC := pvc.DeepCopy()
 			newPVC.Spec.VolumeName = pv.Name
 			metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes")
-			if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil {
+			if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil {
 				t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
 			}
 		},
@ -1786,14 +1786,14 @@ func TestBindPodVolumes(t *testing.T) {
 				return
 			}
 			dynamicPV := makeTestPV("dynamic-pv", "node1", "1G", "1", newPVC, waitClass)
-			dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(context.TODO(), dynamicPV)
+			dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(context.TODO(), dynamicPV, metav1.CreateOptions{})
 			if err != nil {
 				t.Errorf("failed to create PV %q: %v", dynamicPV.Name, err)
 				return
 			}
 			newPVC.Spec.VolumeName = dynamicPV.Name
 			metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes")
-			if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil {
+			if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil {
 				t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
 			}
 		},
@ -1869,7 +1869,7 @@ func TestBindPodVolumes(t *testing.T) {
 			newPVC := pvcs[0].DeepCopy()
 			newPVC.Spec.VolumeName = pvNode2.Name
 			metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes")
-			if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil {
+			if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC, metav1.UpdateOptions{}); err != nil {
 				t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
 			}
 		},
@ -1904,13 +1904,13 @@ func TestBindPodVolumes(t *testing.T) {
 
 		// Before Execute
 		if scenario.apiPV != nil {
-			_, err := testEnv.client.CoreV1().PersistentVolumes().Update(context.TODO(), scenario.apiPV)
+			_, err := testEnv.client.CoreV1().PersistentVolumes().Update(context.TODO(), scenario.apiPV, metav1.UpdateOptions{})
 			if err != nil {
 				t.Fatalf("failed to update PV %q", scenario.apiPV.Name)
 			}
 		}
 		if scenario.apiPVC != nil {
-			_, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(context.TODO(), scenario.apiPVC)
+			_, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(context.TODO(), scenario.apiPVC, metav1.UpdateOptions{})
 			if err != nil {
 				t.Fatalf("failed to update PVC %q", getPVCName(scenario.apiPVC))
 			}
@ -22,6 +22,7 @@ go_library(
         "//staging/src/k8s.io/api/rbac/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
@ -31,6 +31,7 @@ import (
 	authorizationv1 "k8s.io/api/authorization/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/cli-runtime/pkg/genericclioptions"
@ -223,7 +224,7 @@ func (o *CanIOptions) RunAccessList() error {
 			Namespace: o.Namespace,
 		},
 	}
-	response, err := o.AuthClient.SelfSubjectRulesReviews().Create(context.TODO(), sar)
+	response, err := o.AuthClient.SelfSubjectRulesReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
 	if err != nil {
 		return err
 	}
@ -258,7 +259,7 @@ func (o *CanIOptions) RunAccessCheck() (bool, error) {
 		}
 	}
 
-	response, err := o.AuthClient.SelfSubjectAccessReviews().Create(context.TODO(), sar)
+	response, err := o.AuthClient.SelfSubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
 	if err != nil {
 		return false, err
 	}
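Review-style resources are create-only, so they pick up CreateOptions as well. A hedged sketch of an access check under the new signature; the resource attributes are illustrative:

    package example

    import (
    	"context"

    	authorizationv1 "k8s.io/api/authorization/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // canIListPods submits a SelfSubjectAccessReview with the new
    // three-argument Create and reads back the authorizer's decision.
    func canIListPods(client kubernetes.Interface, ns string) (bool, error) {
    	sar := &authorizationv1.SelfSubjectAccessReview{
    		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
    			ResourceAttributes: &authorizationv1.ResourceAttributes{
    				Namespace: ns, Verb: "list", Resource: "pods",
    			},
    		},
    	}
    	resp, err := client.AuthorizationV1().SelfSubjectAccessReviews().Create(
    		context.TODO(), sar, metav1.CreateOptions{})
    	if err != nil {
    		return false, err
    	}
    	return resp.Status.Allowed, nil
    }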
@ -148,7 +148,7 @@ type fakeClient struct {
 	failureType failureType
 }
 
-func (c *fakeClient) Create(context.Context, *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) {
+func (c *fakeClient) Create(context.Context, *certificates.CertificateSigningRequest, metav1.CreateOptions) (*certificates.CertificateSigningRequest, error) {
 	if c.failureType == createError {
 		return nil, fmt.Errorf("fakeClient failed creating request")
 	}
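Hand-written test doubles are where the compiler forces a matching edit: any type satisfying a clientset-shaped interface must add the options parameter, even if it ignores it. A hedged sketch with illustrative types:

    package example

    import (
    	"context"
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // fakePodCreator mirrors the new method signature; the options
    // parameter is accepted and deliberately ignored.
    type fakePodCreator struct{ fail bool }

    func (f *fakePodCreator) Create(_ context.Context, pod *v1.Pod, _ metav1.CreateOptions) (*v1.Pod, error) {
    	if f.fail {
    		return nil, fmt.Errorf("fakePodCreator: injected failure")
    	}
    	return pod, nil
    }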
@ -83,7 +83,7 @@ func (kl *Kubelet) registerWithAPIServer() {
 // value of the annotation for controller-managed attach-detach of attachable
 // persistent volumes for the node.
 func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
-	_, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node)
+	_, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
 	if err == nil {
 		return true
 	}
@ -2264,7 +2264,7 @@ func TestUpdateNodeAddresses(t *testing.T) {
 		},
 	}
 
-	_, err := kubeClient.CoreV1().Nodes().Update(context.TODO(), oldNode)
+	_, err := kubeClient.CoreV1().Nodes().Update(context.TODO(), oldNode, metav1.UpdateOptions{})
 	assert.NoError(t, err)
 	kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
 		func(node *v1.Node) error {
@ -199,7 +199,7 @@ func restartForNewConfig(eventClient v1core.EventsGetter, nodeName string, sourc
 	// because the event recorder won't flush its queue before we exit (we'd lose the event)
 	event := makeEvent(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, message)
 	klog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message)
-	if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(context.TODO(), event); err != nil {
+	if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(context.TODO(), event, metav1.CreateOptions{}); err != nil {
 		utillog.Errorf("failed to send event, error: %v", err)
 	}
 	utillog.Infof(message)
@ -153,7 +153,7 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) {
 		// not create it this time - we will retry in the next iteration.
 		return nil, false, nil
 	}
-	lease, err := c.leaseClient.Create(context.TODO(), leaseToCreate)
+	lease, err := c.leaseClient.Create(context.TODO(), leaseToCreate, metav1.CreateOptions{})
 	if err != nil {
 		return nil, false, err
 	}
@ -170,7 +170,7 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) {
 // call this once you're sure the lease has been created
 func (c *controller) retryUpdateLease(base *coordinationv1.Lease) error {
 	for i := 0; i < maxUpdateRetries; i++ {
-		lease, err := c.leaseClient.Update(context.TODO(), c.newLease(base))
+		lease, err := c.leaseClient.Update(context.TODO(), c.newLease(base), metav1.UpdateOptions{})
 		if err == nil {
 			c.latestLease = lease
 			return nil
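The node-lease heartbeat shows both verbs side by side: ensure creates the Lease once, then renewals go through retried Updates. A hedged sketch of a renewal loop under the new signatures; conflict handling and backoff are omitted, and the reuse of one lease object across attempts is a simplification:

    package example

    import (
    	"context"
    	"time"

    	coordinationv1 "k8s.io/api/coordination/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	coordclient "k8s.io/client-go/kubernetes/typed/coordination/v1"
    )

    // renewLease retries a heartbeat Update with the new UpdateOptions.
    func renewLease(client coordclient.LeaseInterface, lease *coordinationv1.Lease, retries int) error {
    	var err error
    	for i := 0; i < retries; i++ {
    		now := metav1.NewMicroTime(time.Now())
    		lease.Spec.RenewTime = &now
    		if _, err = client.Update(context.TODO(), lease, metav1.UpdateOptions{}); err == nil {
    			return nil
    		}
    	}
    	return err
    }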
@ -96,7 +96,7 @@ func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error {
 		Controller: &controller,
 	}}
 
-	apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(context.TODO(), &copyPod)
+	apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(context.TODO(), &copyPod, metav1.CreateOptions{})
 	if err != nil && apierrors.IsAlreadyExists(err) {
 		// Check if the existing pod is the same as the pod we want to create.
 		if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash {
@ -22,6 +22,7 @@ go_library(
     deps = [
         "//staging/src/k8s.io/api/authentication/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
@ -27,6 +27,7 @@ import (
 
 	authenticationv1 "k8s.io/api/authentication/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/wait"
@ -65,7 +66,7 @@ func NewManager(c clientset.Interface) *Manager {
 		if c == nil {
 			return nil, errors.New("cannot use TokenManager when kubelet is in standalone mode")
 		}
-		tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr)
+		tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr, metav1.CreateOptions{})
 		if apierrors.IsNotFound(err) && !tokenRequestsSupported() {
 			return nil, fmt.Errorf("the API server does not have TokenRequest endpoints enabled")
 		}
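Subresource helpers such as CreateToken follow the same rule: the options argument lands after the request body. A hedged sketch of requesting a bound service-account token; the audience and TTL are illustrative:

    package example

    import (
    	"context"

    	authenticationv1 "k8s.io/api/authentication/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // requestToken calls the TokenRequest subresource with the new
    // CreateOptions argument appended after the request object.
    func requestToken(client kubernetes.Interface, ns, sa string) (string, error) {
    	ttl := int64(3600) // illustrative one-hour token
    	tr := &authenticationv1.TokenRequest{
    		Spec: authenticationv1.TokenRequestSpec{
    			Audiences:         []string{"https://kubernetes.default.svc"},
    			ExpirationSeconds: &ttl,
    		},
    	}
    	out, err := client.CoreV1().ServiceAccounts(ns).CreateToken(
    		context.TODO(), sa, tr, metav1.CreateOptions{})
    	if err != nil {
    		return "", err
    	}
    	return out.Status.Token, nil
    }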
@ -430,7 +430,7 @@ func delayClaimBecomesBound(
 	volumeClaim.Status = v1.PersistentVolumeClaimStatus{
 		Phase: v1.ClaimBound,
 	}
-	kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(context.TODO(), volumeClaim)
+	kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(context.TODO(), volumeClaim, metav1.UpdateOptions{})
 }
 
 func runVolumeManager(manager VolumeManager) chan struct{} {
@ -227,7 +227,7 @@ func (kubemarkController *KubemarkController) addNodeToNodeGroup(nodeGroup strin
 
 	var err error
 	for i := 0; i < numRetries; i++ {
-		_, err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(node.Namespace).Create(context.TODO(), node)
+		_, err = kubemarkController.externalCluster.client.CoreV1().ReplicationControllers(node.Namespace).Create(context.TODO(), node, metav1.CreateOptions{})
 		if err == nil {
 			return nil
 		}
@ -35,7 +35,7 @@ func createNamespaceIfNeeded(c corev1client.NamespacesGetter, ns string) error {
 			Namespace: "",
 		},
 	}
-	_, err := c.Namespaces().Create(context.TODO(), newNs)
+	_, err := c.Namespaces().Create(context.TODO(), newNs, metav1.CreateOptions{})
 	if err != nil && errors.IsAlreadyExists(err) {
 		err = nil
 	}
@ -286,7 +286,7 @@ func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, ser
 	if reconcile {
 		if svc, updated := reconcilers.GetMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated {
 			klog.Warningf("Resetting master service %q to %#v", serviceName, svc)
-			_, err := c.ServiceClient.Services(metav1.NamespaceDefault).Update(context.TODO(), svc)
+			_, err := c.ServiceClient.Services(metav1.NamespaceDefault).Update(context.TODO(), svc, metav1.UpdateOptions{})
 			return err
 		}
 	}
@ -308,7 +308,7 @@ func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, ser
 		},
 	}
 
-	_, err := c.ServiceClient.Services(metav1.NamespaceDefault).Create(context.TODO(), svc)
+	_, err := c.ServiceClient.Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
 	if errors.IsAlreadyExists(err) {
 		return c.CreateOrUpdateMasterServiceIfNeeded(serviceName, serviceIP, servicePorts, serviceType, reconcile)
 	}
@ -185,7 +185,7 @@ func createNamespaceIfNeeded(nsClient corev1client.NamespacesGetter, ns string)
 			Namespace: "",
 		},
 	}
-	_, err := nsClient.Namespaces().Create(context.TODO(), newNs)
+	_, err := nsClient.Namespaces().Create(context.TODO(), newNs, metav1.CreateOptions{})
 	if err != nil && apierrors.IsAlreadyExists(err) {
 		err = nil
 	}
@ -193,9 +193,9 @@ func createNamespaceIfNeeded(nsClient corev1client.NamespacesGetter, ns string)
 }
 
 func writeConfigMap(configMapClient corev1client.ConfigMapsGetter, required *corev1.ConfigMap) error {
-	_, err := configMapClient.ConfigMaps(required.Namespace).Update(context.TODO(), required)
+	_, err := configMapClient.ConfigMaps(required.Namespace).Update(context.TODO(), required, metav1.UpdateOptions{})
 	if apierrors.IsNotFound(err) {
-		_, err := configMapClient.ConfigMaps(required.Namespace).Create(context.TODO(), required)
+		_, err := configMapClient.ConfigMaps(required.Namespace).Create(context.TODO(), required, metav1.CreateOptions{})
 		return err
 	}
 
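writeConfigMap is the inverse of the usual create-then-tolerate pattern: try Update first, fall back to Create on NotFound. The same shape with the new options, as a hedged sketch without conflict retry:

    package example

    import (
    	"context"

    	corev1 "k8s.io/api/core/v1"
    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    // updateOrCreate tries Update first and falls back to Create on
    // NotFound, each with its own options struct.
    func updateOrCreate(client corev1client.ConfigMapsGetter, cm *corev1.ConfigMap) error {
    	_, err := client.ConfigMaps(cm.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
    	if apierrors.IsNotFound(err) {
    		_, err = client.ConfigMaps(cm.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{})
    	}
    	return err
    }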
@ -262,7 +262,7 @@ func TestGetNodeAddresses(t *testing.T) {
 	nodes, _ := fakeNodeClient.List(context.TODO(), metav1.ListOptions{})
 	for index := range nodes.Items {
 		nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}}
-		fakeNodeClient.Update(context.TODO(), &nodes.Items[index])
+		fakeNodeClient.Update(context.TODO(), &nodes.Items[index], metav1.UpdateOptions{})
 	}
 	addrs, err = addressProvider.externalAddresses()
 	assert.NoError(err, "addresses should not have returned an error.")
@ -278,7 +278,7 @@ func TestGetNodeAddressesWithOnlySomeExternalIP(t *testing.T) {
 	// Pass case with 1 External type IP (index == 1) and nodes (indexes 0 & 2) have no External IP.
 	nodes, _ := fakeNodeClient.List(context.TODO(), metav1.ListOptions{})
 	nodes.Items[1].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}}
-	fakeNodeClient.Update(context.TODO(), &nodes.Items[1])
+	fakeNodeClient.Update(context.TODO(), &nodes.Items[1], metav1.UpdateOptions{})
 
 	addrs, err := addressProvider.externalAddresses()
 	assert.NoError(err, "addresses should not have returned an error.")
@ -60,7 +60,7 @@ func (adapter *EndpointsAdapter) Get(namespace, name string, getOpts metav1.GetO
 // be created or updated. The created Endpoints object or an error will be
 // returned.
 func (adapter *EndpointsAdapter) Create(namespace string, endpoints *corev1.Endpoints) (*corev1.Endpoints, error) {
-	endpoints, err := adapter.endpointClient.Endpoints(namespace).Create(context.TODO(), endpoints)
+	endpoints, err := adapter.endpointClient.Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
 	if err == nil {
 		err = adapter.EnsureEndpointSliceFromEndpoints(namespace, endpoints)
 	}
@ -71,7 +71,7 @@ func (adapter *EndpointsAdapter) Create(namespace string, endpoints *corev1.Endp
 // endpointSliceClient exists, a matching EndpointSlice will also be created or
 // updated. The updated Endpoints object or an error will be returned.
 func (adapter *EndpointsAdapter) Update(namespace string, endpoints *corev1.Endpoints) (*corev1.Endpoints, error) {
-	endpoints, err := adapter.endpointClient.Endpoints(namespace).Update(context.TODO(), endpoints)
+	endpoints, err := adapter.endpointClient.Endpoints(namespace).Update(context.TODO(), endpoints, metav1.UpdateOptions{})
 	if err == nil {
 		err = adapter.EnsureEndpointSliceFromEndpoints(namespace, endpoints)
 	}
@ -90,7 +90,7 @@ func (adapter *EndpointsAdapter) EnsureEndpointSliceFromEndpoints(namespace stri
 
 	if err != nil {
 		if errors.IsNotFound(err) {
-			if _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(context.TODO(), endpointSlice); errors.IsAlreadyExists(err) {
+			if _, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{}); errors.IsAlreadyExists(err) {
 				err = nil
 			}
 		}
@ -103,7 +103,7 @@ func (adapter *EndpointsAdapter) EnsureEndpointSliceFromEndpoints(namespace stri
 		if err != nil {
 			return err
 		}
-		_, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(context.TODO(), endpointSlice)
+		_, err = adapter.endpointSliceClient.EndpointSlices(namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
 		return err
 	}
 
@ -113,7 +113,7 @@ func (adapter *EndpointsAdapter) EnsureEndpointSliceFromEndpoints(namespace stri
 		return nil
 	}
 
-	_, err = adapter.endpointSliceClient.EndpointSlices(namespace).Update(context.TODO(), endpointSlice)
+	_, err = adapter.endpointSliceClient.EndpointSlices(namespace).Update(context.TODO(), endpointSlice, metav1.UpdateOptions{})
 	return err
 }
 
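Wrappers like EndpointsAdapter that hide the clientset behind a narrower interface absorb the churn internally: their public methods keep the old two-argument shape and supply the options themselves. A hedged sketch of that insulation layer, pared down to one verb with illustrative type names:

    package example

    import (
    	"context"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    // endpointsWriter pins the default options in one place, so its
    // callers keep a two-argument API while the clientset changes.
    type endpointsWriter struct {
    	client corev1client.EndpointsGetter
    }

    func (w *endpointsWriter) Create(ns string, ep *corev1.Endpoints) (*corev1.Endpoints, error) {
    	return w.client.Endpoints(ns).Create(context.TODO(), ep, metav1.CreateOptions{})
    }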
@ -84,7 +84,7 @@ func TestEndpointsAdapterGet(t *testing.T) {
 	}
 
 	for _, endpoints := range testCase.endpoints {
-		_, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints)
+		_, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Error creating Endpoints: %v", err)
 		}
@ -182,7 +182,7 @@ func TestEndpointsAdapterCreate(t *testing.T) {
 	}
 
 	for _, endpoints := range testCase.endpoints {
-		_, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints)
+		_, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Error creating Endpoints: %v", err)
 		}
@ -293,7 +293,7 @@ func TestEndpointsAdapterUpdate(t *testing.T) {
 	}
 
 	for _, endpoints := range testCase.endpoints {
-		_, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints)
+		_, err := client.CoreV1().Endpoints(endpoints.Namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Error creating Endpoints: %v", err)
 		}
@ -435,7 +435,7 @@ func TestEndpointsAdapterEnsureEndpointSliceFromEndpoints(t *testing.T) {
 	}
 
 	for _, endpointSlice := range testCase.endpointSlices {
-		_, err := client.DiscoveryV1beta1().EndpointSlices(endpointSlice.Namespace).Create(context.TODO(), endpointSlice)
+		_, err := client.DiscoveryV1beta1().EndpointSlices(endpointSlice.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Error creating EndpointSlice: %v", err)
 		}
@ -421,7 +421,7 @@ func TestLeaseEndpointReconciler(t *testing.T) {
 			clientset := fake.NewSimpleClientset()
 			if test.endpoints != nil {
 				for _, ep := range test.endpoints.Items {
-					if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep); err != nil {
+					if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep, metav1.CreateOptions{}); err != nil {
 						t.Errorf("case %q: unexpected error: %v", test.testName, err)
 						continue
 					}
@ -523,7 +523,7 @@ func TestLeaseEndpointReconciler(t *testing.T) {
 			clientset := fake.NewSimpleClientset()
 			if test.endpoints != nil {
 				for _, ep := range test.endpoints.Items {
-					if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep); err != nil {
+					if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep, metav1.CreateOptions{}); err != nil {
 						t.Errorf("case %q: unexpected error: %v", test.testName, err)
 						continue
 					}
@ -638,7 +638,7 @@ func TestLeaseRemoveEndpoints(t *testing.T) {
 			fakeLeases.SetKeys(test.endpointKeys)
 			clientset := fake.NewSimpleClientset()
 			for _, ep := range test.endpoints.Items {
-				if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep); err != nil {
+				if _, err := clientset.CoreV1().Endpoints(ep.Namespace).Create(context.TODO(), &ep, metav1.CreateOptions{}); err != nil {
 					t.Errorf("case %q: unexpected error: %v", test.testName, err)
 					continue
 				}
@ -241,7 +241,7 @@ func (r *EvictionREST) checkAndDecrement(namespace string, podName string, pdb p
 	// If the pod is not deleted within a reasonable time limit PDB controller will assume that it won't
 	// be deleted at all and remove it from DisruptedPod map.
 	pdb.Status.DisruptedPods[podName] = metav1.Time{Time: time.Now()}
-	if _, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(namespace).UpdateStatus(context.TODO(), &pdb); err != nil {
+	if _, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(namespace).UpdateStatus(context.TODO(), &pdb, metav1.UpdateOptions{}); err != nil {
 		return err
 	}
 
@ -47,6 +47,7 @@ go_test(
     deps = [
         "//pkg/apis/flowcontrol/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/flowcontrol/v1alpha1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
@ -154,7 +154,7 @@ func lastMandatoryExists(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alp
 
 func ensure(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface, flowSchemas []*flowcontrolv1alpha1.FlowSchema, priorityLevels []*flowcontrolv1alpha1.PriorityLevelConfiguration) error {
 	for _, flowSchema := range flowSchemas {
-		_, err := flowcontrolClientSet.FlowSchemas().Create(context.TODO(), flowSchema)
+		_, err := flowcontrolClientSet.FlowSchemas().Create(context.TODO(), flowSchema, metav1.CreateOptions{})
 		if apierrors.IsAlreadyExists(err) {
 			klog.V(3).Infof("system preset FlowSchema %s already exists, skipping creating", flowSchema.Name)
 			continue
@ -165,7 +165,7 @@ func ensure(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface,
 		klog.V(3).Infof("created system preset FlowSchema %s", flowSchema.Name)
 	}
 	for _, priorityLevelConfiguration := range priorityLevels {
-		_, err := flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), priorityLevelConfiguration)
+		_, err := flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), priorityLevelConfiguration, metav1.CreateOptions{})
 		if apierrors.IsAlreadyExists(err) {
 			klog.V(3).Infof("system preset PriorityLevelConfiguration %s already exists, skipping creating", priorityLevelConfiguration.Name)
 			continue
@ -189,7 +189,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface
 			return fmt.Errorf("failed checking if mandatory FlowSchema %s is up-to-date due to %v, will retry later", expectedFlowSchema.Name, err)
 		}
 		if !identical {
-			if _, err := flowcontrolClientSet.FlowSchemas().Update(context.TODO(), expectedFlowSchema); err != nil {
+			if _, err := flowcontrolClientSet.FlowSchemas().Update(context.TODO(), expectedFlowSchema, metav1.UpdateOptions{}); err != nil {
 				return fmt.Errorf("failed upgrading mandatory FlowSchema %s due to %v, will retry later", expectedFlowSchema.Name, err)
 			}
 		}
@ -198,7 +198,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface
 		if !apierrors.IsNotFound(err) {
 			return fmt.Errorf("failed getting FlowSchema %s due to %v, will retry later", expectedFlowSchema.Name, err)
 		}
-		_, err = flowcontrolClientSet.FlowSchemas().Create(context.TODO(), expectedFlowSchema)
+		_, err = flowcontrolClientSet.FlowSchemas().Create(context.TODO(), expectedFlowSchema, metav1.CreateOptions{})
 		if apierrors.IsAlreadyExists(err) {
 			klog.V(3).Infof("system preset FlowSchema %s already exists, skipping creating", expectedFlowSchema.Name)
 			continue
@ -218,7 +218,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface
 			return fmt.Errorf("failed checking if mandatory PriorityLevelConfiguration %s is up-to-date due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err)
 		}
 		if !identical {
-			if _, err := flowcontrolClientSet.PriorityLevelConfigurations().Update(context.TODO(), expectedPriorityLevelConfiguration); err != nil {
+			if _, err := flowcontrolClientSet.PriorityLevelConfigurations().Update(context.TODO(), expectedPriorityLevelConfiguration, metav1.UpdateOptions{}); err != nil {
 				return fmt.Errorf("failed upgrading mandatory PriorityLevelConfiguration %s due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err)
 			}
 		}
@ -227,7 +227,7 @@ func upgrade(flowcontrolClientSet flowcontrolclient.FlowcontrolV1alpha1Interface
 		if !apierrors.IsNotFound(err) {
 			return fmt.Errorf("failed getting PriorityLevelConfiguration %s due to %v, will retry later", expectedPriorityLevelConfiguration.Name, err)
 		}
-		_, err = flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), expectedPriorityLevelConfiguration)
+		_, err = flowcontrolClientSet.PriorityLevelConfigurations().Create(context.TODO(), expectedPriorityLevelConfiguration, metav1.CreateOptions{})
 		if apierrors.IsAlreadyExists(err) {
 			klog.V(3).Infof("system preset PriorityLevelConfiguration %s already exists, skipping creating", expectedPriorityLevelConfiguration.Name)
 			continue
@@ -23,6 +23,7 @@ import (

	"github.com/stretchr/testify/assert"
	flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap"
	"k8s.io/client-go/kubernetes/fake"
	flowcontrolapisv1alpha1 "k8s.io/kubernetes/pkg/apis/flowcontrol/v1alpha1"
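The metav1 import added above is what supplies the options structs at every call site in this PR. One practical payoff of plumbing them through explicitly is per-request server-side dry-run; a hedged sketch (the fake client ignores DryRun, so this only illustrates the shape a real apiserver would honor):

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}

	// DryRunAll asks a real apiserver to validate and admit without persisting.
	opts := metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}
	if _, err := client.CoreV1().ConfigMaps("default").Create(context.TODO(), cm, opts); err != nil {
		fmt.Println("create failed:", err)
	}
}
```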
@@ -50,7 +51,7 @@ func TestShouldEnsurePredefinedSettings(t *testing.T) {
		t.Run(testCase.name, func(t *testing.T) {
			c := fake.NewSimpleClientset()
			if testCase.existingPriorityLevel != nil {
-				c.FlowcontrolV1alpha1().PriorityLevelConfigurations().Create(context.TODO(), testCase.existingPriorityLevel)
+				c.FlowcontrolV1alpha1().PriorityLevelConfigurations().Create(context.TODO(), testCase.existingPriorityLevel, metav1.CreateOptions{})
			}
			should, err := lastMandatoryExists(c.FlowcontrolV1alpha1())
			assert.NoError(t, err)
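A note on the test pattern above: pre-existing objects can alternatively be handed to the fake clientset's constructor, which sidesteps the Create call (and its options) entirely. A hedged sketch, with a made-up PriorityLevelConfiguration name:

```go
package main

import (
	"context"
	"fmt"

	flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	existing := &flowcontrolv1alpha1.PriorityLevelConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "exempt"},
	}

	// Seed the fake clientset at construction time rather than via Create(...).
	c := fake.NewSimpleClientset(existing)

	got, err := c.FlowcontrolV1alpha1().PriorityLevelConfigurations().Get(context.TODO(), "exempt", metav1.GetOptions{})
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("found:", got.Name)
}
```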
@@ -88,7 +88,7 @@ func (c ClusterRoleModifier) Get(namespace, name string) (RuleOwner, error) {
}

func (c ClusterRoleModifier) Create(in RuleOwner) (RuleOwner, error) {
-	ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole)
+	ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
@@ -96,7 +96,7 @@ func (c ClusterRoleModifier) Create(in RuleOwner) (RuleOwner, error) {
}

func (c ClusterRoleModifier) Update(in RuleOwner) (RuleOwner, error) {
-	ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole)
+	ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleRuleOwner).ClusterRole, metav1.UpdateOptions{})
	if err != nil {
		return nil, err
	}
@@ -89,7 +89,7 @@ func (c ClusterRoleBindingClientAdapter) Get(namespace, name string) (RoleBindin
}

func (c ClusterRoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) {
-	ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding)
+	ret, err := c.Client.Create(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
@@ -97,7 +97,7 @@ func (c ClusterRoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, er
}

func (c ClusterRoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, error) {
-	ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding)
+	ret, err := c.Client.Update(context.TODO(), in.(ClusterRoleBindingAdapter).ClusterRoleBinding, metav1.UpdateOptions{})
	if err != nil {
		return nil, err
	}
@@ -39,7 +39,7 @@ func tryEnsureNamespace(client corev1client.NamespaceInterface, namespace string
	}

	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-	_, createErr := client.Create(context.TODO(), ns)
+	_, createErr := client.Create(context.TODO(), ns, metav1.CreateOptions{})

	return utilerrors.FilterOut(createErr, apierrors.IsAlreadyExists, apierrors.IsForbidden)
}
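The tryEnsureNamespace hunk is a nice example of the tolerant-create idiom this PR touches repeatedly: fire the Create unconditionally, then filter out the error classes that mean "fine, nothing to do". A self-contained sketch of that pattern with the new signature (the fake clientset and namespace name are illustrative):

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "kube-system"}}

	// Idempotent create: AlreadyExists and Forbidden are swallowed, everything else propagates.
	_, createErr := client.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
	err := utilerrors.FilterOut(createErr, apierrors.IsAlreadyExists, apierrors.IsForbidden)
	fmt.Println("filtered error:", err)
}
```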
@@ -93,7 +93,7 @@ func (c RoleModifier) Create(in RuleOwner) (RuleOwner, error) {
		return nil, err
	}

-	ret, err := c.Client.Roles(in.GetNamespace()).Create(context.TODO(), in.(RoleRuleOwner).Role)
+	ret, err := c.Client.Roles(in.GetNamespace()).Create(context.TODO(), in.(RoleRuleOwner).Role, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
@@ -101,7 +101,7 @@ func (c RoleModifier) Create(in RuleOwner) (RuleOwner, error) {
}

func (c RoleModifier) Update(in RuleOwner) (RuleOwner, error) {
-	ret, err := c.Client.Roles(in.GetNamespace()).Update(context.TODO(), in.(RoleRuleOwner).Role)
+	ret, err := c.Client.Roles(in.GetNamespace()).Update(context.TODO(), in.(RoleRuleOwner).Role, metav1.UpdateOptions{})
	if err != nil {
		return nil, err
	}
@@ -95,7 +95,7 @@ func (c RoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) {
		return nil, err
	}

-	ret, err := c.Client.RoleBindings(in.GetNamespace()).Create(context.TODO(), in.(RoleBindingAdapter).RoleBinding)
+	ret, err := c.Client.RoleBindings(in.GetNamespace()).Create(context.TODO(), in.(RoleBindingAdapter).RoleBinding, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
@@ -103,7 +103,7 @@ func (c RoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) {
}

func (c RoleBindingClientAdapter) Update(in RoleBinding) (RoleBinding, error) {
-	ret, err := c.Client.RoleBindings(in.GetNamespace()).Update(context.TODO(), in.(RoleBindingAdapter).RoleBinding)
+	ret, err := c.Client.RoleBindings(in.GetNamespace()).Update(context.TODO(), in.(RoleBindingAdapter).RoleBinding, metav1.UpdateOptions{})
	if err != nil {
		return nil, err
	}
@@ -360,7 +360,7 @@ func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clus
		klog.V(1).Infof("migrating %v to %v", existingRole.Name, newName)
		existingRole.Name = newName
		existingRole.ResourceVersion = "" // clear this so the object can be created.
-		if _, err := clusterRoleClient.ClusterRoles().Create(context.TODO(), existingRole); err != nil && !apierrors.IsAlreadyExists(err) {
+		if _, err := clusterRoleClient.ClusterRoles().Create(context.TODO(), existingRole, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
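The ResourceVersion clearing in the hunk above is the subtle part of that migration: an object read back from the API carries a resourceVersion, and the apiserver rejects a Create whose object sets one. A minimal sketch of the copy-under-a-new-name pattern with the new signature (role names invented; the fake clientset stands in for a real one):

```go
package main

import (
	"context"
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	old := &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "old-name"}}
	client := fake.NewSimpleClientset(old)

	existing, err := client.RbacV1().ClusterRoles().Get(context.TODO(), "old-name", metav1.GetOptions{})
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}

	existing.Name = "new-name"
	existing.ResourceVersion = "" // a Create with a non-empty resourceVersion is rejected by a real apiserver

	if _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), existing, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
		fmt.Println("migration failed:", err)
	}
}
```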
@@ -398,7 +398,7 @@ func primeSplitClusterRoleBindings(clusterRoleBindingToSplit map[string]rbacapiv
		newCRB.Subjects = existingRoleBinding.Subjects
		newCRB.Labels = existingRoleBinding.Labels
		newCRB.Annotations = existingRoleBinding.Annotations
-		if _, err := clusterRoleBindingClient.ClusterRoleBindings().Create(context.TODO(), newCRB); err != nil && !apierrors.IsAlreadyExists(err) {
+		if _, err := clusterRoleBindingClient.ClusterRoleBindings().Create(context.TODO(), newCRB, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
@@ -127,7 +127,7 @@ func AddSystemPriorityClasses() genericapiserver.PostStartHookFunc {
		_, err := schedClientSet.PriorityClasses().Get(context.TODO(), pc.Name, metav1.GetOptions{})
		if err != nil {
			if apierrors.IsNotFound(err) {
-				_, err := schedClientSet.PriorityClasses().Create(context.TODO(), pc)
+				_, err := schedClientSet.PriorityClasses().Create(context.TODO(), pc, metav1.CreateOptions{})
				if err != nil && !apierrors.IsAlreadyExists(err) {
					return false, err
				} else {
@@ -752,7 +752,7 @@ type podConditionUpdaterImpl struct {
func (p *podConditionUpdaterImpl) update(pod *v1.Pod, condition *v1.PodCondition) error {
	klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s, Reason=%s)", pod.Namespace, pod.Name, condition.Type, condition.Status, condition.Reason)
	if podutil.UpdatePodCondition(&pod.Status, condition) {
-		_, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod)
+		_, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
		return err
	}
	return nil
@@ -773,7 +773,7 @@ func (p *podPreemptorImpl) deletePod(pod *v1.Pod) error {
func (p *podPreemptorImpl) setNominatedNodeName(pod *v1.Pod, nominatedNodeName string) error {
	podCopy := pod.DeepCopy()
	podCopy.Status.NominatedNodeName = nominatedNodeName
-	_, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), podCopy)
+	_, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), podCopy, metav1.UpdateOptions{})
	return err
}

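Both scheduler call sites above write through the status subresource, and UpdateStatus takes the same metav1.UpdateOptions as Update after this PR. A hedged sketch of the deep-copy-then-UpdateStatus shape (pod and node names invented):

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	client := fake.NewSimpleClientset(pod)

	// Mutate a deep copy, then persist it via the status subresource.
	podCopy := pod.DeepCopy()
	podCopy.Status.NominatedNodeName = "node-a"
	if _, err := client.CoreV1().Pods("default").UpdateStatus(context.TODO(), podCopy, metav1.UpdateOptions{}); err != nil {
		fmt.Println("status update failed:", err)
	}
}
```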
@@ -227,7 +227,7 @@ func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) erro
		return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
	}

-	if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes); err != nil {
+	if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
		return fmt.Errorf("failed to patch node CIDR: %v", err)
	}
	return nil
@@ -248,7 +248,7 @@ func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string)
		return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
	}
	klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes))
-	if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes); err != nil {
+	if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
		return fmt.Errorf("failed to patch node CIDR: %v", err)
	}
	return nil
@@ -261,7 +261,7 @@ func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode
		return nil, nil, err
	}

-	updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, "status")
+	updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
	if err != nil {
		return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err)
	}
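Worth flagging for review: in the new Patch signature the metav1.PatchOptions parameter sits before the variadic subresources, so callers that patch a subresource (like "status" above) must insert the options in the middle of the argument list rather than appending them. A self-contained sketch of that argument ordering (the node name and patch payload are invented):

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}}
	client := fake.NewSimpleClientset(node)

	patch := []byte(`{"status":{"capacity":{"example.com/foo":"1"}}}`)

	// Options slot in between the patch bytes and the trailing subresource names.
	_, err := client.CoreV1().Nodes().Patch(context.TODO(), "node-a",
		types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
	fmt.Println(err)
}
```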
@@ -35,7 +35,7 @@ func PatchPodStatus(c clientset.Interface, namespace, name string, uid types.UID
		return nil, nil, err
	}

-	updatedPod, err := c.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, "status")
+	updatedPod, err := c.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
	if err != nil {
		return nil, nil, fmt.Errorf("failed to patch status %q for pod %q/%q: %v", patchBytes, namespace, name, err)
	}
@@ -39,7 +39,7 @@ func TestPatchPodStatus(t *testing.T) {
			Namespace: ns,
			Name: name,
		},
-	})
+	}, metav1.CreateOptions{})

	testCases := []struct {
		description string
Some files were not shown because too many files have changed in this diff.