commit 3aa59f7f30 (parent 7e88d8db66)

    generated: run refactor
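The mechanical change repeated in every hunk below is that client-go request methods (Get, List, Create, Update, Patch, Delete) gain a leading context.Context argument, filled with context.TODO() at call sites that do not yet have a request-scoped context plumbed through. A minimal sketch of the before/after call shape, assuming a client-go version with the context-aware signatures; the package, function, and variable names here are illustrative and not part of this commit:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getConfigMap shows the migrated call shape. Before this refactor the call
// was client.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{}); after
// it, a context is the first argument, with context.TODO() marking call
// sites that have no caller-supplied context yet.
func getConfigMap(client kubernetes.Interface, ns, name string) (*v1.ConfigMap, error) {
	return client.CoreV1().ConfigMaps(ns).Get(context.TODO(), name, metav1.GetOptions{})
}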
@@ -17,6 +17,7 @@ limitations under the License.
 package main
 
 import (
+	"context"
	"flag"
	"fmt"
	"net"
@@ -102,7 +103,7 @@ func main() {
 	namespace := metav1.NamespaceSystem
 	envNamespace := os.Getenv("NAMESPACE")
 	if envNamespace != "" {
-		if _, err := client.CoreV1().Namespaces().Get(envNamespace, metav1.GetOptions{}); err != nil {
+		if _, err := client.CoreV1().Namespaces().Get(context.TODO(), envNamespace, metav1.GetOptions{}); err != nil {
 			klog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
 		}
 		namespace = envNamespace
@@ -117,7 +118,7 @@ func main() {
 	// Look for endpoints associated with the Elasticsearch logging service.
 	// First wait for the service to become available.
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-		elasticsearch, err = client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
+		elasticsearch, err = client.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
 		if err == nil {
 			break
 		}
@@ -134,7 +135,7 @@ func main() {
 	// Wait for some endpoints.
 	count, _ := strconv.Atoi(os.Getenv("MINIMUM_MASTER_NODES"))
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-		endpoints, err = client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
+		endpoints, err = client.CoreV1().Endpoints(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
 		if err != nil {
 			continue
 		}

@@ -231,7 +231,7 @@ func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, custo
 		default:
 		}
 
-		if _, err := client.CoreV1().Namespaces().Get("default", metav1.GetOptions{}); err != nil {
+		if _, err := client.CoreV1().Namespaces().Get(context.TODO(), "default", metav1.GetOptions{}); err != nil {
 			if !errors.IsNotFound(err) {
 				t.Logf("Unable to get default namespace: %v", err)
 			}

@@ -18,6 +18,7 @@ package cmd
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -432,7 +433,7 @@ func NewCmdConfigUploadFromFlags(out io.Writer, kubeConfigFile *string) *cobra.C
 func RunConfigView(out io.Writer, client clientset.Interface) error {
 
 	klog.V(1).Infoln("[config] getting the cluster configuration")
-	cfgConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(constants.KubeadmConfigConfigMap, metav1.GetOptions{})
+	cfgConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), constants.KubeadmConfigConfigMap, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}

@@ -17,6 +17,7 @@ limitations under the License.
 package phases
 
 import (
+	"context"
 	"fmt"
 	"os"
 
@@ -142,7 +143,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) {
 	// A new Node with the same name as an existing control-plane Node can cause undefined
 	// behavior and ultimately control-plane failure.
 	klog.V(1).Infof("[kubelet-start] Checking for an existing Node in the cluster with name %q and status %q", nodeName, v1.NodeReady)
-	node, err := bootstrapClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+	node, err := bootstrapClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 	if err != nil && !apierrors.IsNotFound(err) {
 		return errors.Wrapf(err, "cannot get Node %q", nodeName)
 	}

@@ -17,6 +17,7 @@ limitations under the License.
 package cmd
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"os"
@@ -380,7 +381,7 @@ func RunListTokens(out io.Writer, errW io.Writer, client clientset.Interface, pr
 	}
 
 	klog.V(1).Info("[token] retrieving list of bootstrap tokens")
-	secrets, err := client.CoreV1().Secrets(metav1.NamespaceSystem).List(listOptions)
+	secrets, err := client.CoreV1().Secrets(metav1.NamespaceSystem).List(context.TODO(), listOptions)
 	if err != nil {
 		return errors.Wrap(err, "failed to list bootstrap tokens")
 	}
@@ -430,7 +431,7 @@ func RunDeleteTokens(out io.Writer, client clientset.Interface, tokenIDsOrTokens
 
 		tokenSecretName := bootstraputil.BootstrapTokenSecretName(tokenID)
 		klog.V(1).Infof("[token] deleting token %q", tokenID)
-		if err := client.CoreV1().Secrets(metav1.NamespaceSystem).Delete(tokenSecretName, nil); err != nil {
+		if err := client.CoreV1().Secrets(metav1.NamespaceSystem).Delete(context.TODO(), tokenSecretName, nil); err != nil {
 			return errors.Wrapf(err, "failed to delete bootstrap token %q", tokenID)
 		}
 		fmt.Fprintf(out, "bootstrap token %q deleted\n", tokenID)

@@ -17,6 +17,7 @@ limitations under the License.
 package file
 
 import (
+	"context"
 	"time"
 
 	"github.com/pkg/errors"
@@ -100,7 +101,7 @@ func ValidateConfigInfo(config *clientcmdapi.Config, clustername string, discove
 
 	err = wait.Poll(constants.DiscoveryRetryInterval, discoveryTimeout, func() (bool, error) {
 		var err error
-		clusterinfoCM, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
+		clusterinfoCM, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
 			if apierrors.IsForbidden(err) {
 				// If the request is unauthorized, the cluster admin has not granted access to the cluster info configmap for unauthenticated users

@@ -210,7 +210,7 @@ func getClusterInfo(client clientset.Interface, kubeconfig *clientcmdapi.Config,
 	defer cancel()
 
 	wait.JitterUntil(func() {
-		cm, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
+		cm, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.TODO(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
 			klog.V(1).Infof("[discovery] Failed to request cluster-info, will try again: %v", err)
 			return

@@ -17,6 +17,7 @@ limitations under the License.
 package dns
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"net"
@@ -58,7 +59,7 @@ const (
 // DeployedDNSAddon returns the type of DNS addon currently deployed
 func DeployedDNSAddon(client clientset.Interface) (kubeadmapi.DNSAddOnType, string, error) {
 	deploymentsClient := client.AppsV1().Deployments(metav1.NamespaceSystem)
-	deployments, err := deploymentsClient.List(metav1.ListOptions{LabelSelector: "k8s-app=kube-dns"})
+	deployments, err := deploymentsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "k8s-app=kube-dns"})
 	if err != nil {
 		return "", "", errors.Wrap(err, "couldn't retrieve DNS addon deployments")
 	}
@@ -84,7 +85,7 @@ func DeployedDNSAddon(client clientset.Interface) (kubeadmapi.DNSAddOnType, stri
 // deployedDNSReplicas returns the replica count for the current DNS deployment
 func deployedDNSReplicas(client clientset.Interface, replicas int32) (*int32, error) {
 	deploymentsClient := client.AppsV1().Deployments(metav1.NamespaceSystem)
-	deployments, err := deploymentsClient.List(metav1.ListOptions{LabelSelector: "k8s-app=kube-dns"})
+	deployments, err := deploymentsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "k8s-app=kube-dns"})
 	if err != nil {
 		return &replicas, errors.Wrap(err, "couldn't retrieve DNS addon deployments")
 	}
@@ -209,7 +210,7 @@ func coreDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interfa
 	}
 
 	// Get the kube-dns ConfigMap for translation to equivalent CoreDNS Config.
-	kubeDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeDNSConfigMap, metav1.GetOptions{})
+	kubeDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeDNSConfigMap, metav1.GetOptions{})
 	if err != nil && !apierrors.IsNotFound(err) {
 		return err
 	}
@@ -346,7 +347,7 @@ func createDNSService(dnsService *v1.Service, serviceBytes []byte, client client
 	}
 
 	// Can't use a generic apiclient helper func here as we have to tolerate more than AlreadyExists.
-	if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(dnsService); err != nil {
+	if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Create(context.TODO(), dnsService); err != nil {
 		// Ignore if the Service is invalid with this error message:
 		// Service "kube-dns" is invalid: spec.clusterIP: Invalid value: "10.96.0.10": provided IP is already allocated
 
@@ -354,7 +355,7 @@ func createDNSService(dnsService *v1.Service, serviceBytes []byte, client client
 			return errors.Wrap(err, "unable to create a new DNS service")
 		}
 
-		if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(dnsService); err != nil {
+		if _, err := client.CoreV1().Services(metav1.NamespaceSystem).Update(context.TODO(), dnsService); err != nil {
 			return errors.Wrap(err, "unable to create/update the DNS service")
 		}
 	}
@@ -373,7 +374,7 @@ func migrateCoreDNSCorefile(client clientset.Interface, cm *v1.ConfigMap, corefi
 	// Take a copy of the Corefile data as `Corefile-backup` and update the ConfigMap
 	// Also point the CoreDNS deployment to the `Corefile-backup` data.
 
-	if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(&v1.ConfigMap{
+	if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), &v1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      kubeadmconstants.CoreDNSConfigMap,
 			Namespace: metav1.NamespaceSystem,
@@ -395,7 +396,7 @@ func migrateCoreDNSCorefile(client clientset.Interface, cm *v1.ConfigMap, corefi
 		return errors.Wrap(err, "unable to migrate CoreDNS ConfigMap")
 	}
 
-	if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(&v1.ConfigMap{
+	if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), &v1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      kubeadmconstants.CoreDNSConfigMap,
 			Namespace: metav1.NamespaceSystem,
@@ -424,7 +425,7 @@ func migrateCoreDNSCorefile(client clientset.Interface, cm *v1.ConfigMap, corefi
 
 // GetCoreDNSInfo gets the current CoreDNS installed and the current Corefile Configuration of CoreDNS.
 func GetCoreDNSInfo(client clientset.Interface) (*v1.ConfigMap, string, string, error) {
-	coreDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNSConfigMap, metav1.GetOptions{})
+	coreDNSConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.CoreDNSConfigMap, metav1.GetOptions{})
 	if err != nil && !apierrors.IsNotFound(err) {
 		return nil, "", "", err
 	}
@@ -445,13 +446,13 @@ func GetCoreDNSInfo(client clientset.Interface) (*v1.ConfigMap, string, string,
 }
 
 func patchCoreDNSDeployment(client clientset.Interface, coreDNSCorefileName string) error {
-	dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNSDeploymentName, metav1.GetOptions{})
+	dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.CoreDNSDeploymentName, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
 	patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"volumes":[{"name": "config-volume", "configMap":{"name": "coredns", "items":[{"key": "%s", "path": "%s"}]}}]}}}}`, coreDNSCorefileName, coreDNSCorefileName)
 
-	if _, err := client.AppsV1().Deployments(dnsDeployment.ObjectMeta.Namespace).Patch(dnsDeployment.Name, types.StrategicMergePatchType, []byte(patch)); err != nil {
+	if _, err := client.AppsV1().Deployments(dnsDeployment.ObjectMeta.Namespace).Patch(context.TODO(), dnsDeployment.Name, types.StrategicMergePatchType, []byte(patch)); err != nil {
 		return errors.Wrap(err, "unable to patch the CoreDNS deployment")
 	}
 	return nil

@@ -17,6 +17,7 @@ limitations under the License.
 package dns
 
 import (
+	"context"
 	"strings"
 	"testing"
 
@@ -722,7 +723,7 @@ func TestCreateCoreDNSConfigMap(t *testing.T) {
 			if err != nil {
 				t.Fatalf("error creating the CoreDNS ConfigMap: %v", err)
 			}
-			migratedConfigMap, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNSConfigMap, metav1.GetOptions{})
+			migratedConfigMap, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.CoreDNSConfigMap, metav1.GetOptions{})
 			if !strings.EqualFold(migratedConfigMap.Data["Corefile"], tc.expectedCorefileData) {
 				t.Fatalf("expected to get %v, but got %v", tc.expectedCorefileData, migratedConfigMap.Data["Corefile"])
 			}
@@ -732,7 +733,7 @@ func TestCreateCoreDNSConfigMap(t *testing.T) {
 
 func createClientAndCoreDNSManifest(t *testing.T, corefile, coreDNSVersion string) *clientsetfake.Clientset {
 	client := clientsetfake.NewSimpleClientset()
-	_, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&v1.ConfigMap{
+	_, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &v1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      kubeadmconstants.CoreDNSConfigMap,
 			Namespace: metav1.NamespaceSystem,
@@ -744,7 +745,7 @@ func createClientAndCoreDNSManifest(t *testing.T, corefile, coreDNSVersion strin
 	if err != nil {
 		t.Fatalf("error creating ConfigMap: %v", err)
 	}
-	_, err = client.AppsV1().Deployments(metav1.NamespaceSystem).Create(&apps.Deployment{
+	_, err = client.AppsV1().Deployments(metav1.NamespaceSystem).Create(context.TODO(), &apps.Deployment{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "Deployment",
 			APIVersion: "apps/v1",

@@ -17,6 +17,7 @@ limitations under the License.
 package node
 
 import (
+	"context"
 	"github.com/pkg/errors"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -39,7 +40,7 @@ func UpdateOrCreateTokens(client clientset.Interface, failIfExists bool, tokens
 	for _, token := range tokens {
 
 		secretName := bootstraputil.BootstrapTokenSecretName(token.Token.ID)
-		secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
+		secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), secretName, metav1.GetOptions{})
 		if secret != nil && err == nil && failIfExists {
 			return errors.Errorf("a token with id %q already exists", token.Token.ID)
 		}

@@ -91,7 +91,7 @@ func (r *APIRenewer) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Sign
 		},
 	}
 
-	req, err := r.client.CertificateSigningRequests().Create(k8sCSR)
+	req, err := r.client.CertificateSigningRequests().Create(context.TODO(), k8sCSR)
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "couldn't create certificate signing request")
 	}

@@ -17,6 +17,7 @@ limitations under the License.
 package copycerts
 
 import (
+	"context"
 	"encoding/hex"
 	"fmt"
 	"io/ioutil"
@@ -159,7 +160,7 @@ func createRBAC(client clientset.Interface) error {
 
 func getSecretOwnerRef(client clientset.Interface, tokenID string) ([]metav1.OwnerReference, error) {
 	secretName := bootstraputil.BootstrapTokenSecretName(tokenID)
-	secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
+	secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), secretName, metav1.GetOptions{})
 	if err != nil {
 		return nil, errors.Wrap(err, "error to get token reference")
 	}
@@ -259,7 +260,7 @@ func writeCertOrKey(certOrKeyPath string, certOrKeyData []byte) error {
 }
 
 func getSecret(client clientset.Interface) (*v1.Secret, error) {
-	secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(kubeadmconstants.KubeadmCertsSecret, metav1.GetOptions{})
+	secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeadmCertsSecret, metav1.GetOptions{})
 	if err != nil {
 		if apierrors.IsNotFound(err) {
 			return nil, errors.Errorf("Secret %q was not found in the %q Namespace. This Secret might have expired. Please, run `kubeadm init phase upload-certs --upload-certs` on a control plane to generate a new one", kubeadmconstants.KubeadmCertsSecret, metav1.NamespaceSystem)

@@ -17,6 +17,7 @@ limitations under the License.
 package copycerts
 
 import (
+	"context"
 	"encoding/hex"
 	"io/ioutil"
 	"os"
@@ -180,7 +181,7 @@ func TestUploadCerts(t *testing.T) {
 	if err != nil {
 		t.Fatalf("error decoding key: %v", err)
 	}
-	secretMap, err := cs.CoreV1().Secrets(metav1.NamespaceSystem).Get(kubeadmconstants.KubeadmCertsSecret, metav1.GetOptions{})
+	secretMap, err := cs.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeadmCertsSecret, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("could not fetch secret: %v", err)
 	}

@@ -17,6 +17,7 @@ limitations under the License.
 package upgrade
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"time"
@@ -142,7 +143,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
 	}
 
 	// Check if the Job already exists and delete it
-	if _, err := client.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}); err == nil {
+	if _, err := client.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}); err == nil {
 		if err = deleteHealthCheckJob(client, ns, jobName); err != nil {
 			return err
 		}
@@ -156,7 +157,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
 	// Create the Job, but retry in case it is being currently deleted
 	klog.V(2).Infof("Creating Job %q in the namespace %q", jobName, ns)
 	err := wait.PollImmediate(time.Second*1, timeout, func() (bool, error) {
-		if _, err := client.BatchV1().Jobs(ns).Create(job); err != nil {
+		if _, err := client.BatchV1().Jobs(ns).Create(context.TODO(), job); err != nil {
 			klog.V(2).Infof("Could not create Job %q in the namespace %q, retrying: %v", jobName, ns, err)
 			lastError = err
 			return false, nil
@@ -172,7 +173,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
 
 	// Wait for the Job to complete
 	err = wait.PollImmediate(time.Second*1, timeout, func() (bool, error) {
-		job, err := client.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
+		job, err := client.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{})
 		if err != nil {
 			lastError = err
 			klog.V(2).Infof("could not get Job %q in the namespace %q, retrying: %v", jobName, ns, err)
@@ -202,7 +203,7 @@ func deleteHealthCheckJob(client clientset.Interface, ns, jobName string) error
 	deleteOptions := &metav1.DeleteOptions{
 		PropagationPolicy: &propagation,
 	}
-	if err := client.BatchV1().Jobs(ns).Delete(jobName, deleteOptions); err != nil {
+	if err := client.BatchV1().Jobs(ns).Delete(context.TODO(), jobName, deleteOptions); err != nil {
 		return errors.Wrapf(err, "could not delete Job %q in the namespace %q", jobName, ns)
 	}
 	return nil
@@ -213,7 +214,7 @@ func controlPlaneNodesReady(client clientset.Interface, _ *kubeadmapi.ClusterCon
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{
 		constants.LabelNodeRoleMaster: "",
 	}))
-	controlPlanes, err := client.CoreV1().Nodes().List(metav1.ListOptions{
+	controlPlanes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
 		LabelSelector: selector.String(),
 	})
 	if err != nil {
@@ -262,7 +263,7 @@ func getNotReadyDaemonSets(client clientset.Interface) ([]error, error) {
 	notReadyDaemonSets := []error{}
 	for _, component := range constants.ControlPlaneComponents {
 		dsName := constants.AddSelfHostedPrefix(component)
-		ds, err := client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{})
+		ds, err := client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(context.TODO(), dsName, metav1.GetOptions{})
 		if err != nil {
 			return nil, errors.Errorf("couldn't get daemonset %q in the %s namespace", dsName, metav1.NamespaceSystem)
 		}

@@ -17,6 +17,7 @@ limitations under the License.
 package upgrade
 
 import (
+	"context"
 	"os"
 
 	"github.com/pkg/errors"
@@ -120,7 +121,7 @@ func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.ClusterConfigurati
 
 		// If we're dry-running, we don't need to wait for the new DNS addon to become ready
 		if !dryRun {
-			dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(installedDeploymentName, metav1.GetOptions{})
+			dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(context.TODO(), installedDeploymentName, metav1.GetOptions{})
 			if err != nil {
 				return err
 			}

@@ -17,6 +17,7 @@ limitations under the License.
 package upgrade
 
 import (
+	"context"
 	"fmt"
 	"github.com/pkg/errors"
 
@@ -94,7 +95,7 @@ func (g *KubeVersionGetter) VersionFromCILabel(ciVersionLabel, description strin
 
 // KubeletVersions gets the versions of the kubelets in the cluster
 func (g *KubeVersionGetter) KubeletVersions() (map[string]uint16, error) {
-	nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{})
+	nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		return nil, errors.New("couldn't list all nodes in cluster")
 	}

@@ -17,6 +17,7 @@ limitations under the License.
 package uploadconfig
 
 import (
+	"context"
 	"reflect"
 	"testing"
 
@@ -120,7 +121,7 @@ func TestUploadConfiguration(t *testing.T) {
 				}
 			}
 			if tt.verifyResult {
-				controlPlaneCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeadmConfigConfigMap, metav1.GetOptions{})
+				controlPlaneCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeadmConfigConfigMap, metav1.GetOptions{})
 				if err != nil {
 					t2.Fatalf("Fail to query ConfigMap error = %v", err)
 				}

@@ -17,6 +17,7 @@ limitations under the License.
 package apiclient
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"time"
@@ -43,12 +44,12 @@ type ConfigMapMutator func(*v1.ConfigMap) error
 
 // CreateOrUpdateConfigMap creates a ConfigMap if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateConfigMap(client clientset.Interface, cm *v1.ConfigMap) error {
-	if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(cm); err != nil {
+	if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil {
 		if !apierrors.IsAlreadyExists(err) {
 			return errors.Wrap(err, "unable to create ConfigMap")
 		}
 
-		if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(cm); err != nil {
+		if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), cm); err != nil {
 			return errors.Wrap(err, "unable to update ConfigMap")
 		}
 	}
@@ -67,7 +68,7 @@ func CreateOrMutateConfigMap(client clientset.Interface, cm *v1.ConfigMap, mutat
 		Factor:   1.0,
 		Jitter:   0.1,
 	}, func() (bool, error) {
-		if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(cm); err != nil {
+		if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil {
 			lastError = err
 			if apierrors.IsAlreadyExists(err) {
 				lastError = MutateConfigMap(client, metav1.ObjectMeta{Namespace: cm.ObjectMeta.Namespace, Name: cm.ObjectMeta.Name}, mutator)
@@ -94,25 +95,25 @@ func MutateConfigMap(client clientset.Interface, meta metav1.ObjectMeta, mutator
 		Factor:   1.0,
 		Jitter:   0.1,
 	}, func() error {
-		configMap, err := client.CoreV1().ConfigMaps(meta.Namespace).Get(meta.Name, metav1.GetOptions{})
+		configMap, err := client.CoreV1().ConfigMaps(meta.Namespace).Get(context.TODO(), meta.Name, metav1.GetOptions{})
 		if err != nil {
 			return err
 		}
 		if err = mutator(configMap); err != nil {
 			return errors.Wrap(err, "unable to mutate ConfigMap")
 		}
-		_, err = client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Update(configMap)
+		_, err = client.CoreV1().ConfigMaps(configMap.ObjectMeta.Namespace).Update(context.TODO(), configMap)
 		return err
 	})
 }
 
 // CreateOrRetainConfigMap creates a ConfigMap if the target resource doesn't exist. If the resource exists already, this function will retain the resource instead.
 func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, configMapName string) error {
-	if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Get(configMapName, metav1.GetOptions{}); err != nil {
+	if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}); err != nil {
 		if !apierrors.IsNotFound(err) {
 			return nil
 		}
-		if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(cm); err != nil {
+		if _, err := client.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm); err != nil {
 			if !apierrors.IsAlreadyExists(err) {
 				return errors.Wrap(err, "unable to create ConfigMap")
 			}
@@ -123,12 +124,12 @@ func CreateOrRetainConfigMap(client clientset.Interface, cm *v1.ConfigMap, confi
 
 // CreateOrUpdateSecret creates a Secret if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error {
-	if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(secret); err != nil {
+	if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(context.TODO(), secret); err != nil {
 		if !apierrors.IsAlreadyExists(err) {
 			return errors.Wrap(err, "unable to create secret")
 		}
 
-		if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(secret); err != nil {
+		if _, err := client.CoreV1().Secrets(secret.ObjectMeta.Namespace).Update(context.TODO(), secret); err != nil {
 			return errors.Wrap(err, "unable to update secret")
 		}
 	}
@@ -137,7 +138,7 @@ func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error {
 
 // CreateOrUpdateServiceAccount creates a ServiceAccount if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAccount) error {
-	if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(sa); err != nil {
+	if _, err := client.CoreV1().ServiceAccounts(sa.ObjectMeta.Namespace).Create(context.TODO(), sa); err != nil {
 		// Note: We don't run .Update here afterwards as that's probably not required
 		// Only thing that could be updated is annotations/labels in .metadata, but we don't use that currently
 		if !apierrors.IsAlreadyExists(err) {
@@ -149,12 +150,12 @@ func CreateOrUpdateServiceAccount(client clientset.Interface, sa *v1.ServiceAcco
 
 // CreateOrUpdateDeployment creates a Deployment if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deployment) error {
-	if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(deploy); err != nil {
+	if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy); err != nil {
 		if !apierrors.IsAlreadyExists(err) {
 			return errors.Wrap(err, "unable to create deployment")
 		}
 
-		if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(deploy); err != nil {
+		if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Update(context.TODO(), deploy); err != nil {
 			return errors.Wrap(err, "unable to update deployment")
 		}
 	}
@@ -163,11 +164,11 @@ func CreateOrUpdateDeployment(client clientset.Interface, deploy *apps.Deploymen
 
 // CreateOrRetainDeployment creates a Deployment if the target resource doesn't exist. If the resource exists already, this function will retain the resource instead.
 func CreateOrRetainDeployment(client clientset.Interface, deploy *apps.Deployment, deployName string) error {
-	if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Get(deployName, metav1.GetOptions{}); err != nil {
+	if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Get(context.TODO(), deployName, metav1.GetOptions{}); err != nil {
 		if !apierrors.IsNotFound(err) {
 			return nil
 		}
-		if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(deploy); err != nil {
+		if _, err := client.AppsV1().Deployments(deploy.ObjectMeta.Namespace).Create(context.TODO(), deploy); err != nil {
 			if !apierrors.IsAlreadyExists(err) {
 				return errors.Wrap(err, "unable to create deployment")
 			}
@@ -178,12 +179,12 @@ func CreateOrRetainDeployment(client clientset.Interface, deploy *apps.Deploymen
 
 // CreateOrUpdateDaemonSet creates a DaemonSet if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) error {
-	if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(ds); err != nil {
+	if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Create(context.TODO(), ds); err != nil {
 		if !apierrors.IsAlreadyExists(err) {
 			return errors.Wrap(err, "unable to create daemonset")
 		}
 
-		if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(ds); err != nil {
+		if _, err := client.AppsV1().DaemonSets(ds.ObjectMeta.Namespace).Update(context.TODO(), ds); err != nil {
 			return errors.Wrap(err, "unable to update daemonset")
 		}
 	}
@@ -196,7 +197,7 @@ func DeleteDaemonSetForeground(client clientset.Interface, namespace, name strin
 	deleteOptions := &metav1.DeleteOptions{
 		PropagationPolicy: &foregroundDelete,
 	}
-	return client.AppsV1().DaemonSets(namespace).Delete(name, deleteOptions)
+	return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, deleteOptions)
 }
 
 // DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
@@ -205,17 +206,17 @@ func DeleteDeploymentForeground(client clientset.Interface, namespace, name stri
 	deleteOptions := &metav1.DeleteOptions{
 		PropagationPolicy: &foregroundDelete,
 	}
-	return client.AppsV1().Deployments(namespace).Delete(name, deleteOptions)
+	return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, deleteOptions)
 }
 
 // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error {
-	if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(role); err != nil {
+	if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(context.TODO(), role); err != nil {
 		if !apierrors.IsAlreadyExists(err) {
 			return errors.Wrap(err, "unable to create RBAC role")
 		}
 
-		if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(role); err != nil {
+		if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(context.TODO(), role); err != nil {
 			return errors.Wrap(err, "unable to update RBAC role")
 		}
 	}
@@ -224,12 +225,12 @@ func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error {
 
 // CreateOrUpdateRoleBinding creates a RoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.RoleBinding) error {
-	if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(roleBinding); err != nil {
+	if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(context.TODO(), roleBinding); err != nil {
 		if !apierrors.IsAlreadyExists(err) {
 			return errors.Wrap(err, "unable to create RBAC rolebinding")
 		}
 
-		if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(roleBinding); err != nil {
+		if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(context.TODO(), roleBinding); err != nil {
 			return errors.Wrap(err, "unable to update RBAC rolebinding")
 		}
 	}
@@ -238,12 +239,12 @@ func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.Rol
 
 // CreateOrUpdateClusterRole creates a ClusterRole if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.ClusterRole) error {
-	if _, err := client.RbacV1().ClusterRoles().Create(clusterRole); err != nil {
+	if _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole); err != nil {
 		if !apierrors.IsAlreadyExists(err) {
 			return errors.Wrap(err, "unable to create RBAC clusterrole")
 		}
 
-		if _, err := client.RbacV1().ClusterRoles().Update(clusterRole); err != nil {
+		if _, err := client.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole); err != nil {
 			return errors.Wrap(err, "unable to update RBAC clusterrole")
 		}
 	}
@@ -252,12 +253,12 @@ func CreateOrUpdateClusterRole(client clientset.Interface, clusterRole *rbac.Clu
 
 // CreateOrUpdateClusterRoleBinding creates a ClusterRoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBinding *rbac.ClusterRoleBinding) error {
-	if _, err := client.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding); err != nil {
+	if _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding); err != nil {
 		if !apierrors.IsAlreadyExists(err) {
 			return errors.Wrap(err, "unable to create RBAC clusterrolebinding")
 		}
 
-		if _, err := client.RbacV1().ClusterRoleBindings().Update(clusterRoleBinding); err != nil {
+		if _, err := client.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterRoleBinding); err != nil {
 			return errors.Wrap(err, "unable to update RBAC clusterrolebinding")
 		}
 	}
@@ -271,7 +272,7 @@ func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBin
 func PatchNodeOnce(client clientset.Interface, nodeName string, patchFn func(*v1.Node)) func() (bool, error) {
 	return func() (bool, error) {
 		// First get the node object
-		n, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+		n, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 		if err != nil {
 			// TODO this should only be for timeouts
 			return false, nil
@@ -301,7 +302,7 @@ func PatchNodeOnce(client clientset.Interface, nodeName string, patchFn func(*v1
 			return false, errors.Wrap(err, "failed to create two way merge patch")
 		}
 
-		if _, err := client.CoreV1().Nodes().Patch(n.Name, types.StrategicMergePatchType, patchBytes); err != nil {
+		if _, err := client.CoreV1().Nodes().Patch(context.TODO(), n.Name, types.StrategicMergePatchType, patchBytes); err != nil {
 			// TODO also check for timeouts
 			if apierrors.IsConflict(err) {
 				fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)")
@@ -332,7 +333,7 @@ func GetConfigMapWithRetry(client clientset.Interface, namespace, name string) (
 	var lastError error
 	err := wait.ExponentialBackoff(clientsetretry.DefaultBackoff, func() (bool, error) {
 		var err error
-		cm, err = client.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
+		cm, err = client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 		if err == nil {
 			return true, nil
 		}

@@ -17,6 +17,7 @@ limitations under the License.
 package apiclient
 
 import (
+	"context"
 	"testing"
 
 	"github.com/pkg/errors"
@@ -68,7 +69,7 @@ func TestPatchNodeNonErrorCases(t *testing.T) {
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
 			client := fake.NewSimpleClientset()
-			_, err := client.CoreV1().Nodes().Create(&tc.node)
+			_, err := client.CoreV1().Nodes().Create(context.TODO(), &tc.node)
 			if err != nil {
 				t.Fatalf("failed to create node to fake client: %v", err)
 			}
@@ -105,7 +106,7 @@ func TestCreateOrMutateConfigMap(t *testing.T) {
 	if err != nil {
 		t.Fatalf("error creating ConfigMap: %v", err)
 	}
-	_, err = client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})
+	_, err = client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), configMapName, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("error retrieving ConfigMap: %v", err)
 	}
@ -113,7 +114,7 @@ func TestCreateOrMutateConfigMap(t *testing.T) {
|
|||||||
|
|
||||||
func createClientAndConfigMap(t *testing.T) *fake.Clientset {
|
func createClientAndConfigMap(t *testing.T) *fake.Clientset {
|
||||||
client := fake.NewSimpleClientset()
|
client := fake.NewSimpleClientset()
|
||||||
_, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&v1.ConfigMap{
|
_, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &v1.ConfigMap{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: configMapName,
|
Name: configMapName,
|
||||||
Namespace: metav1.NamespaceSystem,
|
Namespace: metav1.NamespaceSystem,
|
||||||
@ -142,7 +143,7 @@ func TestMutateConfigMap(t *testing.T) {
|
|||||||
t.Fatalf("error mutating regular ConfigMap: %v", err)
|
t.Fatalf("error mutating regular ConfigMap: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cm, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})
|
cm, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), configMapName, metav1.GetOptions{})
|
||||||
if cm.Data["key"] != "some-other-value" {
|
if cm.Data["key"] != "some-other-value" {
|
||||||
t.Fatalf("ConfigMap mutation was invalid, has: %q", cm.Data["key"])
|
t.Fatalf("ConfigMap mutation was invalid, has: %q", cm.Data["key"])
|
||||||
}
|
}
|
||||||
@ -174,7 +175,7 @@ func TestMutateConfigMapWithConflict(t *testing.T) {
|
|||||||
t.Fatalf("error mutating conflicting ConfigMap: %v", err)
|
t.Fatalf("error mutating conflicting ConfigMap: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cm, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{})
|
cm, _ := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), configMapName, metav1.GetOptions{})
|
||||||
if cm.Data["key"] != "some-other-value" {
|
if cm.Data["key"] != "some-other-value" {
|
||||||
t.Fatalf("ConfigMap mutation with conflict was invalid, has: %q", cm.Data["key"])
|
t.Fatalf("ConfigMap mutation with conflict was invalid, has: %q", cm.Data["key"])
|
||||||
}
|
}
|
||||||
|
@ -95,7 +95,7 @@ func (w *KubeWaiter) WaitForPodsWithLabel(kvLabel string) error {
|
|||||||
lastKnownPodNumber := -1
|
lastKnownPodNumber := -1
|
||||||
return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) {
|
return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) {
|
||||||
listOpts := metav1.ListOptions{LabelSelector: kvLabel}
|
listOpts := metav1.ListOptions{LabelSelector: kvLabel}
|
||||||
pods, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
|
pods, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Fprintf(w.writer, "[apiclient] Error getting Pods with label selector %q [%v]\n", kvLabel, err)
|
fmt.Fprintf(w.writer, "[apiclient] Error getting Pods with label selector %q [%v]\n", kvLabel, err)
|
||||||
return false, nil
|
return false, nil
|
||||||
@ -123,7 +123,7 @@ func (w *KubeWaiter) WaitForPodsWithLabel(kvLabel string) error {
|
|||||||
// WaitForPodToDisappear blocks until it timeouts or gets a "NotFound" response from the API Server when getting the Static Pod in question
|
// WaitForPodToDisappear blocks until it timeouts or gets a "NotFound" response from the API Server when getting the Static Pod in question
|
||||||
func (w *KubeWaiter) WaitForPodToDisappear(podName string) error {
|
func (w *KubeWaiter) WaitForPodToDisappear(podName string) error {
|
||||||
return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) {
|
return wait.PollImmediate(kubeadmconstants.APICallRetryInterval, w.timeout, func() (bool, error) {
|
||||||
_, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).Get(podName, metav1.GetOptions{})
|
_, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||||
if apierrors.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
fmt.Printf("[apiclient] The old Pod %q is now removed (which is desired)\n", podName)
|
fmt.Printf("[apiclient] The old Pod %q is now removed (which is desired)\n", podName)
|
||||||
return true, nil
|
return true, nil
|
||||||
@ -241,7 +241,7 @@ func (w *KubeWaiter) WaitForStaticPodHashChange(nodeName, component, previousHas
|
|||||||
func getStaticPodSingleHash(client clientset.Interface, nodeName string, component string) (string, error) {
|
func getStaticPodSingleHash(client clientset.Interface, nodeName string, component string) (string, error) {
|
||||||
|
|
||||||
staticPodName := fmt.Sprintf("%s-%s", component, nodeName)
|
staticPodName := fmt.Sprintf("%s-%s", component, nodeName)
|
||||||
staticPod, err := client.CoreV1().Pods(metav1.NamespaceSystem).Get(staticPodName, metav1.GetOptions{})
|
staticPod, err := client.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), staticPodName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
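The KubeWaiter hunks above keep their poll loops on context.TODO(). A hedged sketch of how such a loop can honor cancelation once a real context is available, using wait.PollImmediateUntil with ctx.Done() as the stop channel (the helper name is illustrative, not part of this commit):

package waitsketch

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodGone mirrors WaitForPodToDisappear above, but stops early when
// the caller's context is canceled instead of only when the timeout elapses.
func waitForPodGone(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	return wait.PollImmediateUntil(time.Second, func() (bool, error) {
		_, err := client.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // the pod is gone, which is what we were waiting for
		}
		return false, nil // still present (or a transient error): keep polling
	}, ctx.Done())
}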
@@ -17,6 +17,7 @@ limitations under the License.
 package config
 
 import (
+	"context"
 	"crypto/x509"
 	"fmt"
 	"io"
@@ -115,7 +116,7 @@ func getNodeRegistration(kubeconfigDir string, client clientset.Interface, nodeR
 	}
 
 	// gets the corresponding node and retrieves attributes stored there.
-	node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+	node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 	if err != nil {
 		return errors.Wrap(err, "failed to get corresponding node")
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package config
 
 import (
+	"context"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -342,7 +343,7 @@ func TestGetNodeRegistration(t *testing.T) {
 		client := clientsetfake.NewSimpleClientset()
 
 		if rt.node != nil {
-			_, err := client.CoreV1().Nodes().Create(rt.node)
+			_, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node)
 			if err != nil {
 				t.Errorf("couldn't create Node")
 				return
@@ -618,7 +619,7 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {
 		client := clientsetfake.NewSimpleClientset()
 
 		if rt.node != nil {
-			_, err := client.CoreV1().Nodes().Create(rt.node)
+			_, err := client.CoreV1().Nodes().Create(context.TODO(), rt.node)
 			if err != nil {
 				t.Errorf("couldn't create Node")
 				return
@@ -17,6 +17,7 @@ limitations under the License.
 package tests
 
 import (
+	"context"
 	"testing"
 
 	corev1 "k8s.io/api/core/v1"
@@ -37,7 +38,7 @@ func TestFakeClientSetFiltering(t *testing.T) {
 		testSA("nsB", "sa-3"),
 	)
 
-	saList1, err := tc.CoreV1().ServiceAccounts("nsA").List(metav1.ListOptions{})
+	saList1, err := tc.CoreV1().ServiceAccounts("nsA").List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("ServiceAccounts.List: %s", err)
 	}
@@ -50,7 +51,7 @@ func TestFakeClientSetFiltering(t *testing.T) {
 		}
 	}
 
-	saList2, err := tc.CoreV1().ServiceAccounts("nsB").List(metav1.ListOptions{})
+	saList2, err := tc.CoreV1().ServiceAccounts("nsB").List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("ServiceAccounts.List: %s", err)
 	}
@@ -63,7 +64,7 @@ func TestFakeClientSetFiltering(t *testing.T) {
 		}
 	}
 
-	pod1, err := tc.CoreV1().Pods("nsA").Get("pod-1", metav1.GetOptions{})
+	pod1, err := tc.CoreV1().Pods("nsA").Get(context.TODO(), "pod-1", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Pods.Get: %s", err)
 	}
@@ -74,12 +75,12 @@ func TestFakeClientSetFiltering(t *testing.T) {
 		t.Fatalf("Expected to find pod nsA/pod-1t, got %s/%s", pod1.Namespace, pod1.Name)
 	}
 
-	wrongPod, err := tc.CoreV1().Pods("nsB").Get("pod-1", metav1.GetOptions{})
+	wrongPod, err := tc.CoreV1().Pods("nsB").Get(context.TODO(), "pod-1", metav1.GetOptions{})
 	if err == nil {
 		t.Fatalf("Pods.Get: expected nsB/pod-1 not to match, but it matched %s/%s", wrongPod.Namespace, wrongPod.Name)
 	}
 
-	allPods, err := tc.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
+	allPods, err := tc.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Pods.List: %s", err)
 	}
@@ -87,7 +88,7 @@ func TestFakeClientSetFiltering(t *testing.T) {
 		t.Fatalf("Expected %d pods to match, got %d", expected, actual)
 	}
 
-	allSAs, err := tc.CoreV1().ServiceAccounts(metav1.NamespaceAll).List(metav1.ListOptions{})
+	allSAs, err := tc.CoreV1().ServiceAccounts(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("ServiceAccounts.List: %s", err)
 	}
@@ -102,12 +103,12 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) {
 		testPod("nsA", "pod-1"),
 	)
 
-	_, err := tc.CoreV1().Namespaces().Create(testNamespace("nsB"))
+	_, err := tc.CoreV1().Namespaces().Create(context.TODO(), testNamespace("nsB"))
 	if err != nil {
 		t.Fatalf("Namespaces.Create: %s", err)
 	}
 
-	allNS, err := tc.CoreV1().Namespaces().List(metav1.ListOptions{})
+	allNS, err := tc.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Namespaces.List: %s", err)
 	}
@@ -115,12 +116,12 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) {
 		t.Fatalf("Expected %d namespaces to match, got %d", expected, actual)
 	}
 
-	_, err = tc.CoreV1().Pods("nsB").Create(testPod("", "pod-1"))
+	_, err = tc.CoreV1().Pods("nsB").Create(context.TODO(), testPod("", "pod-1"))
 	if err != nil {
 		t.Fatalf("Pods.Create nsB/pod-1: %s", err)
 	}
 
-	podB1, err := tc.CoreV1().Pods("nsB").Get("pod-1", metav1.GetOptions{})
+	podB1, err := tc.CoreV1().Pods("nsB").Get(context.TODO(), "pod-1", metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Pods.Get nsB/pod-1: %s", err)
 	}
@@ -131,17 +132,17 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) {
 		t.Fatalf("Expected to find pod nsB/pod-1t, got %s/%s", podB1.Namespace, podB1.Name)
 	}
 
-	_, err = tc.CoreV1().Pods("nsA").Create(testPod("", "pod-1"))
+	_, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("", "pod-1"))
 	if err == nil {
 		t.Fatalf("Expected Pods.Create to fail with already exists error")
 	}
 
-	_, err = tc.CoreV1().Pods("nsA").Update(testPod("", "pod-1"))
+	_, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-1"))
 	if err != nil {
 		t.Fatalf("Pods.Update nsA/pod-1: %s", err)
 	}
 
-	_, err = tc.CoreV1().Pods("nsA").Create(testPod("nsB", "pod-2"))
+	_, err = tc.CoreV1().Pods("nsA").Create(context.TODO(), testPod("nsB", "pod-2"))
 	if err == nil {
 		t.Fatalf("Expected Pods.Create to fail with bad request from namespace mismtach")
 	}
@@ -149,7 +150,7 @@ func TestFakeClientsetInheritsNamespace(t *testing.T) {
 		t.Fatalf("Expected Pods.Create error to provide object and request namespaces, got %q", err)
 	}
 
-	_, err = tc.CoreV1().Pods("nsA").Update(testPod("", "pod-3"))
+	_, err = tc.CoreV1().Pods("nsA").Update(context.TODO(), testPod("", "pod-3"))
 	if err == nil {
 		t.Fatalf("Expected Pods.Update nsA/pod-3 to fail with not found error")
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package bootstrap
 
 import (
+	"context"
 	"strings"
 	"time"
 
@@ -242,7 +243,7 @@ func (e *Signer) signConfigMap() {
 }
 
 func (e *Signer) updateConfigMap(cm *v1.ConfigMap) {
-	_, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(cm)
+	_, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(context.TODO(), cm)
 	if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {
 		klog.V(3).Infof("Error updating ConfigMap: %v", err)
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package bootstrap
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -195,7 +196,7 @@ func (tc *TokenCleaner) evalSecret(o interface{}) {
 	if len(secret.UID) > 0 {
 		options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}}
 	}
-	err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(secret.Name, options)
+	err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, options)
 	// NotFound isn't a real error (it's already been deleted)
 	// Conflict isn't a real error (the UID precondition failed)
 	if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {
@@ -18,6 +18,7 @@ limitations under the License.
 package approver
 
 import (
+	"context"
 	"crypto/x509"
 	"fmt"
 	"reflect"
@@ -129,7 +130,7 @@ func (a *sarApprover) authorize(csr *capi.CertificateSigningRequest, rattrs auth
 			ResourceAttributes: &rattrs,
 		},
 	}
-	sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(sar)
+	sar, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar)
 	if err != nil {
 		return false, err
 	}
@@ -21,6 +21,7 @@ limitations under the License.
 package cleaner
 
 import (
+	"context"
 	"crypto/x509"
 	"encoding/pem"
 	"fmt"
@@ -108,7 +109,7 @@ func (ccc *CSRCleanerController) handle(csr *capi.CertificateSigningRequest) err
 		return err
 	}
 	if isIssuedPastDeadline(csr) || isDeniedPastDeadline(csr) || isPendingPastDeadline(csr) || isIssuedExpired {
-		if err := ccc.csrClient.Delete(csr.Name, nil); err != nil {
+		if err := ccc.csrClient.Delete(context.TODO(), csr.Name, nil); err != nil {
 			return fmt.Errorf("unable to delete CSR %q: %v", csr.Name, err)
 		}
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package rootcacertpublisher
 
 import (
+	"context"
 	"fmt"
 	"reflect"
 	"time"
@@ -178,7 +179,7 @@ func (c *Publisher) syncNamespace(ns string) error {
 	cm, err := c.cmLister.ConfigMaps(ns).Get(RootCACertConfigMapName)
 	switch {
 	case apierrors.IsNotFound(err):
-		_, err := c.client.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
+		_, err := c.client.CoreV1().ConfigMaps(ns).Create(context.TODO(), &v1.ConfigMap{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: RootCACertConfigMapName,
 			},
@@ -201,7 +202,7 @@ func (c *Publisher) syncNamespace(ns string) error {
 
 	cm.Data = data
 
-	_, err = c.client.CoreV1().ConfigMaps(ns).Update(cm)
+	_, err = c.client.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm)
 	return err
 }
 
@@ -18,6 +18,7 @@ limitations under the License.
 package signer
 
 import (
+	"context"
 	"encoding/pem"
 	"fmt"
 	"time"
@@ -94,7 +95,7 @@ func (s *signer) handle(csr *capi.CertificateSigningRequest) error {
 	if err != nil {
 		return fmt.Errorf("error auto signing csr: %v", err)
 	}
-	_, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(csr)
+	_, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr)
 	if err != nil {
 		return fmt.Errorf("error updating signature for csr: %v", err)
 	}
@@ -120,11 +120,11 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
 	lw := &cache.ListWatch{
 		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 			options.FieldSelector = fieldSelector
-			return b.CoreClient.Secrets(b.Namespace).List(options)
+			return b.CoreClient.Secrets(b.Namespace).List(context.TODO(), options)
 		},
 		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 			options.FieldSelector = fieldSelector
-			return b.CoreClient.Secrets(b.Namespace).Watch(options)
+			return b.CoreClient.Secrets(b.Namespace).Watch(context.TODO(), options)
 		},
 	}
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
@@ -157,7 +157,7 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
 	if !valid {
 		klog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Namespace, sa.Name)
 		// try to delete the secret containing the invalid token
-		if err := b.CoreClient.Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
+		if err := b.CoreClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
 			klog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Namespace, sa.Name, err)
 		}
 		// continue watching for good tokens
@@ -186,7 +186,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount,
 
 	// Try token review first
 	tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}}
-	if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(tokenReview); err == nil {
+	if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(context.TODO(), tokenReview); err == nil {
 		if !tokenResult.Status.Authenticated {
 			klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Namespace, sa.Name)
 			return nil, false, nil
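Note the asymmetry in the hunk above: cache.ListWatch closures keep their options-only signatures, so the context cannot be threaded in from outside and has to be created inside the closure. A minimal sketch of that construction (the function name is illustrative, not part of this commit):

package listwatchsketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newSecretListWatch builds a ListWatch over Secrets restricted by a field
// selector; List and Watch now need a context, supplied as context.TODO()
// because cache.ListWatch fixes the closure signatures.
func newSecretListWatch(client kubernetes.Interface, namespace, fieldSelector string) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector
			return client.CoreV1().Secrets(namespace).List(context.TODO(), options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector
			return client.CoreV1().Secrets(namespace).Watch(context.TODO(), options)
		},
	}
}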
@@ -17,6 +17,7 @@ limitations under the License.
 package controller
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"sync"
@@ -174,7 +175,7 @@ func (ts *tokenSourceImpl) Token() (*oauth2.Token, error) {
 			return false, nil
 		}
 
-		tr, inErr := ts.coreClient.ServiceAccounts(ts.namespace).CreateToken(ts.serviceAccountName, &v1authenticationapi.TokenRequest{
+		tr, inErr := ts.coreClient.ServiceAccounts(ts.namespace).CreateToken(context.TODO(), ts.serviceAccountName, &v1authenticationapi.TokenRequest{
 			Spec: v1authenticationapi.TokenRequestSpec{
 				ExpirationSeconds: utilpointer.Int64Ptr(ts.expirationSeconds),
 			},
@@ -154,7 +154,7 @@ func (cnc *CloudNodeController) UpdateNodeStatus(ctx context.Context) {
 		return
 	}
 
-	nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
+	nodes, err := cnc.kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
 	if err != nil {
 		klog.Errorf("Error monitoring node status: %v", err)
 		return
@@ -352,7 +352,7 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod
 		return
 	}
 
-	curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
+	curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("failed to get node %s: %v", node.Name, err))
 		return
@@ -376,7 +376,7 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod
 	})
 
 	err = clientretry.RetryOnConflict(UpdateNodeSpecBackoff, func() error {
-		curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
+		curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
 		if err != nil {
 			return err
 		}
@@ -385,7 +385,7 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod
 			modify(curNode)
 		}
 
-		_, err = cnc.kubeClient.CoreV1().Nodes().Update(curNode)
+		_, err = cnc.kubeClient.CoreV1().Nodes().Update(context.TODO(), curNode)
 		if err != nil {
 			return err
 		}
@@ -773,7 +773,7 @@ func Test_reconcileNodeLabels(t *testing.T) {
 				t.Errorf("unexpected error")
 			}
 
-			actualNode, err := clientset.CoreV1().Nodes().Get("node01", metav1.GetOptions{})
+			actualNode, err := clientset.CoreV1().Nodes().Get(context.TODO(), "node01", metav1.GetOptions{})
 			if err != nil {
 				t.Fatalf("error getting updated node: %v", err)
 			}
@@ -190,7 +190,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes() {
 				fmt.Sprintf("Deleting node %v because it does not exist in the cloud provider", node.Name),
 				"Node %s event: %s", node.Name, deleteNodeEvent)
 
-			if err := c.kubeClient.CoreV1().Nodes().Delete(node.Name, nil); err != nil {
+			if err := c.kubeClient.CoreV1().Nodes().Delete(context.TODO(), node.Name, nil); err != nil {
 				klog.Errorf("unable to delete node %q: %v", node.Name, err)
 			}
 		}
@@ -17,6 +17,7 @@ limitations under the License.
 package clusterroleaggregation
 
 import (
+	"context"
 	"fmt"
 	"sort"
 	"time"
@@ -126,7 +127,7 @@ func (c *ClusterRoleAggregationController) syncClusterRole(key string) error {
 	for _, rule := range newPolicyRules {
 		clusterRole.Rules = append(clusterRole.Rules, *rule.DeepCopy())
 	}
-	_, err = c.clusterRoleClient.ClusterRoles().Update(clusterRole)
+	_, err = c.clusterRoleClient.ClusterRoles().Update(context.TODO(), clusterRole)
 
 	return err
 }
@@ -17,6 +17,7 @@ limitations under the License.
 package controller
 
 import (
+	"context"
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
@@ -419,7 +420,7 @@ type RealRSControl struct {
 var _ RSControlInterface = &RealRSControl{}
 
 func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
-	_, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data)
+	_, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data)
 	return err
 }
 
@@ -439,7 +440,7 @@ type RealControllerRevisionControl struct {
 var _ ControllerRevisionControlInterface = &RealControllerRevisionControl{}
 
 func (r RealControllerRevisionControl) PatchControllerRevision(namespace, name string, data []byte) error {
-	_, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(name, types.StrategicMergePatchType, data)
+	_, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data)
 	return err
 }
 
@@ -536,7 +537,7 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v
 }
 
 func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
-	_, err := r.KubeClient.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, data)
+	_, err := r.KubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data)
 	return err
 }
 
@@ -576,7 +577,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT
 	if len(labels.Set(pod.Labels)) == 0 {
 		return fmt.Errorf("unable to create pods, no labels")
 	}
-	newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(pod)
+	newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod)
 	if err != nil {
 		// only send an event if the namespace isn't terminating
 		if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
@@ -601,7 +602,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
 		return fmt.Errorf("object does not have ObjectMeta, %v", err)
 	}
 	klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
-	if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil && !apierrors.IsNotFound(err) {
+	if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, nil); err != nil && !apierrors.IsNotFound(err) {
 		r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
 		return fmt.Errorf("unable to delete pods: %v", err)
 	}
@@ -1013,10 +1014,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
 		// First we try getting node from the API server cache, as it's cheaper. If it fails
 		// we get it from etcd to be sure to have fresh data.
 		if firstTry {
-			oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
+			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
 			firstTry = false
 		} else {
-			oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 		}
 		if err != nil {
 			return err
@@ -1070,10 +1071,10 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t
 		// First we try getting node from the API server cache, as it's cheaper. If it fails
 		// we get it from etcd to be sure to have fresh data.
 		if firstTry {
-			oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
+			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
 			firstTry = false
 		} else {
-			oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 		}
 		if err != nil {
 			return err
@@ -1118,7 +1119,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
 		return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
 	}
 
-	_, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes)
+	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes)
 	return err
 }
 
@@ -1147,10 +1148,10 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la
 		// First we try getting node from the API server cache, as it's cheaper. If it fails
 		// we get it from etcd to be sure to have fresh data.
 		if firstTry {
-			node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
+			node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
 			firstTry = false
 		} else {
-			node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+			node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 		}
 		if err != nil {
 			return err
@@ -1177,7 +1178,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la
 		if err != nil {
 			return fmt.Errorf("failed to create a two-way merge patch: %v", err)
 		}
-		if _, err := kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil {
+		if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes); err != nil {
 			return fmt.Errorf("failed to patch the node: %v", err)
 		}
 		return nil
@@ -1185,7 +1186,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la
 }
 
 func getOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, name string) (*v1.ServiceAccount, error) {
-	sa, err := coreClient.ServiceAccounts(namespace).Get(name, metav1.GetOptions{})
+	sa, err := coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 	if err == nil {
 		return sa, nil
 	}
@@ -1195,17 +1196,17 @@ func getOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, nam
 
 	// Create the namespace if we can't verify it exists.
 	// Tolerate errors, since we don't know whether this component has namespace creation permissions.
-	if _, err := coreClient.Namespaces().Get(namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) {
+	if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) {
-		if _, err = coreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}); err != nil && !apierrors.IsAlreadyExists(err) {
+		if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}); err != nil && !apierrors.IsAlreadyExists(err) {
 			klog.Warningf("create non-exist namespace %s failed:%v", namespace, err)
 		}
 	}
 
 	// Create the service account
-	sa, err = coreClient.ServiceAccounts(namespace).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}})
+	sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}})
 	if apierrors.IsAlreadyExists(err) {
 		// If we're racing to init and someone else already created it, re-fetch
-		return coreClient.ServiceAccounts(namespace).Get(name, metav1.GetOptions{})
+		return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 	}
 	return sa, err
 }
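The taint and label helpers above all share one read-modify-write pattern: Get the node (from the apiserver cache first, then uncached), mutate it, and Patch or Update under conflict retry. A condensed, hypothetical sketch of that loop with context threaded through both calls; note that later client-go releases additionally take an options value on writes (metav1.UpdateOptions), which this intermediate commit does not yet add:

package retrysketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// addNodeLabel is illustrative only: fetch the latest node, mutate it, and
// retry the write whenever the apiserver reports a resource-version conflict.
func addNodeLabel(client kubernetes.Interface, nodeName, key, value string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if node.Labels == nil {
			node.Labels = map[string]string{}
		}
		node.Labels[key] = value
		// Call shape matches this commit; released client-go also takes metav1.UpdateOptions.
		_, err = client.CoreV1().Nodes().Update(context.TODO(), node)
		return err
	})
}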
@@ -108,7 +108,7 @@ func (jm *Controller) syncAll() {
 	// we must also see that the parent CronJob has non-nil DeletionTimestamp (see #42639).
 	// Note that this only works because we are NOT using any caches here.
 	jobListFunc := func(opts metav1.ListOptions) (runtime.Object, error) {
-		return jm.kubeClient.BatchV1().Jobs(metav1.NamespaceAll).List(opts)
+		return jm.kubeClient.BatchV1().Jobs(metav1.NamespaceAll).List(context.TODO(), opts)
 	}
 
 	js := make([]batchv1.Job, 0)
@@ -128,7 +128,7 @@ func (jm *Controller) syncAll() {
 
 	klog.V(4).Infof("Found %d jobs", len(js))
 	cronJobListFunc := func(opts metav1.ListOptions) (runtime.Object, error) {
-		return jm.kubeClient.BatchV1beta1().CronJobs(metav1.NamespaceAll).List(opts)
+		return jm.kubeClient.BatchV1beta1().CronJobs(metav1.NamespaceAll).List(context.TODO(), opts)
 	}
 
 	jobsBySj := groupJobsByParent(js)
@@ -17,6 +17,7 @@ limitations under the License.
 package cronjob
 
 import (
+	"context"
 	"fmt"
 	"sync"
 
@@ -44,7 +45,7 @@ type realSJControl struct {
 var _ sjControlInterface = &realSJControl{}
 
 func (c *realSJControl) UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
-	return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(sj)
+	return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(context.TODO(), sj)
 }
 
 // fakeSJControl is the default implementation of sjControlInterface.
@@ -102,24 +103,24 @@ func copyAnnotations(template *batchv1beta1.JobTemplateSpec) labels.Set {
 }
 
 func (r realJobControl) GetJob(namespace, name string) (*batchv1.Job, error) {
-	return r.KubeClient.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{})
+	return r.KubeClient.BatchV1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 }
 
 func (r realJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
-	return r.KubeClient.BatchV1().Jobs(namespace).Update(job)
+	return r.KubeClient.BatchV1().Jobs(namespace).Update(context.TODO(), job)
 }
 
 func (r realJobControl) PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error) {
-	return r.KubeClient.BatchV1().Jobs(namespace).Patch(name, pt, data, subresources...)
+	return r.KubeClient.BatchV1().Jobs(namespace).Patch(context.TODO(), name, pt, data, subresources...)
 }
 
 func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
-	return r.KubeClient.BatchV1().Jobs(namespace).Create(job)
+	return r.KubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job)
 }
 
 func (r realJobControl) DeleteJob(namespace string, name string) error {
 	background := metav1.DeletePropagationBackground
-	return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &background})
+	return r.KubeClient.BatchV1().Jobs(namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{PropagationPolicy: &background})
 }
 
 type fakeJobControl struct {
@@ -217,11 +218,11 @@ type realPodControl struct {
 var _ podControlInterface = &realPodControl{}
 
 func (r realPodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error) {
-	return r.KubeClient.CoreV1().Pods(namespace).List(opts)
+	return r.KubeClient.CoreV1().Pods(namespace).List(context.TODO(), opts)
 }
 
 func (r realPodControl) DeletePod(namespace string, name string) error {
-	return r.KubeClient.CoreV1().Pods(namespace).Delete(name, nil)
+	return r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, nil)
 }
 
 type fakePodControl struct {
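A design note on the cronjob hunks above: the sjControl, jobControl, and podControl interfaces deliberately keep context out of their own method signatures, so only the "real" implementations touch the client and absorb the new argument, while the fakes used in tests are untouched. A stripped-down sketch of that layering (names ending in Sketch are hypothetical):

package cronjobsketch

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// jobControlSketch is the context-free seam the controller codes against.
type jobControlSketch interface {
	GetJob(namespace, name string) (*batchv1.Job, error)
}

// realJobControlSketch injects context.TODO() at the client boundary, so the
// interface (and every fake implementing it) never has to change.
type realJobControlSketch struct {
	KubeClient kubernetes.Interface
}

var _ jobControlSketch = realJobControlSketch{}

func (r realJobControlSketch) GetJob(namespace, name string) (*batchv1.Job, error) {
	return r.KubeClient.BatchV1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}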
@@ -17,6 +17,7 @@ limitations under the License.
 package daemon
 
 import (
+	"context"
 	"fmt"
 	"reflect"
 	"sort"
@@ -718,7 +719,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, e
 	// If any adoptions are attempted, we should first recheck for deletion with
 	// an uncached quorum read sometime after listing Pods (see #42639).
 	dsNotDeleted := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
+		fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -1031,12 +1032,12 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
 		toUpdate.Status.NumberAvailable = int32(numberAvailable)
 		toUpdate.Status.NumberUnavailable = int32(numberUnavailable)
 
-		if _, updateErr = dsClient.UpdateStatus(toUpdate); updateErr == nil {
+		if _, updateErr = dsClient.UpdateStatus(context.TODO(), toUpdate); updateErr == nil {
 			return nil
 		}
 
 		// Update the set with the latest resource version for the next poll
-		if toUpdate, getErr = dsClient.Get(ds.Name, metav1.GetOptions{}); getErr != nil {
+		if toUpdate, getErr = dsClient.Get(context.TODO(), ds.Name, metav1.GetOptions{}); getErr != nil {
 			// If the GET fails we can't trust status.Replicas anymore. This error
 			// is bound to be more interesting than the update failure.
 			return getErr
@@ -18,6 +18,7 @@ package daemon
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"reflect"
 	"sort"
@@ -94,7 +95,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
 		if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
 			toUpdate := history.DeepCopy()
 			toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
-			history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
+			history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -129,7 +130,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
 		if cur.Revision < currRevision {
 			toUpdate := cur.DeepCopy()
 			toUpdate.Revision = currRevision
-			_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
+			_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -170,7 +171,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.
 			continue
 		}
 		// Clean up
-		err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(history.Name, nil)
+		err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), history.Name, nil)
 		if err != nil {
 			return err
 		}
@@ -219,14 +220,14 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor
 				toUpdate.Labels = make(map[string]string)
 			}
 			toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
-			_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate)
+			_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(context.TODO(), toUpdate)
 			if err != nil {
 				return nil, err
 			}
 		}
 	}
 	// Remove duplicates
-	err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(cur.Name, nil)
+	err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), cur.Name, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -253,7 +254,7 @@ func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*app
 	// If any adoptions are attempted, we should first recheck for deletion with
 	// an uncached quorum read sometime after listing Pods (see #42639).
 	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
+		fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -322,10 +323,10 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
 		Revision: revision,
 	}
 
-	history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(history)
+	history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), history)
 	if outerErr := err; errors.IsAlreadyExists(outerErr) {
 		// TODO: Is it okay to get from historyLister?
-		existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
+		existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
 		if getErr != nil {
 			return nil, getErr
 		}
@@ -340,7 +341,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
 
 		// Handle name collisions between different history
 		// Get the latest DaemonSet from the API server to make sure collision count is only increased when necessary
-		currDS, getErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
+		currDS, getErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
 		if getErr != nil {
 			return nil, getErr
 		}
@@ -352,7 +353,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
 			currDS.Status.CollisionCount = new(int32)
 		}
 		*currDS.Status.CollisionCount++
-		_, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(currDS)
+		_, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(context.TODO(), currDS)
 		if updateErr != nil {
 			return nil, updateErr
 		}
@@ -21,6 +21,7 @@ limitations under the License.
 package deployment
 
 import (
+	"context"
 	"fmt"
 	"reflect"
 	"time"
@@ -508,7 +509,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment)
 	// If any adoptions are attempted, we should first recheck for deletion with
 	// an uncached quorum read sometime after listing ReplicaSets (see #42639).
 	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
+		fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(context.TODO(), d.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
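The canAdoptFunc in the hunk above (and its twins in the daemon hunks) exists because of the adoption race tracked in #42639: a controller must not adopt orphans based on a stale cache read, so it re-fetches its own object with an uncached read and verifies the UID before adopting. A self-contained sketch of that check (the helper name is illustrative, not part of this commit):

package adoptsketch

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// freshDeployment re-reads the deployment straight from the apiserver and
// confirms it is still the same object (same UID) the controller started with.
func freshDeployment(client kubernetes.Interface, namespace, name string, uid types.UID) (*appsv1.Deployment, error) {
	fresh, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if fresh.UID != uid {
		return nil, fmt.Errorf("original Deployment %v/%v is gone: got uid %v, wanted %v", namespace, name, fresh.UID, uid)
	}
	return fresh, nil
}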
@@ -588,7 +589,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
 		dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
 		if d.Status.ObservedGeneration < d.Generation {
 			d.Status.ObservedGeneration = d.Generation
-			dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
+			dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
 		}
 		return nil
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package deployment
 
 import (
+	"context"
 	"fmt"
 	"reflect"
 	"time"
@@ -112,7 +113,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new
 
 	newDeployment := d
 	newDeployment.Status = newStatus
-	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
+	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment)
 	return err
 }
 
@@ -17,6 +17,7 @@ limitations under the License.
 package deployment

 import (
+	"context"
 	"fmt"
 	"strconv"

@@ -113,7 +114,7 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess
 func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
 	klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
 	setRollbackTo(d, nil)
-	_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d)
+	_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d)
 	return err
 }

@@ -17,6 +17,7 @@ limitations under the License.
 package deployment

 import (
+	"context"
 	"fmt"
 	"reflect"
 	"sort"
@@ -97,7 +98,7 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
 	}

 	var err error
-	d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
+	d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
 	return err
 }

@@ -154,7 +155,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 		minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
 		if annotationsUpdated || minReadySecondsNeedsUpdate {
 			rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
-			return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
+			return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy)
 		}

 		// Should use the revision in existingNewRS's annotation, since it set by before
@@ -172,7 +173,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old

 		if needsUpdate {
 			var err error
-			if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
+			if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d); err != nil {
 				return nil, err
 			}
 		}
@@ -219,7 +220,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 	// hash collisions. If there is any other error, we need to report it in the status of
 	// the Deployment.
 	alreadyExists := false
-	createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(&newRS)
+	createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS)
 	switch {
 	// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
 	case errors.IsAlreadyExists(err):
@@ -251,7 +252,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 		*d.Status.CollisionCount++
 		// Update the collisionCount for the Deployment and let it requeue by returning the original
 		// error.
-		_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
+		_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
 		if dErr == nil {
 			klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
 		}
@@ -267,7 +268,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 			// We don't really care about this error at this point, since we have a bigger issue to report.
 			// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
 			// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
-			_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
+			_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
 		}
 		dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
 		return nil, err
@@ -284,7 +285,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 		needsUpdate = true
 	}
 	if needsUpdate {
-		_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
+		_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d)
 	}
 	return createdRS, err
 }
@@ -419,7 +420,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in
 	rsCopy := rs.DeepCopy()
 	*(rsCopy.Spec.Replicas) = newScale
 	deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
-	rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
+	rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy)
 	if err == nil && sizeNeedsUpdate {
 		scaled = true
 		dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
@@ -457,7 +458,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
 			continue
 		}
 		klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
-		if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
+		if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(context.TODO(), rs.Name, nil); err != nil && !errors.IsNotFound(err) {
 			// Return error instead of aggregating and continuing DELETEs on the theory
 			// that we may be overloading the api server.
 			return err
@@ -477,7 +478,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet,

 	newDeployment := d
 	newDeployment.Status = newStatus
-	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
+	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment)
 	return err
 }

@@ -17,6 +17,7 @@ limitations under the License.
 package util

 import (
+	"context"
 	"fmt"
 	"math"
 	"sort"
@@ -545,7 +546,7 @@ func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface)
 // RsListFromClient returns an rsListFunc that wraps the given client.
 func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
 	return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
-		rsList, err := c.ReplicaSets(namespace).List(options)
+		rsList, err := c.ReplicaSets(namespace).List(context.TODO(), options)
 		if err != nil {
 			return nil, err
 		}
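RsListFromClient above shows where context.TODO() tends to accumulate in this refactor: inside adapter closures that pin the client-go signature at the boundary, so the rest of the controller can keep its context-free function types. A sketch of the same wrapping pattern under those assumptions; the names listFunc and fromClient are invented for illustration, not taken from this commit.

package main

import (
	"context"
	"fmt"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// listFunc mirrors the shape of RsListFunc: namespace and options in, slice of pointers out.
type listFunc func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error)

// fromClient wraps a typed AppsV1 client, pinning context.TODO() at the boundary
// so existing callers do not need a context parameter yet.
func fromClient(c appsclient.AppsV1Interface) listFunc {
	return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
		rsList, err := c.ReplicaSets(namespace).List(context.TODO(), options)
		if err != nil {
			return nil, err
		}
		ret := make([]*apps.ReplicaSet, 0, len(rsList.Items))
		for i := range rsList.Items {
			ret = append(ret, &rsList.Items[i])
		}
		return ret, nil
	}
}

func main() {
	client := fake.NewSimpleClientset()
	list := fromClient(client.AppsV1())
	rss, err := list("default", metav1.ListOptions{})
	fmt.Println(len(rss), err) // 0 <nil> on an empty fake clientset
}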
@@ -17,6 +17,7 @@ limitations under the License.
 package disruption

 import (
+	"context"
 	"fmt"
 	"time"

@@ -791,6 +792,6 @@ func (dc *DisruptionController) updatePdbStatus(pdb *policy.PodDisruptionBudget,
 func (dc *DisruptionController) writePdbStatus(pdb *policy.PodDisruptionBudget) error {
 	// If this update fails, don't retry it. Allow the failure to get handled &
 	// retried in `processNextWorkItem()`.
-	_, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(pdb)
+	_, err := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).UpdateStatus(context.TODO(), pdb)
 	return err
 }
@@ -1054,14 +1054,14 @@ func TestUpdatePDBStatusRetries(t *testing.T) {

 	// Create a PDB and 3 pods that match it.
 	pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
-	pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(pdb)
+	pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb)
 	if err != nil {
 		t.Fatalf("Failed to create PDB: %v", err)
 	}
 	podNames := []string{"moe", "larry", "curly"}
 	for _, name := range podNames {
 		pod, _ := newPod(t, name)
-		_, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(pod)
+		_, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod)
 		if err != nil {
 			t.Fatalf("Failed to create pod: %v", err)
 		}
@@ -1133,7 +1133,7 @@ func TestUpdatePDBStatusRetries(t *testing.T) {
 	})

 	// (A) Delete one pod
-	if err := dc.coreClient.CoreV1().Pods("default").Delete(podNames[0], &metav1.DeleteOptions{}); err != nil {
+	if err := dc.coreClient.CoreV1().Pods("default").Delete(context.TODO(), podNames[0], &metav1.DeleteOptions{}); err != nil {
 		t.Fatal(err)
 	}
 	if err := waitForCacheCount(dc.podStore, len(podNames)-1); err != nil {
@@ -1151,7 +1151,7 @@ func TestUpdatePDBStatusRetries(t *testing.T) {

 	// (C) Whether or not sync() returned an error, the PDB status should reflect
 	// the evictions that took place.
-	finalPDB, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets("default").Get(pdb.Name, metav1.GetOptions{})
+	finalPDB, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets("default").Get(context.TODO(), pdb.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Fatalf("Failed to get PDB: %v", err)
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package endpoint

 import (
+	"context"
 	"fmt"
 	"reflect"
 	"strconv"
@@ -368,7 +369,7 @@ func (e *EndpointController) syncService(key string) error {
 		// service is deleted. However, if we're down at the time when
 		// the service is deleted, we will miss that deletion, so this
 		// doesn't completely solve the problem. See #6877.
-		err = e.client.CoreV1().Endpoints(namespace).Delete(name, nil)
+		err = e.client.CoreV1().Endpoints(namespace).Delete(context.TODO(), name, nil)
 		if err != nil && !errors.IsNotFound(err) {
 			return err
 		}
@@ -512,10 +513,10 @@ func (e *EndpointController) syncService(key string) error {
 	klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
 	if createEndpoints {
 		// No previous endpoints, create them
-		_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(newEndpoints)
+		_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(context.TODO(), newEndpoints)
 	} else {
 		// Pre-existing
-		_, err = e.client.CoreV1().Endpoints(service.Namespace).Update(newEndpoints)
+		_, err = e.client.CoreV1().Endpoints(service.Namespace).Update(context.TODO(), newEndpoints)
 	}
 	if err != nil {
 		if createEndpoints && errors.IsForbidden(err) {
@@ -17,6 +17,7 @@ limitations under the License.
 package endpointslice

 import (
+	"context"
 	"fmt"
 	"reflect"
 	"testing"
@@ -106,7 +107,7 @@ func TestSyncServiceWithSelector(t *testing.T) {
 	standardSyncService(t, esController, ns, serviceName, "true")
 	expectActions(t, client.Actions(), 1, "create", "endpointslices")

-	sliceList, err := client.DiscoveryV1beta1().EndpointSlices(ns).List(metav1.ListOptions{})
+	sliceList, err := client.DiscoveryV1beta1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{})
 	assert.Nil(t, err, "Expected no error fetching endpoint slices")
 	assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slices")
 	slice := sliceList.Items[0]
@@ -173,7 +174,7 @@ func TestSyncServicePodSelection(t *testing.T) {
 	expectActions(t, client.Actions(), 1, "create", "endpointslices")

 	// an endpoint slice should be created, it should only reference pod1 (not pod2)
-	slices, err := client.DiscoveryV1beta1().EndpointSlices(ns).List(metav1.ListOptions{})
+	slices, err := client.DiscoveryV1beta1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{})
 	assert.Nil(t, err, "Expected no error fetching endpoint slices")
 	assert.Len(t, slices.Items, 1, "Expected 1 endpoint slices")
 	slice := slices.Items[0]
@@ -249,7 +250,7 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
 		if err != nil {
 			t.Fatalf("Expected no error adding EndpointSlice: %v", err)
 		}
-		_, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(endpointSlice)
+		_, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(context.TODO(), endpointSlice)
 		if err != nil {
 			t.Fatalf("Expected no error creating EndpointSlice: %v", err)
 		}
@@ -305,7 +306,7 @@ func TestSyncServiceFull(t *testing.T) {
 		},
 	}
 	esController.serviceStore.Add(service)
-	_, err := esController.client.CoreV1().Services(namespace).Create(service)
+	_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service)
 	assert.Nil(t, err, "Expected no error creating service")

 	// run through full sync service loop
@@ -314,7 +315,7 @@ func TestSyncServiceFull(t *testing.T) {

 	// last action should be to create endpoint slice
 	expectActions(t, client.Actions(), 1, "create", "endpointslices")
-	sliceList, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(metav1.ListOptions{})
+	sliceList, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{})
 	assert.Nil(t, err, "Expected no error fetching endpoint slices")
 	assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slices")

@@ -368,7 +369,7 @@ func createService(t *testing.T, esController *endpointSliceController, namespac
 		},
 	}
 	esController.serviceStore.Add(service)
-	_, err := esController.client.CoreV1().Services(namespace).Create(service)
+	_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service)
 	assert.Nil(t, err, "Expected no error creating service")
 	return service
 }
@@ -17,6 +17,7 @@ limitations under the License.
 package endpointslice

 import (
+	"context"
 	"fmt"
 	"sort"
 	"time"
@@ -205,7 +206,7 @@ func (r *reconciler) finalize(

 	for _, endpointSlice := range slicesToCreate {
 		addTriggerTimeAnnotation(endpointSlice, triggerTime)
-		_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(endpointSlice)
+		_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice)
 		if err != nil {
 			// If the namespace is terminating, creates will continue to fail. Simply drop the item.
 			if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
@@ -220,7 +221,7 @@ func (r *reconciler) finalize(

 	for _, endpointSlice := range slicesToUpdate {
 		addTriggerTimeAnnotation(endpointSlice, triggerTime)
-		_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(endpointSlice)
+		_, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Update(context.TODO(), endpointSlice)
 		if err != nil {
 			errs = append(errs, fmt.Errorf("Error updating %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err))
 		} else {
@@ -230,7 +231,7 @@ func (r *reconciler) finalize(
 	}

 	for _, endpointSlice := range slicesToDelete {
-		err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Delete(endpointSlice.Name, &metav1.DeleteOptions{})
+		err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Delete(context.TODO(), endpointSlice.Name, &metav1.DeleteOptions{})
 		if err != nil {
 			errs = append(errs, fmt.Errorf("Error deleting %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err))
 		} else {
@@ -17,6 +17,7 @@ limitations under the License.
 package endpointslice

 import (
+	"context"
 	"fmt"
 	"reflect"
 	"strings"
@@ -203,7 +204,7 @@ func TestReconcile1EndpointSlice(t *testing.T) {
 	svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace)
 	endpointSlice1 := newEmptyEndpointSlice(1, namespace, endpointMeta, svc)

-	_, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(endpointSlice1)
+	_, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice1)
 	assert.Nil(t, createErr, "Expected no error creating endpoint slice")

 	numActionsBefore := len(client.Actions())
@@ -827,7 +828,7 @@ func portsAndAddressTypeEqual(slice1, slice2 discovery.EndpointSlice) bool {
 func createEndpointSlices(t *testing.T, client *fake.Clientset, namespace string, endpointSlices []*discovery.EndpointSlice) {
 	t.Helper()
 	for _, endpointSlice := range endpointSlices {
-		_, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(endpointSlice)
+		_, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice)
 		if err != nil {
 			t.Fatalf("Expected no error creating Endpoint Slice, got: %v", err)
 		}
@@ -836,7 +837,7 @@ func createEndpointSlices(t *testing.T, client *fake.Clientset, namespace string

 func fetchEndpointSlices(t *testing.T, client *fake.Clientset, namespace string) []discovery.EndpointSlice {
 	t.Helper()
-	fetchedSlices, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(metav1.ListOptions{})
+	fetchedSlices, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Expected no error fetching Endpoint Slices, got: %v", err)
 		return []discovery.EndpointSlice{}
@@ -18,6 +18,7 @@ package history

 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"hash/fnv"
@@ -248,9 +249,9 @@ func (rh *realHistory) CreateControllerRevision(parent metav1.Object, revision *
 		// Update the revisions name
 		clone.Name = ControllerRevisionName(parent.GetName(), hash)
 		ns := parent.GetNamespace()
-		created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(clone)
+		created, err := rh.client.AppsV1().ControllerRevisions(ns).Create(context.TODO(), clone)
 		if errors.IsAlreadyExists(err) {
-			exists, err := rh.client.AppsV1().ControllerRevisions(ns).Get(clone.Name, metav1.GetOptions{})
+			exists, err := rh.client.AppsV1().ControllerRevisions(ns).Get(context.TODO(), clone.Name, metav1.GetOptions{})
 			if err != nil {
 				return nil, err
 			}
@@ -271,7 +272,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio
 			return nil
 		}
 		clone.Revision = newRevision
-		updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(clone)
+		updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(context.TODO(), clone)
 		if updateErr == nil {
 			return nil
 		}
@@ -288,7 +289,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio
 }

 func (rh *realHistory) DeleteControllerRevision(revision *apps.ControllerRevision) error {
-	return rh.client.AppsV1().ControllerRevisions(revision.Namespace).Delete(revision.Name, nil)
+	return rh.client.AppsV1().ControllerRevisions(revision.Namespace).Delete(context.TODO(), revision.Name, nil)
 }

 type objectForPatch struct {
@@ -326,13 +327,13 @@ func (rh *realHistory) AdoptControllerRevision(parent metav1.Object, parentKind
 		return nil, err
 	}
 	// Use strategic merge patch to add an owner reference indicating a controller ref
-	return rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Patch(revision.GetName(),
+	return rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Patch(context.TODO(), revision.GetName(),
 		types.StrategicMergePatchType, patchBytes)
 }

 func (rh *realHistory) ReleaseControllerRevision(parent metav1.Object, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) {
 	// Use strategic merge patch to add an owner reference indicating a controller ref
-	released, err := rh.client.AppsV1().ControllerRevisions(revision.GetNamespace()).Patch(revision.GetName(),
+	released, err := rh.client.AppsV1().ControllerRevisions(revision.GetNamespace()).Patch(context.TODO(), revision.GetName(),
 		types.StrategicMergePatchType,
 		[]byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, parent.GetUID(), revision.UID)))

@@ -18,6 +18,7 @@ package history

 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"reflect"
@@ -260,7 +261,7 @@ func TestRealHistory_CreateControllerRevision(t *testing.T) {

 		var collisionCount int32
 		for _, item := range test.existing {
-			_, err := client.AppsV1().ControllerRevisions(item.parent.GetNamespace()).Create(item.revision)
+			_, err := client.AppsV1().ControllerRevisions(item.parent.GetNamespace()).Create(context.TODO(), item.revision)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -17,6 +17,7 @@ limitations under the License.
 package job

 import (
+	"context"
 	"fmt"
 	"math"
 	"reflect"
@@ -417,7 +418,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
 	// If any adoptions are attempted, we should first recheck for deletion
 	// with an uncached quorum read sometime after listing Pods (see #42639).
 	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := jm.kubeClient.BatchV1().Jobs(j.Namespace).Get(j.Name, metav1.GetOptions{})
+		fresh, err := jm.kubeClient.BatchV1().Jobs(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -826,12 +827,12 @@ func (jm *JobController) updateJobStatus(job *batch.Job) error {
 	var err error
 	for i := 0; i <= statusUpdateRetries; i = i + 1 {
 		var newJob *batch.Job
-		newJob, err = jobClient.Get(job.Name, metav1.GetOptions{})
+		newJob, err = jobClient.Get(context.TODO(), job.Name, metav1.GetOptions{})
 		if err != nil {
 			break
 		}
 		newJob.Status = job.Status
-		if _, err = jobClient.UpdateStatus(newJob); err == nil {
+		if _, err = jobClient.UpdateStatus(context.TODO(), newJob); err == nil {
 			break
 		}
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package deletion

 import (
+	"context"
 	"fmt"
 	"reflect"
 	"sync"
@@ -95,7 +96,7 @@ func (d *namespacedResourcesDeleter) Delete(nsName string) error {
 	// Multiple controllers may edit a namespace during termination
 	// first get the latest state of the namespace before proceeding
 	// if the namespace was deleted already, don't do anything
-	namespace, err := d.nsClient.Get(nsName, metav1.GetOptions{})
+	namespace, err := d.nsClient.Get(context.TODO(), nsName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return nil
@@ -249,7 +250,7 @@ func (d *namespacedResourcesDeleter) retryOnConflictError(namespace *v1.Namespac
 			return nil, err
 		}
 		prevNamespace := latestNamespace
-		latestNamespace, err = d.nsClient.Get(latestNamespace.Name, metav1.GetOptions{})
+		latestNamespace, err = d.nsClient.Get(context.TODO(), latestNamespace.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -268,7 +269,7 @@ func (d *namespacedResourcesDeleter) updateNamespaceStatusFunc(namespace *v1.Nam
 	newNamespace.ObjectMeta = namespace.ObjectMeta
 	newNamespace.Status = *namespace.Status.DeepCopy()
 	newNamespace.Status.Phase = v1.NamespaceTerminating
-	return d.nsClient.UpdateStatus(&newNamespace)
+	return d.nsClient.UpdateStatus(context.TODO(), &newNamespace)
 }

 // finalized returns true if the namespace.Spec.Finalizers is an empty list
@@ -550,7 +551,7 @@ func (d *namespacedResourcesDeleter) deleteAllContent(ns *v1.Namespace) (int64,
 	// we need to reflect that information. Recall that additional finalizers can be set on namespaces, so this finalizer may clear itself and
 	// NOT remove the resource instance.
 	if hasChanged := conditionUpdater.Update(ns); hasChanged {
-		if _, err = d.nsClient.UpdateStatus(ns); err != nil {
+		if _, err = d.nsClient.UpdateStatus(context.TODO(), ns); err != nil {
 			utilruntime.HandleError(fmt.Errorf("couldn't update status condition for namespace %q: %v", namespace, err))
 		}
 	}
@@ -590,7 +591,7 @@ func (d *namespacedResourcesDeleter) estimateGracefulTerminationForPods(ns strin
 	if podsGetter == nil || reflect.ValueOf(podsGetter).IsNil() {
 		return 0, fmt.Errorf("unexpected: podsGetter is nil. Cannot estimate grace period seconds for pods")
 	}
-	items, err := podsGetter.Pods(ns).List(metav1.ListOptions{})
+	items, err := podsGetter.Pods(ns).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		return 0, err
 	}
@@ -88,7 +88,7 @@ func (a *adapter) AddAlias(ctx context.Context, nodeName string, cidrRange *net.
 }

 func (a *adapter) Node(ctx context.Context, name string) (*v1.Node, error) {
-	return a.k8s.CoreV1().Nodes().Get(name, metav1.GetOptions{})
+	return a.k8s.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
 }

 func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error {
@@ -103,7 +103,7 @@ func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRang
 		return err
 	}

-	_, err = a.k8s.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, bytes)
+	_, err = a.k8s.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, bytes)
 	return err
 }

@@ -17,6 +17,7 @@ limitations under the License.
 package ipam

 import (
+	"context"
 	"fmt"
 	"net"
 	"time"
@@ -124,7 +125,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
 	// controller manager to restart.
 	if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) {
 		var err error
-		nodeList, err = kubeClient.CoreV1().Nodes().List(metav1.ListOptions{
+		nodeList, err = kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
 			FieldSelector: fields.Everything().String(),
 			LabelSelector: labels.Everything().String(),
 		})
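listNodes above wraps a one-shot List in wait.Poll so a controller that starts before the apiserver is reachable retries instead of crashing. A minimal sketch of that retry shape; the 2-second interval, 10-second timeout, and fake clientset here are stand-ins for the real values, not taken from this commit.

package main

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()

	var nodeList *v1.NodeList
	// Poll until the List succeeds or the timeout expires; transient errors
	// are swallowed by returning (false, nil) so the poll keeps retrying.
	pollErr := wait.Poll(2*time.Second, 10*time.Second, func() (bool, error) {
		var err error
		nodeList, err = client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return false, nil
		}
		return true, nil
	})
	fmt.Println(len(nodeList.Items), pollErr) // 0 <nil>: the fake clientset answers immediately
}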
@@ -22,6 +22,7 @@ limitations under the License.
 package nodelifecycle

 import (
+	"context"
 	"fmt"
 	"strings"
 	"sync"
@@ -813,7 +814,7 @@ func (nc *Controller) monitorNodeHealth() error {
 				return true, nil
 			}
 			name := node.Name
-			node, err = nc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{})
+			node, err = nc.kubeClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
 			if err != nil {
 				klog.Errorf("Failed while getting a Node to retry updating node health. Probably Node %s was deleted.", name)
 				return false, err
@@ -1148,7 +1149,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
 		_, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)

 		if !apiequality.Semantic.DeepEqual(currentReadyCondition, &observedReadyCondition) {
-			if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(node); err != nil {
+			if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), node); err != nil {
 				klog.Errorf("Error updating node %s: %v", node.Name, err)
 				return gracePeriod, observedReadyCondition, currentReadyCondition, err
 			}
@@ -17,6 +17,7 @@ limitations under the License.
 package nodelifecycle

 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -66,7 +67,7 @@ func alwaysReady() bool { return true }
 func fakeGetPodsAssignedToNode(c *fake.Clientset) func(string) ([]*v1.Pod, error) {
 	return func(nodeName string) ([]*v1.Pod, error) {
 		selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName})
-		pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
+		pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
 			FieldSelector: selector.String(),
 			LabelSelector: labels.Everything().String(),
 		})
@@ -17,6 +17,7 @@ limitations under the License.
 package scheduler

 import (
+	"context"
 	"fmt"
 	"hash/fnv"
 	"io"
@@ -108,7 +109,7 @@ func deletePodHandler(c clientset.Interface, emitEventFunc func(types.Namespaced
 		}
 		var err error
 		for i := 0; i < retries; i++ {
-			err = c.CoreV1().Pods(ns).Delete(name, &metav1.DeleteOptions{})
+			err = c.CoreV1().Pods(ns).Delete(context.TODO(), name, &metav1.DeleteOptions{})
 			if err == nil {
 				break
 			}
@@ -17,6 +17,7 @@ limitations under the License.
 package scheduler

 import (
+	"context"
 	"fmt"
 	"sort"
 	"sync"
@@ -37,14 +38,14 @@ var timeForControllerToProgress = 500 * time.Millisecond

 func getPodFromClientset(clientset *fake.Clientset) GetPodFunc {
 	return func(name, namespace string) (*v1.Pod, error) {
-		return clientset.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
+		return clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 	}
 }

 func getPodsAssignedToNode(c *fake.Clientset) GetPodsByNodeNameFunc {
 	return func(nodeName string) ([]*v1.Pod, error) {
 		selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName})
-		pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
+		pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
 			FieldSelector: selector.String(),
 			LabelSelector: labels.Everything().String(),
 		})
@@ -61,7 +62,7 @@ func getPodsAssignedToNode(c *fake.Clientset) GetPodsByNodeNameFunc {

 func getNodeFromClientset(clientset *fake.Clientset) GetNodeFunc {
 	return func(name string) (*v1.Node, error) {
-		return clientset.CoreV1().Nodes().Get(name, metav1.GetOptions{})
+		return clientset.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
 	}
 }

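getPodsAssignedToNode above builds a field selector on spec.nodeName so only the pods scheduled to one node come back. A sketch of constructing and passing that selector; the node name and this harness are invented for illustration, and note the fake clientset does not actually evaluate field selectors, so real tests pair this with a reactor or an informer store.

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()

	// spec.nodeName=node-1 narrows the server-side result to one node's pods.
	selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": "node-1"})
	pods, err := client.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
		FieldSelector: selector.String(),
		LabelSelector: labels.Everything().String(),
	})
	fmt.Println(len(pods.Items), err) // 0 <nil> on an empty clientset
}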
@@ -17,6 +17,7 @@ limitations under the License.
 package podautoscaler

 import (
+	"context"
 	"fmt"
 	"math"
 	"time"
@@ -1112,7 +1113,7 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto
 	}
 	hpav1 := hpaRaw.(*autoscalingv1.HorizontalPodAutoscaler)

-	_, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(hpav1)
+	_, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(context.TODO(), hpav1)
 	if err != nil {
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
 		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
@@ -116,7 +116,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
 }

 func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (PodMetricsInfo, time.Time, error) {
-	podList, err := h.podsGetter.Pods(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
+	podList, err := h.podsGetter.Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
 	if err != nil {
 		return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package metrics

 import (
+	"context"
 	"fmt"
 	"time"

@@ -63,7 +64,7 @@ type resourceMetricsClient struct {
 // GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
 // for all pods matching the specified selector in the given namespace
 func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
-	metrics, err := c.client.PodMetricses(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
+	metrics, err := c.client.PodMetricses(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
 	if err != nil {
 		return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from resource metrics API: %v", err)
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package podgc

 import (
+	"context"
 	"sort"
 	"sync"
 	"time"
@@ -75,7 +76,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor
 		nodeQueue: workqueue.NewNamedDelayingQueue("orphaned_pods_nodes"),
 		deletePod: func(namespace, name string) error {
 			klog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name)
-			return kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
+			return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.NewDeleteOptions(0))
 		},
 	}

@@ -214,7 +215,7 @@ func (gcc *PodGCController) discoverDeletedNodes(existingNodeNames sets.String)
 }

 func (gcc *PodGCController) checkIfNodeExists(name string) (bool, error) {
-	_, fetchErr := gcc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{})
+	_, fetchErr := gcc.kubeClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
 	if errors.IsNotFound(fetchErr) {
 		return false, nil
 	}
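checkIfNodeExists above is the idiomatic existence probe: issue a Get and treat only IsNotFound as a definitive "no", letting every other error propagate so a flaky apiserver cannot be mistaken for a deleted node. A sketch of that decision under those assumptions; nodeExists and the node name are invented for illustration.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// nodeExists distinguishes "definitely gone" from "could not tell".
func nodeExists(client kubernetes.Interface, name string) (bool, error) {
	_, err := client.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		return false, nil // a clean NotFound is the only proof of absence
	}
	if err != nil {
		return false, err // anything else (timeout, 5xx) stays an error
	}
	return true, nil
}

func main() {
	exists, err := nodeExists(fake.NewSimpleClientset(), "node-1")
	fmt.Println(exists, err) // false <nil> on an empty clientset
}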
@@ -17,6 +17,7 @@ limitations under the License.
 package podgc

 import (
+	"context"
 	"sync"
 	"testing"
 	"time"
@@ -345,10 +346,10 @@ func TestGCOrphaned(t *testing.T) {

 			// Execute planned nodes changes
 			for _, node := range test.addedClientNodes {
-				client.CoreV1().Nodes().Create(node)
+				client.CoreV1().Nodes().Create(context.TODO(), node)
 			}
 			for _, node := range test.deletedClientNodes {
-				client.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{})
+				client.CoreV1().Nodes().Delete(context.TODO(), node.Name, &metav1.DeleteOptions{})
 			}
 			for _, node := range test.addedInformerNodes {
 				nodeInformer.Informer().GetStore().Add(node)
@@ -28,6 +28,7 @@ limitations under the License.
 package replicaset

 import (
+	"context"
 	"fmt"
 	"reflect"
 	"sort"
@@ -714,7 +715,7 @@ func (rsc *ReplicaSetController) claimPods(rs *apps.ReplicaSet, selector labels.
 	// If any adoptions are attempted, we should first recheck for deletion with
 	// an uncached quorum read sometime after listing Pods (see #42639).
 	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
+		fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(context.TODO(), rs.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
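The canAdoptFunc passed to RecheckDeletionTimestamp in claimPods above re-reads the ReplicaSet straight from the API server before any adoption, because the informer cache may still hold an object that was deleted and recreated under the same name. A sketch of the idea behind that fresh-read guard, not the controller package's actual implementation; freshReplicaSet and the UID comparison are illustrative.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// freshReplicaSet returns the live object only if it is still the same
// instance (same UID) the controller decided to act on.
func freshReplicaSet(client kubernetes.Interface, namespace, name string, expectedUID types.UID) (metav1.Object, error) {
	fresh, err := client.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if fresh.UID != expectedUID {
		// Same name, different object: the original was deleted and recreated.
		return nil, fmt.Errorf("original ReplicaSet %s/%s is gone: got uid %v, wanted %v", namespace, name, fresh.UID, expectedUID)
	}
	return fresh, nil
}

func main() {
	_, err := freshReplicaSet(fake.NewSimpleClientset(), "default", "web", "some-uid")
	fmt.Println(err) // NotFound on an empty clientset
}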
@@ -17,6 +17,7 @@ limitations under the License.
 package replicaset

 import (
+	"context"
 	"errors"
 	"fmt"
 	"math/rand"
@@ -1158,7 +1159,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 	}

 	oldRS := newReplicaSet(1, map[string]string{"foo": "bar"})
-	oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(oldRS)
+	oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(context.TODO(), oldRS)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1202,7 +1203,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 		t.Fatal("Unexpected item in the queue")
 	}

-	err = client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(oldRS.Name, &metav1.DeleteOptions{})
+	err = client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(context.TODO(), oldRS.Name, &metav1.DeleteOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1239,7 +1240,7 @@ func TestExpectationsOnRecreate(t *testing.T) {

 	newRS := oldRS.DeepCopy()
 	newRS.UID = uuid.NewUUID()
-	newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(newRS)
+	newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(context.TODO(), newRS)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -19,6 +19,7 @@ limitations under the License.
 package replicaset

 import (
+	"context"
 	"fmt"
 	"reflect"

@@ -63,7 +64,7 @@ func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSe
 			fmt.Sprintf("sequence No: %v->%v", rs.Status.ObservedGeneration, newStatus.ObservedGeneration))

 		rs.Status = newStatus
-		updatedRS, updateErr = c.UpdateStatus(rs)
+		updatedRS, updateErr = c.UpdateStatus(context.TODO(), rs)
 		if updateErr == nil {
 			return updatedRS, nil
 		}
@@ -72,7 +73,7 @@ func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSe
 			break
 		}
 		// Update the ReplicaSet with the latest resource version for the next poll
-		if rs, getErr = c.Get(rs.Name, metav1.GetOptions{}); getErr != nil {
+		if rs, getErr = c.Get(context.TODO(), rs.Name, metav1.GetOptions{}); getErr != nil {
 			// If the GET fails we can't trust status.Replicas anymore. This error
 			// is bound to be more interesting than the update failure.
 			return nil, getErr
@@ -22,6 +22,7 @@ limitations under the License.
 package replication

 import (
+	"context"
 	"errors"
 	"fmt"
 	"time"
@@ -215,7 +216,7 @@ func (c conversionClient) UpdateStatus(rs *apps.ReplicaSet) (*apps.ReplicaSet, e
 }

 func (c conversionClient) Get(name string, options metav1.GetOptions) (*apps.ReplicaSet, error) {
-	rc, err := c.ReplicationControllerInterface.Get(name, options)
+	rc, err := c.ReplicationControllerInterface.Get(context.TODO(), name, options)
 	if err != nil {
 		return nil, err
 	}
@@ -223,7 +224,7 @@ func (c conversionClient) Get(name string, options metav1.GetOptions) (*apps.Rep
 }

 func (c conversionClient) List(opts metav1.ListOptions) (*apps.ReplicaSetList, error) {
-	rcList, err := c.ReplicationControllerInterface.List(opts)
+	rcList, err := c.ReplicationControllerInterface.List(context.TODO(), opts)
 	if err != nil {
 		return nil, err
 	}
@ -17,6 +17,7 @@ limitations under the License.
package resourcequota

import (
"context"
"fmt"
"reflect"
"sync"
@ -355,7 +356,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQ

// there was a change observed by this controller that requires we update quota
if dirty {
_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(usage)
_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage)
if err != nil {
errors = append(errors, err)
}
@ -310,7 +310,7 @@ func TestSyncLoadBalancerIfNeeded(t *testing.T) {
controller, cloud, client := newController()
cloud.Exists = tc.lbExists
key := fmt.Sprintf("%s/%s", tc.service.Namespace, tc.service.Name)
if _, err := client.CoreV1().Services(tc.service.Namespace).Create(tc.service); err != nil {
if _, err := client.CoreV1().Services(tc.service.Namespace).Create(context.TODO(), tc.service); err != nil {
t.Fatalf("Failed to prepare service %s for testing: %v", key, err)
}
client.ClearActions()
@ -603,7 +603,7 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) {

for _, tc := range testCases {
newSvc := tc.updateFn(tc.svc)
if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil {
if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
t.Fatalf("Failed to prepare service %s for testing: %v", tc.key, err)
}
obtErr := controller.processServiceCreateOrUpdate(newSvc, tc.key)
@ -1222,7 +1222,7 @@ func TestAddFinalizer(t *testing.T) {
s := &Controller{
kubeClient: c,
}
if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil {
if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
t.Fatalf("Failed to prepare service for testing: %v", err)
}
if err := s.addFinalizer(tc.svc); err != nil {
@ -1276,7 +1276,7 @@ func TestRemoveFinalizer(t *testing.T) {
s := &Controller{
kubeClient: c,
}
if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil {
if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
t.Fatalf("Failed to prepare service for testing: %v", err)
}
if err := s.removeFinalizer(tc.svc); err != nil {
@ -1376,7 +1376,7 @@ func TestPatchStatus(t *testing.T) {
s := &Controller{
kubeClient: c,
}
if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil {
if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc); err != nil {
t.Fatalf("Failed to prepare service for testing: %v", err)
}
if err := s.patchStatus(tc.svc, &tc.svc.Status.LoadBalancer, tc.newStatus); err != nil {
@ -17,6 +17,7 @@ limitations under the License.
package service

import (
"context"
"encoding/json"
"fmt"

@ -37,7 +38,7 @@ func patch(c v1core.CoreV1Interface, oldSvc *v1.Service, newSvc *v1.Service) (*v
return nil, err
}

return c.Services(oldSvc.Namespace).Patch(oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status")
return c.Services(oldSvc.Namespace).Patch(context.TODO(), oldSvc.Name, types.StrategicMergePatchType, patchBytes, "status")
}

func getPatchBytes(oldSvc *v1.Service, newSvc *v1.Service) ([]byte, error) {
@ -17,6 +17,7 @@ limitations under the License.
package service

import (
"context"
"reflect"
"testing"

@ -44,7 +45,7 @@ func TestPatch(t *testing.T) {
// Issue a separate update and verify patch doesn't fail after this.
svcToUpdate := svcOrigin.DeepCopy()
addAnnotations(svcToUpdate)
if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(svcToUpdate); err != nil {
if _, err := fakeCs.CoreV1().Services(svcOrigin.Namespace).Update(context.TODO(), svcToUpdate); err != nil {
t.Fatalf("Failed to update service: %v", err)
}

@ -17,6 +17,7 @@ limitations under the License.
package serviceaccount

import (
"context"
"fmt"
"time"

@ -212,7 +213,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
// TODO eliminate this once the fake client can handle creation without NS
sa.Namespace = ns.Name

if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrors.IsAlreadyExists(err) {
if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &sa); err != nil && !apierrors.IsAlreadyExists(err) {
// we can safely ignore terminating namespace errors
if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
createFailures = append(createFailures, err)
@ -17,6 +17,7 @@ limitations under the License.
package serviceaccount

import (
"context"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@ -44,19 +45,19 @@ func (c clientGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAcco
if serviceAccount, err := c.serviceAccountLister.ServiceAccounts(namespace).Get(name); err == nil {
return serviceAccount, nil
}
return c.client.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{})
return c.client.CoreV1().ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}

func (c clientGetter) GetPod(namespace, name string) (*v1.Pod, error) {
if pod, err := c.podLister.Pods(namespace).Get(name); err == nil {
return pod, nil
}
return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
return c.client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}

func (c clientGetter) GetSecret(namespace, name string) (*v1.Secret, error) {
if secret, err := c.secretLister.Secrets(namespace).Get(name); err == nil {
return secret, nil
}
return c.client.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
return c.client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
@ -18,6 +18,7 @@ package serviceaccount

import (
"bytes"
"context"
"fmt"
"time"

@ -345,7 +346,7 @@ func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry
if len(uid) > 0 {
opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}
}
err := e.client.CoreV1().Secrets(ns).Delete(name, opts)
err := e.client.CoreV1().Secrets(ns).Delete(context.TODO(), name, opts)
// NotFound doesn't need a retry (it's already been deleted)
// Conflict doesn't need a retry (the UID precondition failed)
if err == nil || apierrors.IsNotFound(err) || apierrors.IsConflict(err) {
@ -368,7 +369,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
// We don't want to update the cache's copy of the service account
// so add the secret to a freshly retrieved copy of the service account
serviceAccounts := e.client.CoreV1().ServiceAccounts(serviceAccount.Namespace)
liveServiceAccount, err := serviceAccounts.Get(serviceAccount.Name, metav1.GetOptions{})
liveServiceAccount, err := serviceAccounts.Get(context.TODO(), serviceAccount.Name, metav1.GetOptions{})
if err != nil {
// Retry if we cannot fetch the live service account (for a NotFound error, either the live lookup or our cache are stale)
return true, err
@ -406,7 +407,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
}

// Save the secret
createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(secret)
createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(context.TODO(), secret)
if err != nil {
// if the namespace is being terminated, create will fail no matter what
if apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
@ -427,7 +428,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou

// fetch the live service account if needed, and verify the UID matches and that we still need a token
if liveServiceAccount == nil {
liveServiceAccount, err = serviceAccounts.Get(serviceAccount.Name, metav1.GetOptions{})
liveServiceAccount, err = serviceAccounts.Get(context.TODO(), serviceAccount.Name, metav1.GetOptions{})
if err != nil {
return err
}
@ -448,7 +449,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou

// Try to add a reference to the token
liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, v1.ObjectReference{Name: secret.Name})
if _, err := serviceAccounts.Update(liveServiceAccount); err != nil {
if _, err := serviceAccounts.Update(context.TODO(), liveServiceAccount); err != nil {
return err
}

@ -460,7 +461,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
// we weren't able to use the token, try to clean it up.
klog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}}
if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil {
if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(context.TODO(), createdToken.Name, deleteOpts); deleteErr != nil {
klog.Error(deleteErr) // if we fail, just log it
}
}
@ -519,7 +520,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou
// We don't want to update the cache's copy of the secret
// so add the token to a freshly retrieved copy of the secret
secrets := e.client.CoreV1().Secrets(cachedSecret.Namespace)
liveSecret, err := secrets.Get(cachedSecret.Name, metav1.GetOptions{})
liveSecret, err := secrets.Get(context.TODO(), cachedSecret.Name, metav1.GetOptions{})
if err != nil {
// Retry for any error other than a NotFound
return !apierrors.IsNotFound(err), err
@ -566,7 +567,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou
liveSecret.Annotations[v1.ServiceAccountUIDKey] = string(serviceAccount.UID)

// Save the secret
_, err = secrets.Update(liveSecret)
_, err = secrets.Update(context.TODO(), liveSecret)
if apierrors.IsConflict(err) || apierrors.IsNotFound(err) {
// if we got a Conflict error, the secret was updated by someone else, and we'll get an update notification later
// if we got a NotFound error, the secret no longer exists, and we don't need to populate a token
@ -583,7 +584,7 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri
// We don't want to update the cache's copy of the service account
// so remove the secret from a freshly retrieved copy of the service account
serviceAccounts := e.client.CoreV1().ServiceAccounts(saNamespace)
serviceAccount, err := serviceAccounts.Get(saName, metav1.GetOptions{})
serviceAccount, err := serviceAccounts.Get(context.TODO(), saName, metav1.GetOptions{})
// Ignore NotFound errors when attempting to remove a reference
if apierrors.IsNotFound(err) {
return nil
@ -610,7 +611,7 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri
}
}
serviceAccount.Secrets = secrets
_, err = serviceAccounts.Update(serviceAccount)
_, err = serviceAccounts.Update(context.TODO(), serviceAccount)
// Ignore NotFound errors when attempting to remove a reference
if apierrors.IsNotFound(err) {
return nil
@ -636,7 +637,7 @@ func (e *TokensController) getServiceAccount(ns string, name string, uid types.U
}

// Live lookup
sa, err = e.client.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{})
sa, err = e.client.CoreV1().ServiceAccounts(ns).Get(context.TODO(), name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return nil, nil
}
@ -672,7 +673,7 @@ func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetc
}

// Live lookup
secret, err := e.client.CoreV1().Secrets(ns).Get(name, metav1.GetOptions{})
secret, err := e.client.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return nil, nil
}
@ -17,6 +17,7 @@ limitations under the License.
package statefulset

import (
"context"
"fmt"
"strings"

@ -77,7 +78,7 @@ func (spc *realStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod
return err
}
// If we created the PVCs attempt to create the Pod
_, err := spc.client.CoreV1().Pods(set.Namespace).Create(pod)
_, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod)
// sink already exists errors
if apierrors.IsAlreadyExists(err) {
return err
@ -113,7 +114,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod

attemptedUpdate = true
// commit the update, retrying on conflicts
_, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(pod)
_, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(context.TODO(), pod)
if updateErr == nil {
return nil
}
@ -134,7 +135,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod
}

func (spc *realStatefulPodControl) DeleteStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error {
err := spc.client.CoreV1().Pods(set.Namespace).Delete(pod.Name, nil)
err := spc.client.CoreV1().Pods(set.Namespace).Delete(context.TODO(), pod.Name, nil)
spc.recordPodEvent("delete", set, pod, err)
return err
}
@ -182,7 +183,7 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef
_, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
switch {
case apierrors.IsNotFound(err):
_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(&claim)
_, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), &claim)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create PVC %s: %s", claim.Name, err))
}
@ -17,6 +17,7 @@ limitations under the License.
package statefulset

import (
"context"
"fmt"
"reflect"
"time"
@ -291,7 +292,7 @@ func (ssc *StatefulSetController) getPodsForStatefulSet(set *apps.StatefulSet, s
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing Pods (see #42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := ssc.kubeClient.AppsV1().StatefulSets(set.Namespace).Get(set.Name, metav1.GetOptions{})
fresh, err := ssc.kubeClient.AppsV1().StatefulSets(set.Namespace).Get(context.TODO(), set.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
@ -318,7 +319,7 @@ func (ssc *StatefulSetController) adoptOrphanRevisions(set *apps.StatefulSet) er
}
}
if len(orphanRevisions) > 0 {
fresh, err := ssc.kubeClient.AppsV1().StatefulSets(set.Namespace).Get(set.Name, metav1.GetOptions{})
fresh, err := ssc.kubeClient.AppsV1().StatefulSets(set.Namespace).Get(context.TODO(), set.Name, metav1.GetOptions{})
if err != nil {
return err
}
@ -17,6 +17,7 @@ limitations under the License.
package statefulset

import (
"context"
"fmt"

apps "k8s.io/api/apps/v1"
@ -53,7 +54,7 @@ func (ssu *realStatefulSetStatusUpdater) UpdateStatefulSetStatus(
// don't wait due to limited number of clients, but backoff after the default number of steps
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
set.Status = *status
_, updateErr := ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(set)
_, updateErr := ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(context.TODO(), set)
if updateErr == nil {
return nil
}
@ -27,6 +27,7 @@ limitations under the License.
package ttl

import (
"context"
"fmt"
"math"
"strconv"
@ -263,7 +264,7 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey
if err != nil {
return err
}
_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes)
if err != nil {
klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err)
return err
@ -17,6 +17,7 @@ limitations under the License.
package ttlafterfinished

import (
"context"
"fmt"
"time"

@ -214,7 +215,7 @@ func (tc *Controller) processJob(key string) error {
// Before deleting the Job, do a final sanity check.
// If TTL is modified before we do this check, we cannot be sure if the TTL truly expires.
// The latest Job may have a different UID, but it's fine because the checks will be run again.
fresh, err := tc.client.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{})
fresh, err := tc.client.BatchV1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}
@ -234,7 +235,7 @@ func (tc *Controller) processJob(key string) error {
Preconditions: &metav1.Preconditions{UID: &fresh.UID},
}
klog.V(4).Infof("Cleaning up Job %s/%s", namespace, name)
return tc.client.BatchV1().Jobs(fresh.Namespace).Delete(fresh.Name, options)
return tc.client.BatchV1().Jobs(fresh.Namespace).Delete(context.TODO(), fresh.Name, options)
}

// processTTL checks whether a given Job's TTL has expired, and add it to the queue after the TTL is expected to expire
@ -17,6 +17,7 @@ limitations under the License.
package node

import (
"context"
"fmt"
"strings"

@ -79,7 +80,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.

klog.V(2).Infof("Starting deletion of pod %v/%v", pod.Namespace, pod.Name)
recorder.Eventf(pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, nil); err != nil {
if apierrors.IsNotFound(err) {
// NotFound error means that pod was already deleted.
// There is nothing left to do with this pod.
@ -109,7 +110,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa

var updatedPod *v1.Pod
var err error
if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod); err != nil {
if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod); err != nil {
return nil, err
}
return updatedPod, nil
@ -136,7 +137,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s
break
}
klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod)
if err != nil {
if apierrors.IsNotFound(err) {
// NotFound error means that pod was already deleted.
@ -17,6 +17,7 @@ limitations under the License.
package attachdetach

import (
"context"
"fmt"
"testing"
"time"
@ -160,7 +161,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2

stopCh := make(chan struct{})

pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{})
pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
}
@ -170,7 +171,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
podInformer.GetIndexer().Add(&podToAdd)
podsNum++
}
nodes, err := fakeKubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := fakeKubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
}
@ -180,7 +181,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
nodesNum++
}

csiNodes, err := fakeKubeClient.StorageV1().CSINodes().List(metav1.ListOptions{})
csiNodes, err := fakeKubeClient.StorageV1().CSINodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
}
@ -269,7 +270,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2

for _, newPod := range extraPods1 {
// Add a new pod between ASW and DSW ppoulators
_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(newPod)
_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod)
if err != nil {
t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
}
@ -286,7 +287,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2

for _, newPod := range extraPods2 {
// Add a new pod between DSW ppoulator and reconciler run
_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(newPod)
_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod)
if err != nil {
t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
}
@ -17,6 +17,7 @@ limitations under the License.
package cache

import (
"context"
"encoding/json"
"fmt"
"sync"
@ -202,7 +203,7 @@ func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSi
return fmt.Errorf("Error Creating two way merge patch for PV %q with error : %v", pvClone.Name, err)
}

_, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(pvClone.Name, commontypes.StrategicMergePatchType, patchBytes)
_, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, commontypes.StrategicMergePatchType, patchBytes)

if updateErr != nil {
klog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr)
@ -17,6 +17,7 @@ limitations under the License.
package persistentvolume

import (
"context"
"fmt"
"reflect"
"strings"
@ -559,7 +560,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
}
found = !apierrors.IsNotFound(err)
if !found {
obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(volume.Spec.ClaimRef.Name, metav1.GetOptions{})
obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(context.TODO(), volume.Spec.ClaimRef.Name, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
@ -753,7 +754,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo
return claim, nil
}

newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(context.TODO(), claimClone)
if err != nil {
klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err)
return newClaim, err
@ -809,7 +810,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV
volumeClone.Status.Phase = phase
volumeClone.Status.Message = message

newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(volumeClone)
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), volumeClone)
if err != nil {
klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err)
return newVol, err
@ -871,7 +872,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV
func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, updateCache bool) (*v1.PersistentVolume, error) {
claimKey := claimrefToClaimKey(volumeClone.Spec.ClaimRef)
klog.V(2).Infof("claim %q bound to volume %q", claimKey, volumeClone.Name)
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone)
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone)
if err != nil {
klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimKey, err)
return newVol, err
@ -923,7 +924,7 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo

if dirty {
klog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim))
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone)
if err != nil {
klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err)
return newClaim, err
@ -1010,7 +1011,7 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume
volumeClone.Spec.ClaimRef.UID = ""
}

newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone)
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone)
if err != nil {
klog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err)
return err
@ -1081,7 +1082,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis
// This method may have been waiting for a volume lock for some time.
// Previous recycleVolumeOperation might just have saved an updated version,
// so read current volume state now.
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), volume.Name, metav1.GetOptions{})
if err != nil {
klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
return
@ -1177,7 +1178,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist
// This method may have been waiting for a volume lock for some time.
// Previous deleteVolumeOperation might just have saved an updated version, so
// read current volume state now.
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), volume.Name, metav1.GetOptions{})
if err != nil {
klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
return "", nil
@ -1221,7 +1222,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist

klog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name)
// Delete the volume
if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil {
if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(context.TODO(), volume.Name, nil); err != nil {
// Oops, could not delete the volume and therefore the controller will
// try to delete the volume again on next update. We _could_ maintain a
// cache of "recently deleted volumes" and avoid unnecessary deletion,
@ -1415,7 +1416,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
// yet.

pvName := ctrl.getProvisionedVolumeNameForClaim(claim)
volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
klog.V(3).Infof("error reading persistent volume %q: %v", pvName, err)
return pluginName, err
@ -1514,7 +1515,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name)
var newVol *v1.PersistentVolume
if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrors.IsAlreadyExists(err) {
if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume); err == nil || apierrors.IsAlreadyExists(err) {
// Save succeeded.
if err != nil {
klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim))
@ -1630,7 +1631,7 @@ func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.Persist
newClaim := claim.DeepCopy()
delete(newClaim.Annotations, pvutil.AnnSelectedNode)
// Try to update the PVC object
if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(newClaim); err != nil {
if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(context.TODO(), newClaim); err != nil {
klog.V(4).Infof("Failed to delete annotation 'pvutil.AnnSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err)
return
}
@ -17,6 +17,7 @@ limitations under the License.
package persistentvolume

import (
"context"
"fmt"
"strconv"
"time"
@ -321,7 +322,7 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(claim *v
if !modified {
return claimClone, nil
}
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone)
if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
}
@ -338,7 +339,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotations(volume
if !modified {
return volumeClone, nil
}
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone)
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone)
if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
}
@ -545,7 +546,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent
claimClone := claim.DeepCopy()
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, pvutil.AnnStorageProvisioner, provisionerName)
updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, pvutil.AnnStorageProvisioner)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone)
if err != nil {
return newClaim, err
}
@ -17,6 +17,7 @@ limitations under the License.
package pvcprotection

import (
"context"
"fmt"
"time"

@ -188,7 +189,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
}
claimClone := pvc.DeepCopy()
claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer)
_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone)
_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone)
if err != nil {
klog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
return err
@ -200,7 +201,7 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error {
claimClone := pvc.DeepCopy()
claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)
_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone)
_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone)
if err != nil {
klog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
return err
@ -248,7 +249,7 @@ func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) {
func (c *Controller) askAPIServer(pvc *v1.PersistentVolumeClaim) (bool, error) {
klog.V(4).Infof("Looking for Pods using PVC %s/%s with a live list", pvc.Namespace, pvc.Name)

podsList, err := c.client.CoreV1().Pods(pvc.Namespace).List(metav1.ListOptions{})
podsList, err := c.client.CoreV1().Pods(pvc.Namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("live list of pods failed: %s", err.Error())
}
@ -17,6 +17,7 @@ limitations under the License.
package pvprotection

import (
"context"
"fmt"
"time"

@ -161,7 +162,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
}
pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
_, err := c.client.CoreV1().PersistentVolumes().Update(pvClone)
_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone)
if err != nil {
klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err)
return err
@ -173,7 +174,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
func (c *Controller) removeFinalizer(pv *v1.PersistentVolume) error {
pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)
_, err := c.client.CoreV1().PersistentVolumes().Update(pvClone)
_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone)
if err != nil {
klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err)
return err
@ -17,6 +17,7 @@ limitations under the License.
package scheduling

import (
"context"
"fmt"
"sort"
"strings"
@ -423,7 +424,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
// TODO: does it hurt if we make an api call and nothing needs to be updated?
claimKey := claimToClaimKey(binding.pvc)
klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name)
newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(binding.pv)
newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv)
if err != nil {
klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err)
return err
@ -438,7 +439,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
// PV controller is expect to signal back by removing related annotations if actual provisioning fails
for i, claim = range claimsToProvision {
klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim)
newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim)
if err != nil {
return err
}
@@ -295,7 +295,7 @@ func (env *testEnv) initVolumes(cachedPVs []*v1.PersistentVolume, apiPVs []*v1.P
 
 func (env *testEnv) updateVolumes(t *testing.T, pvs []*v1.PersistentVolume, waitCache bool) {
 	for _, pv := range pvs {
-		if _, err := env.client.CoreV1().PersistentVolumes().Update(pv); err != nil {
+		if _, err := env.client.CoreV1().PersistentVolumes().Update(context.TODO(), pv); err != nil {
 			t.Fatalf("failed to update PV %q", pv.Name)
 		}
 	}
@@ -321,7 +321,7 @@ func (env *testEnv) updateVolumes(t *testing.T, pvs []*v1.PersistentVolume, wait
 
 func (env *testEnv) updateClaims(t *testing.T, pvcs []*v1.PersistentVolumeClaim, waitCache bool) {
 	for _, pvc := range pvcs {
-		if _, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(pvc); err != nil {
+		if _, err := env.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc); err != nil {
 			t.Fatalf("failed to update PVC %q", getPVCName(pvc))
 		}
 	}
@@ -1769,7 +1769,7 @@ func TestBindPodVolumes(t *testing.T) {
 				newPVC := pvc.DeepCopy()
 				newPVC.Spec.VolumeName = pv.Name
 				metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes")
-				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(newPVC); err != nil {
+				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil {
 					t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
 				}
 			},
@@ -1780,20 +1780,20 @@ func TestBindPodVolumes(t *testing.T) {
 			delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
 				pvc := pvcs[0]
 				// Update PVC to be fully bound to PV
-				newPVC, err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
+				newPVC, err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
 				if err != nil {
 					t.Errorf("failed to get PVC %q: %v", pvc.Name, err)
 					return
 				}
 				dynamicPV := makeTestPV("dynamic-pv", "node1", "1G", "1", newPVC, waitClass)
-				dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(dynamicPV)
+				dynamicPV, err = testEnv.client.CoreV1().PersistentVolumes().Create(context.TODO(), dynamicPV)
 				if err != nil {
 					t.Errorf("failed to create PV %q: %v", dynamicPV.Name, err)
 					return
 				}
 				newPVC.Spec.VolumeName = dynamicPV.Name
 				metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes")
-				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(newPVC); err != nil {
+				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil {
 					t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
 				}
 			},
@@ -1846,7 +1846,7 @@ func TestBindPodVolumes(t *testing.T) {
 			delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pvs []*v1.PersistentVolume, pvcs []*v1.PersistentVolumeClaim) {
 				pvc := pvcs[0]
 				// Delete PVC will fail check
-				if err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, &metav1.DeleteOptions{}); err != nil {
+				if err := testEnv.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, &metav1.DeleteOptions{}); err != nil {
 					t.Errorf("failed to delete PVC %q: %v", pvc.Name, err)
 				}
 			},
@@ -1869,7 +1869,7 @@ func TestBindPodVolumes(t *testing.T) {
 				newPVC := pvcs[0].DeepCopy()
 				newPVC.Spec.VolumeName = pvNode2.Name
 				metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, pvutil.AnnBindCompleted, "yes")
-				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(newPVC); err != nil {
+				if _, err := testEnv.client.CoreV1().PersistentVolumeClaims(newPVC.Namespace).Update(context.TODO(), newPVC); err != nil {
 					t.Errorf("failed to update PVC %q: %v", newPVC.Name, err)
 				}
 			},
@@ -1904,13 +1904,13 @@ func TestBindPodVolumes(t *testing.T) {
 
 		// Before Execute
 		if scenario.apiPV != nil {
-			_, err := testEnv.client.CoreV1().PersistentVolumes().Update(scenario.apiPV)
+			_, err := testEnv.client.CoreV1().PersistentVolumes().Update(context.TODO(), scenario.apiPV)
 			if err != nil {
 				t.Fatalf("failed to update PV %q", scenario.apiPV.Name)
 			}
 		}
 		if scenario.apiPVC != nil {
-			_, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(scenario.apiPVC)
+			_, err := testEnv.client.CoreV1().PersistentVolumeClaims(scenario.apiPVC.Namespace).Update(context.TODO(), scenario.apiPVC)
 			if err != nil {
 				t.Fatalf("failed to update PVC %q", getPVCName(scenario.apiPVC))
 			}
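The TestBindPodVolumes hunks above exercise every verb this refactor touches: Get, Create, Update, and Delete. As a compact reference, here is a hypothetical sketch of all four calls against a bare fake clientset, written against released client-go (v0.18+), where each verb also takes an options struct and Delete takes metav1.DeleteOptions by value rather than the &metav1.DeleteOptions{} pointer still visible in this diff; the object names are made up.

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	ctx := context.TODO()
	pvcs := client.CoreV1().PersistentVolumeClaims("default")

	// Create, Get, Update, Delete: all context-first after this commit.
	pvc := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "example-pvc"}}
	if _, err := pvcs.Create(ctx, pvc, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
	got, err := pvcs.Get(ctx, "example-pvc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	got.Spec.VolumeName = "example-pv"
	if _, err := pvcs.Update(ctx, got, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
	if err := pvcs.Delete(ctx, "example-pvc", metav1.DeleteOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("all four verbs completed")
}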
@@ -17,6 +17,7 @@ limitations under the License.
 package auth
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -222,7 +223,7 @@ func (o *CanIOptions) RunAccessList() error {
 			Namespace: o.Namespace,
 		},
 	}
-	response, err := o.AuthClient.SelfSubjectRulesReviews().Create(sar)
+	response, err := o.AuthClient.SelfSubjectRulesReviews().Create(context.TODO(), sar)
 	if err != nil {
 		return err
 	}
@@ -257,7 +258,7 @@ func (o *CanIOptions) RunAccessCheck() (bool, error) {
 		}
 	}
 
-	response, err := o.AuthClient.SelfSubjectAccessReviews().Create(sar)
+	response, err := o.AuthClient.SelfSubjectAccessReviews().Create(context.TODO(), sar)
 	if err != nil {
 		return false, err
 	}
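For kubectl auth can-i the change is the same one-line context insertion on each review Create call. A self-contained, hypothetical sketch of issuing a SelfSubjectAccessReview with the context-first signature, assuming released client-go (v0.18+, which also requires metav1.CreateOptions) and a kubeconfig at the default location:

package main

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load ~/.kube/config; error handling kept minimal for the sketch.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Ask the API server whether the current user may get pods in "default".
	sar := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Verb:      "get",
				Resource:  "pods",
				Namespace: "default",
			},
		},
	}
	response, err := client.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("allowed:", response.Status.Allowed)
}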
@@ -17,6 +17,7 @@ limitations under the License.
 package configmap
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -61,7 +62,7 @@ func NewSimpleConfigMapManager(kubeClient clientset.Interface) Manager {
 }
 
 func (s *simpleConfigMapManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) {
-	return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
+	return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 }
 
 func (s *simpleConfigMapManager) RegisterPod(pod *v1.Pod) {
@@ -120,7 +121,7 @@ const (
 // value in cache; otherwise it is just fetched from cache
 func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.GetObjectTTLFunc) Manager {
 	getConfigMap := func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) {
-		return kubeClient.CoreV1().ConfigMaps(namespace).Get(name, opts)
+		return kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, opts)
 	}
 	configMapStore := manager.NewObjectStore(getConfigMap, clock.RealClock{}, getTTL, defaultTTL)
 	return &configMapManager{
@@ -136,10 +137,10 @@ func NewCachingConfigMapManager(kubeClient clientset.Interface, getTTL manager.G
 // - every GetObject() returns a value from local cache propagated via watches
 func NewWatchingConfigMapManager(kubeClient clientset.Interface) Manager {
 	listConfigMap := func(namespace string, opts metav1.ListOptions) (runtime.Object, error) {
-		return kubeClient.CoreV1().ConfigMaps(namespace).List(opts)
+		return kubeClient.CoreV1().ConfigMaps(namespace).List(context.TODO(), opts)
 	}
 	watchConfigMap := func(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
-		return kubeClient.CoreV1().ConfigMaps(namespace).Watch(opts)
+		return kubeClient.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), opts)
 	}
	newConfigMap := func() runtime.Object {
 		return &v1.ConfigMap{}
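The configmap manager wraps Get, List, and Watch in closures, so the context is injected inside each closure body rather than threaded through the manager interface. A small hypothetical sketch of the three context-first verbs against a fake clientset (released client-go v0.18+ signatures; the ConfigMap name is invented):

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset(&v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "example-cm"},
	})
	cms := client.CoreV1().ConfigMaps("default")

	// Get, List, and Watch all take a context first after this refactor.
	cm, err := cms.Get(context.TODO(), "example-cm", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	list, err := cms.List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	w, err := cms.Watch(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	fmt.Println(cm.Name, len(list.Items))
}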
@@ -17,6 +17,7 @@ limitations under the License.
 package configmap
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -49,7 +50,7 @@ func noObjectTTL() (time.Duration, bool) {
 
 func getConfigMap(fakeClient clientset.Interface) manager.GetObjectFunc {
 	return func(namespace, name string, opts metav1.GetOptions) (runtime.Object, error) {
-		return fakeClient.CoreV1().ConfigMaps(namespace).Get(name, opts)
+		return fakeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, opts)
 	}
 }
 
@@ -83,7 +83,7 @@ func (kl *Kubelet) registerWithAPIServer() {
 // value of the annotation for controller-managed attach-detach of attachable
 // persistent volumes for the node.
 func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
-	_, err := kl.kubeClient.CoreV1().Nodes().Create(node)
+	_, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node)
 	if err == nil {
 		return true
 	}
@@ -93,7 +93,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
 		return false
 	}
 
-	existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(string(kl.nodeName), metav1.GetOptions{})
+	existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), metav1.GetOptions{})
 	if err != nil {
 		klog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
 		return false
@@ -420,7 +420,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
 	if tryNumber == 0 {
 		util.FromApiserverCache(&opts)
 	}
-	node, err := kl.heartbeatClient.CoreV1().Nodes().Get(string(kl.nodeName), opts)
+	node, err := kl.heartbeatClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), opts)
 	if err != nil {
 		return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
 	}
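context.TODO() is deliberately a placeholder marking call sites where a real context should eventually be threaded through. The heartbeat Get in tryUpdateNodeStatus is the kind of call that could later carry a deadline, as in this hypothetical sketch (fake clientset, made-up node name, released client-go v0.18+ signatures):

package main

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}})

	// With a deadline instead of context.TODO(), the call is abandoned if
	// the API server does not answer within 10 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	node, err := client.CoreV1().Nodes().Get(ctx, "node-1", metav1.GetOptions{})
	if err != nil {
		fmt.Println("heartbeat get failed:", err)
		return
	}
	fmt.Println("got node:", node.Name)
}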
@@ -17,6 +17,7 @@ limitations under the License.
 package kubelet
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"net"
@@ -678,7 +679,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 	require.True(t, actions[1].Matches("patch", "nodes"))
 	require.Equal(t, actions[1].GetSubresource(), "status")
 
-	updatedNode, err := kubeClient.CoreV1().Nodes().Get(testKubeletHostname, metav1.GetOptions{})
+	updatedNode, err := kubeClient.CoreV1().Nodes().Get(context.TODO(), testKubeletHostname, metav1.GetOptions{})
 	require.NoError(t, err, "can't apply node status patch")
 
 	for i, cond := range updatedNode.Status.Conditions {
@@ -2263,7 +2264,7 @@ func TestUpdateNodeAddresses(t *testing.T) {
 			},
 		}
 
-		_, err := kubeClient.CoreV1().Nodes().Update(oldNode)
+		_, err := kubeClient.CoreV1().Nodes().Update(context.TODO(), oldNode)
 		assert.NoError(t, err)
 		kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
 			func(node *v1.Node) error {
@@ -1779,13 +1779,13 @@ func hasHostNamespace(pod *v1.Pod) bool {
 func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
 	for _, volume := range pod.Spec.Volumes {
 		if volume.PersistentVolumeClaim != nil {
-			pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
+			pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
 			if err != nil {
 				klog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err)
 				continue
 			}
 			if pvc != nil {
-				referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+				referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
 				if err != nil {
 					klog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err)
 					continue
@@ -17,6 +17,7 @@ limitations under the License.
 package checkpoint
 
 import (
+	"context"
 	"fmt"
 	"math/rand"
 	"time"
@@ -191,7 +192,7 @@ func (r *remoteConfigMap) Download(client clientset.Interface, store cache.Store
 	// if we didn't find the ConfigMap in the in-memory store, download it from the API server
 	if cm == nil {
 		utillog.Infof("attempting to download %s", r.APIPath())
-		cm, err = client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(r.source.ConfigMap.Name, metav1.GetOptions{})
+		cm, err = client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(context.TODO(), r.source.ConfigMap.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, status.DownloadError, fmt.Errorf("%s, error: %v", status.DownloadError, err)
 		}
@@ -17,6 +17,7 @@ limitations under the License.
 package kubeletconfig
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"time"
@@ -198,7 +199,7 @@ func restartForNewConfig(eventClient v1core.EventsGetter, nodeName string, sourc
 	// because the event recorder won't flush its queue before we exit (we'd lose the event)
 	event := makeEvent(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, message)
 	klog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message)
-	if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(event); err != nil {
+	if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(context.TODO(), event); err != nil {
 		utillog.Errorf("failed to send event, error: %v", err)
 	}
 	utillog.Infof(message)
@@ -17,6 +17,7 @@ limitations under the License.
 package status
 
 import (
+	"context"
 	"fmt"
 	"sync"
 
@@ -158,7 +159,7 @@ func (s *nodeConfigStatus) Sync(client clientset.Interface, nodeName string) {
 	}()
 
 	// get the Node so we can check the current status
-	oldNode, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+	oldNode, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 	if err != nil {
 		err = fmt.Errorf("could not get Node %q, will not sync status, error: %v", nodeName, err)
 		return
Some files were not shown because too many files have changed in this diff.