Move from glog to klog
- Move from the old github.com/golang/glog to k8s.io/klog
- klog has explicit InitFlags(), so we add them as necessary
- We update the other repositories that we vendor that made a similar change from glog to klog:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by calling InitFlags explicitly in their init() methods

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
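Background for the InitFlags point above: unlike glog, which registers its logging flags (-v, -logtostderr, ...) as a side effect of being imported, klog leaves flag registration to the caller. A minimal sketch of the wiring this implies is below; the package layout and log messages are illustrative, not taken from this commit, and it assumes klog's InitFlags(nil) registering on the global flag.CommandLine set:

package main

import (
	"flag"

	"k8s.io/klog"
)

func init() {
	// klog does not register its flags automatically the way glog did;
	// passing nil registers them on flag.CommandLine. The commit applies
	// the same call in the init() methods of affected tests so that
	// `go test` binaries keep accepting the logging flags.
	klog.InitFlags(nil)
}

func main() {
	flag.Parse()

	// Call sites are otherwise a mechanical rename: glog.X -> klog.X.
	klog.V(4).Infof("verbose message, visible with -v=4 or higher")
	klog.Info("component started")
	klog.Flush()
}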
@@ -104,7 +104,6 @@
 	"github.com/cloudflare/cfssl/signer/local",
 	"github.com/davecgh/go-spew/spew",
 	"github.com/evanphx/json-patch",
-	"github.com/golang/glog",
 	"github.com/golang/groupcache/lru",
 	"github.com/prometheus/client_golang/prometheus",
 	"github.com/robfig/cron",
@@ -88,8 +88,8 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
         "//staging/src/k8s.io/client-go/util/integer:go_default_library",
         "//staging/src/k8s.io/client-go/util/retry:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/golang/groupcache/lru:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -59,8 +59,8 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/gopkg.in/square/go-jose.v2:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -20,7 +20,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"fmt"
 	"k8s.io/api/core/v1"
@@ -162,10 +162,10 @@ func (e *BootstrapSigner) Run(stopCh <-chan struct{}) {
 		return
 	}
 
-	glog.V(5).Infof("Starting workers")
+	klog.V(5).Infof("Starting workers")
 	go wait.Until(e.serviceConfigMapQueue, 0, stopCh)
 	<-stopCh
-	glog.V(1).Infof("Shutting down")
+	klog.V(1).Infof("Shutting down")
 }
 
 func (e *BootstrapSigner) pokeConfigMapSync() {
@@ -198,7 +198,7 @@ func (e *BootstrapSigner) signConfigMap() {
 	// First capture the config we are signing
 	content, ok := newCM.Data[bootstrapapi.KubeConfigKey]
 	if !ok {
-		glog.V(3).Infof("No %s key in %s/%s ConfigMap", bootstrapapi.KubeConfigKey, origCM.Namespace, origCM.Name)
+		klog.V(3).Infof("No %s key in %s/%s ConfigMap", bootstrapapi.KubeConfigKey, origCM.Namespace, origCM.Name)
 		return
 	}
@@ -244,7 +244,7 @@
 func (e *BootstrapSigner) updateConfigMap(cm *v1.ConfigMap) {
 	_, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(cm)
 	if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {
-		glog.V(3).Infof("Error updating ConfigMap: %v", err)
+		klog.V(3).Infof("Error updating ConfigMap: %v", err)
 	}
 }
@@ -295,7 +295,7 @@ func (e *BootstrapSigner) getTokens() map[string]string {
 		if _, ok := ret[tokenID]; ok {
 			// This should never happen as we ensure a consistent secret name.
 			// But leave this in here just in case.
-			glog.V(1).Infof("Duplicate bootstrap tokens found for id %s, ignoring on in %s/%s", tokenID, secret.Namespace, secret.Name)
+			klog.V(1).Infof("Duplicate bootstrap tokens found for id %s, ignoring on in %s/%s", tokenID, secret.Namespace, secret.Name)
 			continue
 		}
@@ -20,7 +20,6 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -32,6 +31,7 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
+	"k8s.io/klog"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/metrics"
@@ -113,8 +113,8 @@ func (tc *TokenCleaner) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer tc.queue.ShutDown()
 
-	glog.Infof("Starting token cleaner controller")
-	defer glog.Infof("Shutting down token cleaner controller")
+	klog.Infof("Starting token cleaner controller")
+	defer klog.Infof("Shutting down token cleaner controller")
 
 	if !controller.WaitForCacheSync("token_cleaner", stopCh, tc.secretSynced) {
 		return
@@ -161,7 +161,7 @@ func (tc *TokenCleaner) processNextWorkItem() bool {
 func (tc *TokenCleaner) syncFunc(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Since(startTime))
+		klog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -171,7 +171,7 @@ func (tc *TokenCleaner) syncFunc(key string) error {
 
 	ret, err := tc.secretLister.Secrets(namespace).Get(name)
 	if apierrors.IsNotFound(err) {
-		glog.V(3).Infof("secret has been deleted: %v", key)
+		klog.V(3).Infof("secret has been deleted: %v", key)
 		return nil
 	}
@@ -188,7 +188,7 @@ func (tc *TokenCleaner) syncFunc(key string) error {
 func (tc *TokenCleaner) evalSecret(o interface{}) {
 	secret := o.(*v1.Secret)
 	if isSecretExpired(secret) {
-		glog.V(3).Infof("Deleting expired secret %s/%s", secret.Namespace, secret.Name)
+		klog.V(3).Infof("Deleting expired secret %s/%s", secret.Namespace, secret.Name)
 		var options *metav1.DeleteOptions
 		if len(secret.UID) > 0 {
 			options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}}
@@ -197,7 +197,7 @@ func (tc *TokenCleaner) evalSecret(o interface{}) {
 		// NotFound isn't a real error (it's already been deleted)
 		// Conflict isn't a real error (the UID precondition failed)
 		if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {
-			glog.V(3).Infof("Error deleting Secret: %v", err)
+			klog.V(3).Infof("Error deleting Secret: %v", err)
 		}
 	}
 }
@@ -20,7 +20,7 @@ import (
 	"regexp"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
@@ -52,24 +52,24 @@ func parseSecretName(name string) (secretID string, ok bool) {
 func validateSecretForSigning(secret *v1.Secret) (tokenID, tokenSecret string, ok bool) {
 	nameTokenID, ok := parseSecretName(secret.Name)
 	if !ok {
-		glog.V(3).Infof("Invalid secret name: %s. Must be of form %s<secret-id>.", secret.Name, bootstrapapi.BootstrapTokenSecretPrefix)
+		klog.V(3).Infof("Invalid secret name: %s. Must be of form %s<secret-id>.", secret.Name, bootstrapapi.BootstrapTokenSecretPrefix)
 		return "", "", false
 	}
 
 	tokenID = getSecretString(secret, bootstrapapi.BootstrapTokenIDKey)
 	if len(tokenID) == 0 {
-		glog.V(3).Infof("No %s key in %s/%s Secret", bootstrapapi.BootstrapTokenIDKey, secret.Namespace, secret.Name)
+		klog.V(3).Infof("No %s key in %s/%s Secret", bootstrapapi.BootstrapTokenIDKey, secret.Namespace, secret.Name)
 		return "", "", false
 	}
 
 	if nameTokenID != tokenID {
-		glog.V(3).Infof("Token ID (%s) doesn't match secret name: %s", tokenID, nameTokenID)
+		klog.V(3).Infof("Token ID (%s) doesn't match secret name: %s", tokenID, nameTokenID)
 		return "", "", false
 	}
 
 	tokenSecret = getSecretString(secret, bootstrapapi.BootstrapTokenSecretKey)
 	if len(tokenSecret) == 0 {
-		glog.V(3).Infof("No %s key in %s/%s Secret", bootstrapapi.BootstrapTokenSecretKey, secret.Namespace, secret.Name)
+		klog.V(3).Infof("No %s key in %s/%s Secret", bootstrapapi.BootstrapTokenSecretKey, secret.Namespace, secret.Name)
 		return "", "", false
 	}
 
@@ -95,12 +95,12 @@ func isSecretExpired(secret *v1.Secret) bool {
 	if len(expiration) > 0 {
 		expTime, err2 := time.Parse(time.RFC3339, expiration)
 		if err2 != nil {
-			glog.V(3).Infof("Unparseable expiration time (%s) in %s/%s Secret: %v. Treating as expired.",
+			klog.V(3).Infof("Unparseable expiration time (%s) in %s/%s Secret: %v. Treating as expired.",
 				expiration, secret.Namespace, secret.Name, err2)
 			return true
 		}
 		if time.Now().After(expTime) {
-			glog.V(3).Infof("Expired bootstrap token in %s/%s Secret: %v",
+			klog.V(3).Infof("Expired bootstrap token in %s/%s Secret: %v",
 				secret.Namespace, secret.Name, expiration)
 			return true
 		}
@@ -25,8 +25,8 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/golang.org/x/time/rate:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -22,8 +22,8 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
 	"golang.org/x/time/rate"
+	"k8s.io/klog"
 
 	certificates "k8s.io/api/certificates/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -57,7 +57,7 @@ func NewCertificateController(
 ) *CertificateController {
 	// Send events to the apiserver
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 
 	cc := &CertificateController{
@@ -74,12 +74,12 @@
 	csrInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
 			csr := obj.(*certificates.CertificateSigningRequest)
-			glog.V(4).Infof("Adding certificate request %s", csr.Name)
+			klog.V(4).Infof("Adding certificate request %s", csr.Name)
 			cc.enqueueCertificateRequest(obj)
 		},
 		UpdateFunc: func(old, new interface{}) {
 			oldCSR := old.(*certificates.CertificateSigningRequest)
-			glog.V(4).Infof("Updating certificate request %s", oldCSR.Name)
+			klog.V(4).Infof("Updating certificate request %s", oldCSR.Name)
 			cc.enqueueCertificateRequest(new)
 		},
 		DeleteFunc: func(obj interface{}) {
@@ -87,16 +87,16 @@
 			if !ok {
 				tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 				if !ok {
-					glog.V(2).Infof("Couldn't get object from tombstone %#v", obj)
+					klog.V(2).Infof("Couldn't get object from tombstone %#v", obj)
 					return
 				}
 				csr, ok = tombstone.Obj.(*certificates.CertificateSigningRequest)
 				if !ok {
-					glog.V(2).Infof("Tombstone contained object that is not a CSR: %#v", obj)
+					klog.V(2).Infof("Tombstone contained object that is not a CSR: %#v", obj)
 					return
 				}
 			}
-			glog.V(4).Infof("Deleting certificate request %s", csr.Name)
+			klog.V(4).Infof("Deleting certificate request %s", csr.Name)
 			cc.enqueueCertificateRequest(obj)
 		},
 	})
@@ -110,8 +110,8 @@ func (cc *CertificateController) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer cc.queue.ShutDown()
 
-	glog.Infof("Starting certificate controller")
-	defer glog.Infof("Shutting down certificate controller")
+	klog.Infof("Starting certificate controller")
+	defer klog.Infof("Shutting down certificate controller")
 
 	if !controller.WaitForCacheSync("certificate", stopCh, cc.csrsSynced) {
 		return
@@ -143,7 +143,7 @@ func (cc *CertificateController) processNextWorkItem() bool {
 		if _, ignorable := err.(ignorableError); !ignorable {
 			utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", cKey, err))
 		} else {
-			glog.V(4).Infof("Sync %v failed with : %v", cKey, err)
+			klog.V(4).Infof("Sync %v failed with : %v", cKey, err)
 		}
 		return true
 	}
@@ -169,11 +169,11 @@ func (cc *CertificateController) enqueueCertificateRequest(obj interface{}) {
 func (cc *CertificateController) syncFunc(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Since(startTime))
+		klog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Since(startTime))
 	}()
 	csr, err := cc.csrLister.Get(key)
 	if errors.IsNotFound(err) {
-		glog.V(3).Infof("csr has been deleted: %v", key)
+		klog.V(3).Infof("csr has been deleted: %v", key)
 		return nil
 	}
 	if err != nil {
@@ -14,7 +14,7 @@ go_library(
         "//staging/src/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/listers/certificates/v1beta1:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -26,7 +26,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	capi "k8s.io/api/certificates/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -78,8 +78,8 @@ func NewCSRCleanerController(
 func (ccc *CSRCleanerController) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 
-	glog.Infof("Starting CSR cleaner controller")
-	defer glog.Infof("Shutting down CSR cleaner controller")
+	klog.Infof("Starting CSR cleaner controller")
+	defer klog.Infof("Shutting down CSR cleaner controller")
 
 	for i := 0; i < workers; i++ {
 		go wait.Until(ccc.worker, pollingInterval, stopCh)
@@ -92,12 +92,12 @@ func (ccc *CSRCleanerController) Run(workers int, stopCh <-chan struct{}) {
 func (ccc *CSRCleanerController) worker() {
 	csrs, err := ccc.csrLister.List(labels.Everything())
 	if err != nil {
-		glog.Errorf("Unable to list CSRs: %v", err)
+		klog.Errorf("Unable to list CSRs: %v", err)
 		return
 	}
 	for _, csr := range csrs {
 		if err := ccc.handle(csr); err != nil {
-			glog.Errorf("Error while attempting to clean CSR %q: %v", csr.Name, err)
+			klog.Errorf("Error while attempting to clean CSR %q: %v", csr.Name, err)
 		}
 	}
 }
@@ -124,7 +124,7 @@ func isIssuedExpired(csr *capi.CertificateSigningRequest) (bool, error) {
 	}
 	for _, c := range csr.Status.Conditions {
 		if c.Type == capi.CertificateApproved && isIssued(csr) && isExpired {
-			glog.Infof("Cleaning CSR %q as the associated certificate is expired.", csr.Name)
+			klog.Infof("Cleaning CSR %q as the associated certificate is expired.", csr.Name)
 			return true, nil
 		}
 	}
@@ -138,7 +138,7 @@ func isPendingPastDeadline(csr *capi.CertificateSigningRequest) bool {
 	// If there are no Conditions on the status, the CSR will appear via
 	// `kubectl` as `Pending`.
 	if len(csr.Status.Conditions) == 0 && isOlderThan(csr.CreationTimestamp, pendingExpiration) {
-		glog.Infof("Cleaning CSR %q as it is more than %v old and unhandled.", csr.Name, pendingExpiration)
+		klog.Infof("Cleaning CSR %q as it is more than %v old and unhandled.", csr.Name, pendingExpiration)
 		return true
 	}
 	return false
@@ -150,7 +150,7 @@
 func isDeniedPastDeadline(csr *capi.CertificateSigningRequest) bool {
 	for _, c := range csr.Status.Conditions {
 		if c.Type == capi.CertificateDenied && isOlderThan(c.LastUpdateTime, deniedExpiration) {
-			glog.Infof("Cleaning CSR %q as it is more than %v old and denied.", csr.Name, deniedExpiration)
+			klog.Infof("Cleaning CSR %q as it is more than %v old and denied.", csr.Name, deniedExpiration)
 			return true
 		}
 	}
@@ -163,7 +163,7 @@
 func isIssuedPastDeadline(csr *capi.CertificateSigningRequest) bool {
 	for _, c := range csr.Status.Conditions {
 		if c.Type == capi.CertificateApproved && isIssued(csr) && isOlderThan(c.LastUpdateTime, approvedExpiration) {
-			glog.Infof("Cleaning CSR %q as it is more than %v old and approved.", csr.Name, approvedExpiration)
+			klog.Infof("Cleaning CSR %q as it is more than %v old and approved.", csr.Name, approvedExpiration)
 			return true
 		}
 	}
@@ -18,7 +18,7 @@ go_library(
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -21,7 +21,6 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -32,6 +31,7 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/metrics"
 )
@@ -102,8 +102,8 @@ func (c *Publisher) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()
 
-	glog.Infof("Starting root CA certificate configmap publisher")
-	defer glog.Infof("Shutting down root CA certificate configmap publisher")
+	klog.Infof("Starting root CA certificate configmap publisher")
+	defer klog.Infof("Shutting down root CA certificate configmap publisher")
 
 	if !controller.WaitForCacheSync("crt configmap", stopCh, c.cmListerSynced, c.nsListerSynced) {
 		return
@@ -190,7 +190,7 @@ func (c *Publisher) processNextWorkItem() bool {
 func (c *Publisher) syncNamespace(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
+		klog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
 	}()
 
 	ns, err := c.nsLister.Get(key)
@@ -38,7 +38,7 @@ import (
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/serviceaccount"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 // ControllerClientBuilder allows you to get clients and configs for controllers
@@ -65,7 +65,7 @@ func (b SimpleControllerClientBuilder) Config(name string) (*restclient.Config,
 func (b SimpleControllerClientBuilder) ConfigOrDie(name string) *restclient.Config {
 	clientConfig, err := b.Config(name)
 	if err != nil {
-		glog.Fatal(err)
+		klog.Fatal(err)
 	}
 	return clientConfig
 }
@@ -81,7 +81,7 @@ func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface,
 func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interface {
 	client, err := b.Client(name)
 	if err != nil {
-		glog.Fatal(err)
+		klog.Fatal(err)
 	}
 	return client
 }
@@ -146,15 +146,15 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
 		}
 		validConfig, valid, err := b.getAuthenticatedConfig(sa, string(secret.Data[v1.ServiceAccountTokenKey]))
 		if err != nil {
-			glog.Warningf("error validating API token for %s/%s in secret %s: %v", sa.Name, sa.Namespace, secret.Name, err)
+			klog.Warningf("error validating API token for %s/%s in secret %s: %v", sa.Name, sa.Namespace, secret.Name, err)
 			// continue watching for good tokens
 			return false, nil
 		}
 		if !valid {
-			glog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Name, sa.Namespace)
+			klog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Name, sa.Namespace)
 			// try to delete the secret containing the invalid token
 			if err := b.CoreClient.Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
-				glog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Name, sa.Namespace, err)
+				klog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Name, sa.Namespace, err)
 			}
 			// continue watching for good tokens
 			return false, nil
@@ -208,14 +208,14 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount,
 	tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}}
 	if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(tokenReview); err == nil {
 		if !tokenResult.Status.Authenticated {
-			glog.Warningf("Token for %s/%s did not authenticate correctly", sa.Name, sa.Namespace)
+			klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Name, sa.Namespace)
 			return nil, false, nil
 		}
 		if tokenResult.Status.User.Username != username {
-			glog.Warningf("Token for %s/%s authenticated as unexpected username: %s", sa.Name, sa.Namespace, tokenResult.Status.User.Username)
+			klog.Warningf("Token for %s/%s authenticated as unexpected username: %s", sa.Name, sa.Namespace, tokenResult.Status.User.Username)
 			return nil, false, nil
 		}
-		glog.V(4).Infof("Verified credential for %s/%s", sa.Name, sa.Namespace)
+		klog.V(4).Infof("Verified credential for %s/%s", sa.Name, sa.Namespace)
 		return clientConfig, true, nil
 	}
 
@@ -229,7 +229,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount,
 	}
 	err = client.Get().AbsPath("/apis").Do().Error()
 	if apierrors.IsUnauthorized(err) {
-		glog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Name, sa.Namespace, err)
+		klog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Name, sa.Namespace, err)
 		return nil, false, nil
 	}
 
@@ -239,7 +239,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount,
 func (b SAControllerClientBuilder) ConfigOrDie(name string) *restclient.Config {
 	clientConfig, err := b.Config(name)
 	if err != nil {
-		glog.Fatal(err)
+		klog.Fatal(err)
 	}
 	return clientConfig
 }
@@ -255,7 +255,7 @@ func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, err
 func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface {
 	client, err := b.Client(name)
 	if err != nil {
-		glog.Fatal(err)
+		klog.Fatal(err)
 	}
 	return client
 }
@@ -43,7 +43,7 @@ go_library(
         "//staging/src/k8s.io/client-go/util/retry:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -74,8 +74,8 @@ go_test(
         "//staging/src/k8s.io/client-go/testing:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -22,7 +22,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -84,12 +84,12 @@ func NewCloudNodeController(
 
 	eventBroadcaster := record.NewBroadcaster()
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"})
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 	if kubeClient != nil {
-		glog.V(0).Infof("Sending events to api server.")
+		klog.V(0).Infof("Sending events to api server.")
 		eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 	} else {
-		glog.V(0).Infof("No api server defined - no events will be sent to API server.")
+		klog.V(0).Infof("No api server defined - no events will be sent to API server.")
 	}
 
 	cnc := &CloudNodeController{
@@ -137,7 +137,7 @@ func (cnc *CloudNodeController) UpdateNodeStatus() {
 
 	nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
 	if err != nil {
-		glog.Errorf("Error monitoring node status: %v", err)
+		klog.Errorf("Error monitoring node status: %v", err)
 		return
 	}
 
@@ -151,27 +151,27 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
 	// Do not process nodes that are still tainted
 	cloudTaint := getCloudTaint(node.Spec.Taints)
 	if cloudTaint != nil {
-		glog.V(5).Infof("This node %s is still tainted. Will not process.", node.Name)
+		klog.V(5).Infof("This node %s is still tainted. Will not process.", node.Name)
 		return
 	}
 	// Node that isn't present according to the cloud provider shouldn't have its address updated
 	exists, err := ensureNodeExistsByProviderID(instances, node)
 	if err != nil {
 		// Continue to update node address when not sure the node is not exists
-		glog.Errorf("%v", err)
+		klog.Errorf("%v", err)
 	} else if !exists {
-		glog.V(4).Infof("The node %s is no longer present according to the cloud provider, do not process.", node.Name)
+		klog.V(4).Infof("The node %s is no longer present according to the cloud provider, do not process.", node.Name)
 		return
 	}
 
 	nodeAddresses, err := getNodeAddressesByProviderIDOrName(instances, node)
 	if err != nil {
-		glog.Errorf("%v", err)
+		klog.Errorf("%v", err)
 		return
 	}
 
 	if len(nodeAddresses) == 0 {
-		glog.V(5).Infof("Skipping node address update for node %q since cloud provider did not return any", node.Name)
+		klog.V(5).Infof("Skipping node address update for node %q since cloud provider did not return any", node.Name)
 		return
 	}
 
@@ -195,7 +195,7 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
 	// it can be found in the cloud as well (consistent with the behaviour in kubelet)
 	if nodeIP, ok := ensureNodeProvidedIPExists(node, nodeAddresses); ok {
 		if nodeIP == nil {
-			glog.Errorf("Specified Node IP not found in cloudprovider")
+			klog.Errorf("Specified Node IP not found in cloudprovider")
 			return
 		}
 	}
@@ -206,7 +206,7 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
 	}
 	_, _, err = nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode)
 	if err != nil {
-		glog.Errorf("Error patching node with cloud ip addresses = [%v]", err)
+		klog.Errorf("Error patching node with cloud ip addresses = [%v]", err)
 	}
 }
 
@@ -221,7 +221,7 @@ func (cnc *CloudNodeController) MonitorNode() {
 
 	nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
 	if err != nil {
-		glog.Errorf("Error monitoring node status: %v", err)
+		klog.Errorf("Error monitoring node status: %v", err)
 		return
 	}
 
@@ -238,13 +238,13 @@ func (cnc *CloudNodeController) MonitorNode() {
 			name := node.Name
 			node, err = cnc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{})
 			if err != nil {
-				glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
+				klog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
 				break
 			}
 			time.Sleep(retrySleepTime)
 		}
 		if currentReadyCondition == nil {
-			glog.Errorf("Update status of Node %v from CloudNodeController exceeds retry count or the Node was deleted.", node.Name)
+			klog.Errorf("Update status of Node %v from CloudNodeController exceeds retry count or the Node was deleted.", node.Name)
 			continue
 		}
 		// If the known node status says that Node is NotReady, then check if the node has been removed
@@ -256,14 +256,14 @@ func (cnc *CloudNodeController) MonitorNode() {
 			// does not delete node from kubernetes cluster when instance it is shutdown see issue #46442
 			shutdown, err := nodectrlutil.ShutdownInCloudProvider(context.TODO(), cnc.cloud, node)
 			if err != nil {
-				glog.Errorf("Error checking if node %s is shutdown: %v", node.Name, err)
+				klog.Errorf("Error checking if node %s is shutdown: %v", node.Name, err)
 			}
 
 			if shutdown && err == nil {
 				// if node is shutdown add shutdown taint
 				err = controller.AddOrUpdateTaintOnNode(cnc.kubeClient, node.Name, controller.ShutdownTaint)
 				if err != nil {
-					glog.Errorf("Error patching node taints: %v", err)
+					klog.Errorf("Error patching node taints: %v", err)
 				}
 				// Continue checking the remaining nodes since the current one is shutdown.
 				continue
@@ -273,7 +273,7 @@ func (cnc *CloudNodeController) MonitorNode() {
 			// doesn't, delete the node immediately.
 			exists, err := ensureNodeExistsByProviderID(instances, node)
 			if err != nil {
-				glog.Errorf("Error checking if node %s exists: %v", node.Name, err)
+				klog.Errorf("Error checking if node %s exists: %v", node.Name, err)
 				continue
 			}
 
@@ -282,7 +282,7 @@ func (cnc *CloudNodeController) MonitorNode() {
 				continue
 			}
 
-			glog.V(2).Infof("Deleting node since it is no longer present in cloud provider: %s", node.Name)
+			klog.V(2).Infof("Deleting node since it is no longer present in cloud provider: %s", node.Name)
 
 			ref := &v1.ObjectReference{
 				Kind: "Node",
@@ -290,14 +290,14 @@ func (cnc *CloudNodeController) MonitorNode() {
 				UID:       types.UID(node.UID),
 				Namespace: "",
 			}
-			glog.V(2).Infof("Recording %s event message for node %s", "DeletingNode", node.Name)
+			klog.V(2).Infof("Recording %s event message for node %s", "DeletingNode", node.Name)
 
 			cnc.recorder.Eventf(ref, v1.EventTypeNormal, fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name), "Node %s event: %s", node.Name, "DeletingNode")
 
 			go func(nodeName string) {
 				defer utilruntime.HandleCrash()
 				if err := cnc.kubeClient.CoreV1().Nodes().Delete(nodeName, nil); err != nil {
-					glog.Errorf("unable to delete node %q: %v", nodeName, err)
+					klog.Errorf("unable to delete node %q: %v", nodeName, err)
 				}
 			}(node.Name)
 
@@ -305,7 +305,7 @@ func (cnc *CloudNodeController) MonitorNode() {
 			// if taint exist remove taint
 			err = controller.RemoveTaintOffNode(cnc.kubeClient, node.Name, node, controller.ShutdownTaint)
 			if err != nil {
-				glog.Errorf("Error patching node taints: %v", err)
+				klog.Errorf("Error patching node taints: %v", err)
 			}
 		}
 	}
@@ -326,7 +326,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
 
 	cloudTaint := getCloudTaint(node.Spec.Taints)
 	if cloudTaint == nil {
-		glog.V(2).Infof("This node %s is registered without the cloud taint. Will not process.", node.Name)
+		klog.V(2).Infof("This node %s is registered without the cloud taint. Will not process.", node.Name)
 		return
 	}
 
@@ -365,7 +365,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
 				// we should attempt to set providerID on curNode, but
 				// we can continue if we fail since we will attempt to set
 				// node addresses given the node name in getNodeAddressesByProviderIDOrName
-				glog.Errorf("failed to set node provider id: %v", err)
+				klog.Errorf("failed to set node provider id: %v", err)
 			}
 		}
 
@@ -385,7 +385,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
 		if instanceType, err := getInstanceTypeByProviderIDOrName(instances, curNode); err != nil {
 			return err
 		} else if instanceType != "" {
-			glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
+			klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
 			curNode.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType
 		}
 
@@ -395,11 +395,11 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
 				return fmt.Errorf("failed to get zone from cloud provider: %v", err)
 			}
 			if zone.FailureDomain != "" {
-				glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain)
+				klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain)
 				curNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain
 			}
 			if zone.Region != "" {
-				glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region)
+				klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region)
 				curNode.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region
 			}
 		}
@@ -420,7 +420,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
 		return
 	}
 
-	glog.Infof("Successfully initialized node %s with cloud provider", node.Name)
+	klog.Infof("Successfully initialized node %s with cloud provider", node.Name)
 }
 
 func getCloudTaint(taints []v1.Taint) *v1.Taint {
@@ -458,7 +458,7 @@ func ensureNodeExistsByProviderID(instances cloudprovider.Instances, node *v1.No
 		}
 
 		if providerID == "" {
-			glog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name)
+			klog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name)
 			return false, nil
 		}
 	}
@@ -37,8 +37,8 @@ import (
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 
-	"github.com/golang/glog"
 	"github.com/stretchr/testify/assert"
+	"k8s.io/klog"
 )
 
 func TestEnsureNodeExistsByProviderID(t *testing.T) {
@@ -250,7 +250,7 @@ func TestNodeShutdown(t *testing.T) {
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
 		nodeStatusUpdateFrequency: 1 * time.Second,
 	}
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 
 	cloudNodeController.Run(wait.NeverStop)
 
@@ -349,7 +349,7 @@ func TestNodeDeleted(t *testing.T) {
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
 		nodeStatusUpdateFrequency: 1 * time.Second,
 	}
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 
 	cloudNodeController.Run(wait.NeverStop)
 
@@ -429,7 +429,7 @@ func TestNodeInitialized(t *testing.T) {
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
 		nodeStatusUpdateFrequency: 1 * time.Second,
 	}
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 
 	cloudNodeController.AddCloudNode(fnh.Existing[0])
 
@@ -494,7 +494,7 @@ func TestNodeIgnored(t *testing.T) {
 		nodeMonitorPeriod: 5 * time.Second,
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
 	}
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 
 	cloudNodeController.AddCloudNode(fnh.Existing[0])
 	assert.Equal(t, 0, len(fnh.UpdatedNodes), "Node was wrongly updated")
@@ -568,7 +568,7 @@ func TestGCECondition(t *testing.T) {
 		nodeMonitorPeriod: 1 * time.Second,
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
 	}
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 
 	cloudNodeController.AddCloudNode(fnh.Existing[0])
 
@@ -658,7 +658,7 @@ func TestZoneInitialized(t *testing.T) {
 		nodeMonitorPeriod: 5 * time.Second,
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
 	}
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 
 	cloudNodeController.AddCloudNode(fnh.Existing[0])
 
@@ -749,7 +749,7 @@ func TestNodeAddresses(t *testing.T) {
 		nodeStatusUpdateFrequency: 1 * time.Second,
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
 	}
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 
 	cloudNodeController.AddCloudNode(fnh.Existing[0])
 
@@ -864,7 +864,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
 		nodeStatusUpdateFrequency: 1 * time.Second,
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
 	}
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 
 	cloudNodeController.AddCloudNode(fnh.Existing[0])
 
@@ -1156,7 +1156,7 @@ func TestNodeProviderID(t *testing.T) {
 		nodeStatusUpdateFrequency: 1 * time.Second,
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
 	}
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 
 	cloudNodeController.AddCloudNode(fnh.Existing[0])
 
@@ -1240,7 +1240,7 @@ func TestNodeProviderIDAlreadySet(t *testing.T) {
 		nodeStatusUpdateFrequency: 1 * time.Second,
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
 	}
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 
 	cloudNodeController.AddCloudNode(fnh.Existing[0])
 
@@ -22,7 +22,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 
@@ -109,8 +109,8 @@ func (pvlc *PersistentVolumeLabelController) Run(threadiness int, stopCh <-chan
 	defer utilruntime.HandleCrash()
 	defer pvlc.queue.ShutDown()
 
-	glog.Infof("Starting PersistentVolumeLabelController")
-	defer glog.Infof("Shutting down PersistentVolumeLabelController")
+	klog.Infof("Starting PersistentVolumeLabelController")
+	defer klog.Infof("Shutting down PersistentVolumeLabelController")
 
 	go pvlc.pvlController.Run(stopCh)
 
@@ -197,7 +197,7 @@ func (pvlc *PersistentVolumeLabelController) addLabelsAndAffinityToVolume(vol *v
 		}
 		volumeLabels = labels
 	} else {
-		glog.V(4).Info("cloud provider does not support PVLabeler")
+		klog.V(4).Info("cloud provider does not support PVLabeler")
 	}
 	return pvlc.updateVolume(vol, volumeLabels)
 }
@@ -244,7 +244,7 @@ func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolum
 	}
 	// Populate NodeAffinity with requirements if there are no conflicting keys found
 	if v1helper.NodeSelectorRequirementKeysExistInNodeSelectorTerms(requirements, newVolume.Spec.NodeAffinity.Required.NodeSelectorTerms) {
-		glog.V(4).Infof("NodeSelectorRequirements for cloud labels %v conflict with existing NodeAffinity %v. Skipping addition of NodeSelectorRequirements for cloud labels.",
+		klog.V(4).Infof("NodeSelectorRequirements for cloud labels %v conflict with existing NodeAffinity %v. Skipping addition of NodeSelectorRequirements for cloud labels.",
 			requirements, newVolume.Spec.NodeAffinity)
 	} else {
 		for _, req := range requirements {
@@ -255,7 +255,7 @@ func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolum
 		}
 	}
 	newVolume.Initializers = removeInitializer(newVolume.Initializers, initializerName)
-	glog.V(4).Infof("removed initializer on PersistentVolume %s", newVolume.Name)
+	klog.V(4).Infof("removed initializer on PersistentVolume %s", newVolume.Name)
 
 	oldData, err := json.Marshal(vol)
 	if err != nil {
@@ -276,7 +276,7 @@ func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolum
 
 func (pvlc *PersistentVolumeLabelController) updateVolume(vol *v1.PersistentVolume, volLabels map[string]string) error {
 	volName := vol.Name
-	glog.V(4).Infof("updating PersistentVolume %s", volName)
+	klog.V(4).Infof("updating PersistentVolume %s", volName)
 	patchBytes, err := pvlc.createPatch(vol, volLabels)
 	if err != nil {
 		return err
@@ -286,7 +286,7 @@ func (pvlc *PersistentVolumeLabelController) updateVolume(vol *v1.PersistentVolu
 	if err != nil {
 		return fmt.Errorf("failed to update PersistentVolume %s: %v", volName, err)
 	}
-	glog.V(4).Infof("updated PersistentVolume %s", volName)
+	klog.V(4).Infof("updated PersistentVolume %s", volName)
 
 	return nil
 }
@@ -19,7 +19,7 @@ go_library(
         "//staging/src/k8s.io/client-go/listers/rbac/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -21,7 +21,7 @@ import (
 	"sort"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
@@ -145,8 +145,8 @@ func (c *ClusterRoleAggregationController) Run(workers int, stopCh <-chan struct
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()
 
-	glog.Infof("Starting ClusterRoleAggregator")
-	defer glog.Infof("Shutting down ClusterRoleAggregator")
+	klog.Infof("Starting ClusterRoleAggregator")
+	defer klog.Infof("Shutting down ClusterRoleAggregator")
 
 	if !controller.WaitForCacheSync("ClusterRoleAggregator", stopCh, c.clusterRolesSynced) {
 		return
@@ -20,7 +20,6 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/golang/glog"
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -28,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog"
 )
 
 type BaseControllerRefManager struct {
@@ -223,7 +223,7 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
 // ReleasePod sends a patch to free the pod from the control of the controller.
 // It returns the error if the patching fails. 404 and 422 errors are ignored.
 func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
-	glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s",
+	klog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s",
 		pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
 	deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pod.UID)
 	err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch))
@@ -345,7 +345,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) er
 // ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller.
 // It returns the error if the patching fails. 404 and 422 errors are ignored.
 func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error {
-	glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s",
+	klog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s",
 		replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
 	deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID)
 	err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch))
@@ -480,7 +480,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history
 // ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
 // It returns the error if the patching fails. 404 and 422 errors are ignored.
 func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error {
-	glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
+	klog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
 		history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
 	deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID)
 	err := m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(deleteOwnerRefPatch))
@@ -51,7 +51,7 @@ import (
 	hashutil "k8s.io/kubernetes/pkg/util/hash"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 const (
@@ -170,7 +170,7 @@ func (r *ControllerExpectations) GetExpectations(controllerKey string) (*Control
 func (r *ControllerExpectations) DeleteExpectations(controllerKey string) {
 	if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists {
 		if err := r.Delete(exp); err != nil {
-			glog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err)
+			klog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err)
 		}
 	}
 }
@@ -181,24 +181,24 @@ func (r *ControllerExpectations) DeleteExpectations(controllerKey string) {
 func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool {
 	if exp, exists, err := r.GetExpectations(controllerKey); exists {
 		if exp.Fulfilled() {
-			glog.V(4).Infof("Controller expectations fulfilled %#v", exp)
+			klog.V(4).Infof("Controller expectations fulfilled %#v", exp)
 			return true
 		} else if exp.isExpired() {
-			glog.V(4).Infof("Controller expectations expired %#v", exp)
+			klog.V(4).Infof("Controller expectations expired %#v", exp)
 			return true
 		} else {
-			glog.V(4).Infof("Controller still waiting on expectations %#v", exp)
+			klog.V(4).Infof("Controller still waiting on expectations %#v", exp)
 			return false
 		}
 	} else if err != nil {
-		glog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err)
+		klog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err)
 	} else {
 		// When a new controller is created, it doesn't have expectations.
 		// When it doesn't see expected watch events for > TTL, the expectations expire.
 		//	- In this case it wakes up, creates/deletes controllees, and sets expectations again.
 		// When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire.
 		//	- In this case it continues without setting expectations till it needs to create/delete controllees.
-		glog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey)
+		klog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey)
 	}
 	// Trigger a sync if we either encountered and error (which shouldn't happen since we're
 	// getting from local store) or this controller hasn't established expectations.
@@ -215,7 +215,7 @@ func (exp *ControlleeExpectations) isExpired() bool {
 // SetExpectations registers new expectations for the given controller. Forgets existing expectations.
 func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error {
 	exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: clock.RealClock{}.Now()}
-	glog.V(4).Infof("Setting expectations %#v", exp)
+	klog.V(4).Infof("Setting expectations %#v", exp)
 	return r.Add(exp)
 }
 
@@ -232,7 +232,7 @@ func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, de
 	if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
 		exp.Add(int64(-add), int64(-del))
 		// The expectations might've been modified since the update on the previous line.
-		glog.V(4).Infof("Lowered expectations %#v", exp)
+		klog.V(4).Infof("Lowered expectations %#v", exp)
 	}
 }
 
@@ -241,7 +241,7 @@ func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, de
 	if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
 		exp.Add(int64(add), int64(del))
 		// The expectations might've been modified since the update on the previous line.
-		glog.V(4).Infof("Raised expectations %#v", exp)
+		klog.V(4).Infof("Raised expectations %#v", exp)
 	}
 }
 
@@ -340,13 +340,13 @@ func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, delete
 	defer u.uidStoreLock.Unlock()
 
 	if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 {
-		glog.Errorf("Clobbering existing delete keys: %+v", existing)
+		klog.Errorf("Clobbering existing delete keys: %+v", existing)
 	}
 	expectedUIDs := sets.NewString()
 	for _, k := range deletedKeys {
 		expectedUIDs.Insert(k)
 	}
-	glog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys)
+	klog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys)
 	if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil {
 		return err
 	}
@@ -360,7 +360,7 @@ func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey st
 
 	uids := u.GetUIDs(rcKey)
 	if uids != nil && uids.Has(deleteKey) {
-		glog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey)
+		klog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey)
 		u.ControllerExpectationsInterface.DeletionObserved(rcKey)
 		uids.Delete(deleteKey)
 	}
@@ -375,7 +375,7 @@ func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) {
 	u.ControllerExpectationsInterface.DeleteExpectations(rcKey)
 	if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists {
 		if err := u.uidStore.Delete(uidExp); err != nil {
-			glog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err)
+			klog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err)
 		}
 	}
 }
@@ -581,10 +581,10 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT
 	} else {
 		accessor, err := meta.Accessor(object)
 		if err != nil {
-			glog.Errorf("parentObject does not have ObjectMeta, %v", err)
+			klog.Errorf("parentObject does not have ObjectMeta, %v", err)
 			return nil
 		}
-		glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name)
+		klog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name)
 		r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name)
 	}
 	return nil
@@ -595,7 +595,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
 	if err != nil {
 		return fmt.Errorf("object does not have ObjectMeta, %v", err)
 	}
-	glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
+	klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
 	if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil && !apierrors.IsNotFound(err) {
 		r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
 		return fmt.Errorf("unable to delete pods: %v", err)
@@ -806,7 +806,7 @@ func FilterActivePods(pods []*v1.Pod) []*v1.Pod {
 		if IsPodActive(p) {
 			result = append(result, p)
 		} else {
-			glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v",
+			klog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v",
 				p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp)
 		}
 	}
@@ -1024,14 +1024,14 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
 // indicating that the controller identified by controllerName is waiting for syncs, followed by
 // either a successful or failed sync.
 func WaitForCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool {
-	glog.Infof("Waiting for caches to sync for %s controller", controllerName)
+	klog.Infof("Waiting for caches to sync for %s controller", controllerName)
 
 	if !cache.WaitForCacheSync(stopCh, cacheSyncs...) {
 		utilruntime.HandleError(fmt.Errorf("Unable to sync caches for %s controller", controllerName))
 		return false
 	}
 
-	glog.Infof("Caches are synced for %s controller", controllerName)
+	klog.Infof("Caches are synced for %s controller", controllerName)
 	return true
 }
 
@@ -34,8 +34,8 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/robfig/cron:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -33,7 +33,7 @@ import (
"sort"
"time"

"github.com/golang/glog"
"k8s.io/klog"

batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
@@ -68,7 +68,7 @@ type CronJobController struct {

func NewCronJobController(kubeClient clientset.Interface) (*CronJobController, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})

if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -91,11 +91,11 @@ func NewCronJobController(kubeClient clientset.Interface) (*CronJobController, e
// Run the main goroutine responsible for watching and syncing jobs.
func (jm *CronJobController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting CronJob Manager")
klog.Infof("Starting CronJob Manager")
// Check things every 10 second.
go wait.Until(jm.syncAll, 10*time.Second, stopCh)
<-stopCh
glog.Infof("Shutting down CronJob Manager")
klog.Infof("Shutting down CronJob Manager")
}

// syncAll lists all the CronJobs and Jobs and reconciles them.
@@ -110,7 +110,7 @@ func (jm *CronJobController) syncAll() {
return
}
js := jl.Items
glog.V(4).Infof("Found %d jobs", len(js))
klog.V(4).Infof("Found %d jobs", len(js))

sjl, err := jm.kubeClient.BatchV1beta1().CronJobs(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
@@ -118,10 +118,10 @@ func (jm *CronJobController) syncAll() {
return
}
sjs := sjl.Items
glog.V(4).Infof("Found %d cronjobs", len(sjs))
klog.V(4).Infof("Found %d cronjobs", len(sjs))

jobsBySj := groupJobsByParent(js)
glog.V(4).Infof("Found %d groups", len(jobsBySj))
klog.V(4).Infof("Found %d groups", len(jobsBySj))

for _, sj := range sjs {
syncOne(&sj, jobsBySj[sj.UID], time.Now(), jm.jobControl, jm.sjControl, jm.podControl, jm.recorder)
@@ -170,7 +170,7 @@ func cleanupFinishedJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobContr
// Update the CronJob, in case jobs were removed from the list.
if _, err := sjc.UpdateStatus(sj); err != nil {
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
klog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
}
}

@@ -183,11 +183,11 @@ func removeOldestJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobControlI
}

nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
glog.V(4).Infof("Cleaning up %d/%d jobs from %s", numToDelete, len(js), nameForLog)
klog.V(4).Infof("Cleaning up %d/%d jobs from %s", numToDelete, len(js), nameForLog)

sort.Sort(byJobStartTime(js))
for i := 0; i < numToDelete; i++ {
glog.V(4).Infof("Removing job %s from %s", js[i].Name, nameForLog)
klog.V(4).Infof("Removing job %s from %s", js[i].Name, nameForLog)
deleteJob(sj, &js[i], jc, pc, recorder, "history limit reached")
}
}
@@ -234,7 +234,7 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo

updatedSJ, err := sjc.UpdateStatus(sj)
if err != nil {
glog.Errorf("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
klog.Errorf("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
return
}
*sj = *updatedSJ
@@ -246,23 +246,23 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo
}

if sj.Spec.Suspend != nil && *sj.Spec.Suspend {
glog.V(4).Infof("Not starting job for %s because it is suspended", nameForLog)
klog.V(4).Infof("Not starting job for %s because it is suspended", nameForLog)
return
}

times, err := getRecentUnmetScheduleTimes(*sj, now)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedNeedsStart", "Cannot determine if job needs to be started: %v", err)
glog.Errorf("Cannot determine if %s needs to be started: %v", nameForLog, err)
klog.Errorf("Cannot determine if %s needs to be started: %v", nameForLog, err)
return
}
// TODO: handle multiple unmet start times, from oldest to newest, updating status as needed.
if len(times) == 0 {
glog.V(4).Infof("No unmet start times for %s", nameForLog)
klog.V(4).Infof("No unmet start times for %s", nameForLog)
return
}
if len(times) > 1 {
glog.V(4).Infof("Multiple unmet start times for %s so only starting last one", nameForLog)
klog.V(4).Infof("Multiple unmet start times for %s so only starting last one", nameForLog)
}

scheduledTime := times[len(times)-1]
@@ -271,7 +271,7 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo
tooLate = scheduledTime.Add(time.Second * time.Duration(*sj.Spec.StartingDeadlineSeconds)).Before(now)
}
if tooLate {
glog.V(4).Infof("Missed starting window for %s", nameForLog)
klog.V(4).Infof("Missed starting window for %s", nameForLog)
recorder.Eventf(sj, v1.EventTypeWarning, "MissSchedule", "Missed scheduled time to start a job: %s", scheduledTime.Format(time.RFC1123Z))
// TODO: Since we don't set LastScheduleTime when not scheduling, we are going to keep noticing
// the miss every cycle. In order to avoid sending multiple events, and to avoid processing
@@ -292,14 +292,14 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo
// TODO: for Forbid, we could use the same name for every execution, as a lock.
// With replace, we could use a name that is deterministic per execution time.
// But that would mean that you could not inspect prior successes or failures of Forbid jobs.
glog.V(4).Infof("Not starting job for %s because of prior execution still running and concurrency policy is Forbid", nameForLog)
klog.V(4).Infof("Not starting job for %s because of prior execution still running and concurrency policy is Forbid", nameForLog)
return
}
if sj.Spec.ConcurrencyPolicy == batchv1beta1.ReplaceConcurrent {
for _, j := range sj.Status.Active {
// TODO: this should be replaced with server side job deletion
// currently this mimics JobReaper from pkg/kubectl/stop.go
glog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog)
klog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog)

job, err := jc.GetJob(j.Namespace, j.Name)
if err != nil {
@@ -314,7 +314,7 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo

jobReq, err := getJobFromTemplate(sj, scheduledTime)
if err != nil {
glog.Errorf("Unable to make Job from template in %s: %v", nameForLog, err)
klog.Errorf("Unable to make Job from template in %s: %v", nameForLog, err)
return
}
jobResp, err := jc.CreateJob(sj.Namespace, jobReq)
@@ -322,7 +322,7 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo
recorder.Eventf(sj, v1.EventTypeWarning, "FailedCreate", "Error creating job: %v", err)
return
}
glog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog)
klog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog)
recorder.Eventf(sj, v1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name)

// ------------------------------------------------------------------ //
@@ -338,13 +338,13 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo
// Add the just-started job to the status list.
ref, err := getRef(jobResp)
if err != nil {
glog.V(2).Infof("Unable to make object reference for job for %s", nameForLog)
klog.V(2).Infof("Unable to make object reference for job for %s", nameForLog)
} else {
sj.Status.Active = append(sj.Status.Active, *ref)
}
sj.Status.LastScheduleTime = &metav1.Time{Time: scheduledTime}
if _, err := sjc.UpdateStatus(sj); err != nil {
glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
klog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
}

return
@@ -378,7 +378,7 @@ func deleteJob(sj *batchv1beta1.CronJob, job *batchv1.Job, jc jobControlInterfac
}
errList := []error{}
for _, pod := range podList.Items {
glog.V(2).Infof("CronJob controller is deleting Pod %v/%v", pod.Namespace, pod.Name)
klog.V(2).Infof("CronJob controller is deleting Pod %v/%v", pod.Namespace, pod.Name)
if err := pc.DeletePod(pod.Namespace, pod.Name); err != nil {
// ignores the error when the pod isn't found
if !errors.IsNotFound(err) {
@@ -393,7 +393,7 @@ func deleteJob(sj *batchv1beta1.CronJob, job *batchv1.Job, jc jobControlInterfac
// ... the job itself...
if err := jc.DeleteJob(job.Namespace, job.Name); err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedDelete", "Deleted job: %v", err)
glog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err)
klog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err)
return false
}
// ... and its reference from active list

@@ -20,8 +20,8 @@ import (
"fmt"
"time"

"github.com/golang/glog"
"github.com/robfig/cron"
"k8s.io/klog"

batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
@@ -64,7 +64,7 @@ func getParentUIDFromJob(j batchv1.Job) (types.UID, bool) {
}

if controllerRef.Kind != "CronJob" {
glog.V(4).Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace)
klog.V(4).Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace)
return types.UID(""), false
}

@@ -78,7 +78,7 @@ func groupJobsByParent(js []batchv1.Job) map[types.UID][]batchv1.Job {
for _, job := range js {
parentUID, found := getParentUIDFromJob(job)
if !found {
glog.V(4).Infof("Unable to get parent uid from job %s in namespace %s", job.Name, job.Namespace)
klog.V(4).Infof("Unable to get parent uid from job %s in namespace %s", job.Name, job.Namespace)
continue
}
jobsBySj[parentUID] = append(jobsBySj[parentUID], job)

@@ -53,7 +53,7 @@ go_library(
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/client-go/util/integer:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -23,7 +23,7 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/klog"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -149,7 +149,7 @@ func NewDaemonSetsController(
failedPodsBackoff *flowcontrol.Backoff,
) (*DaemonSetsController, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})

if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -176,13 +176,13 @@ func NewDaemonSetsController(
daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ds := obj.(*apps.DaemonSet)
glog.V(4).Infof("Adding daemon set %s", ds.Name)
klog.V(4).Infof("Adding daemon set %s", ds.Name)
dsc.enqueueDaemonSet(ds)
},
UpdateFunc: func(old, cur interface{}) {
oldDS := old.(*apps.DaemonSet)
curDS := cur.(*apps.DaemonSet)
glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
dsc.enqueueDaemonSet(curDS)
},
DeleteFunc: dsc.deleteDaemonset,
@@ -257,7 +257,7 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
return
}
}
glog.V(4).Infof("Deleting daemon set %s", ds.Name)
klog.V(4).Infof("Deleting daemon set %s", ds.Name)
dsc.enqueueDaemonSet(ds)
}

@@ -266,8 +266,8 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dsc.queue.ShutDown()

glog.Infof("Starting daemon sets controller")
defer glog.Infof("Shutting down daemon sets controller")
klog.Infof("Starting daemon sets controller")
defer klog.Infof("Shutting down daemon sets controller")

if !controller.WaitForCacheSync("daemon sets", stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) {
return
@@ -363,7 +363,7 @@ func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.Controlle
if len(daemonSets) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
glog.V(4).Infof("User error! more than one DaemonSets is selecting ControllerRevision %s/%s with labels: %#v",
klog.V(4).Infof("User error! more than one DaemonSets is selecting ControllerRevision %s/%s with labels: %#v",
history.Namespace, history.Name, history.Labels)
}
return daemonSets
@@ -386,7 +386,7 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) {
if ds == nil {
return
}
glog.V(4).Infof("ControllerRevision %s added.", history.Name)
klog.V(4).Infof("ControllerRevision %s added.", history.Name)
return
}

@@ -396,7 +396,7 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) {
if len(daemonSets) == 0 {
return
}
glog.V(4).Infof("Orphan ControllerRevision %s added.", history.Name)
klog.V(4).Infof("Orphan ControllerRevision %s added.", history.Name)
for _, ds := range daemonSets {
dsc.enqueueDaemonSet(ds)
}
@@ -429,7 +429,7 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) {
if ds == nil {
return
}
glog.V(4).Infof("ControllerRevision %s updated.", curHistory.Name)
klog.V(4).Infof("ControllerRevision %s updated.", curHistory.Name)
dsc.enqueueDaemonSet(ds)
return
}
@@ -442,7 +442,7 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) {
if len(daemonSets) == 0 {
return
}
glog.V(4).Infof("Orphan ControllerRevision %s updated.", curHistory.Name)
klog.V(4).Infof("Orphan ControllerRevision %s updated.", curHistory.Name)
for _, ds := range daemonSets {
dsc.enqueueDaemonSet(ds)
}
@@ -481,7 +481,7 @@ func (dsc *DaemonSetsController) deleteHistory(obj interface{}) {
if ds == nil {
return
}
glog.V(4).Infof("ControllerRevision %s deleted.", history.Name)
klog.V(4).Infof("ControllerRevision %s deleted.", history.Name)
dsc.enqueueDaemonSet(ds)
}

@@ -505,7 +505,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
if err != nil {
return
}
glog.V(4).Infof("Pod %s added.", pod.Name)
klog.V(4).Infof("Pod %s added.", pod.Name)
dsc.expectations.CreationObserved(dsKey)
dsc.enqueueDaemonSet(ds)
return
@@ -519,7 +519,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
if len(dss) == 0 {
return
}
glog.V(4).Infof("Orphan Pod %s added.", pod.Name)
klog.V(4).Infof("Orphan Pod %s added.", pod.Name)
for _, ds := range dss {
dsc.enqueueDaemonSet(ds)
}
@@ -553,7 +553,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
if ds == nil {
return
}
glog.V(4).Infof("Pod %s updated.", curPod.Name)
klog.V(4).Infof("Pod %s updated.", curPod.Name)
dsc.enqueueDaemonSet(ds)
changedToReady := !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod)
// See https://github.com/kubernetes/kubernetes/pull/38076 for more details
@@ -571,7 +571,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
if len(dss) == 0 {
return
}
glog.V(4).Infof("Orphan Pod %s updated.", curPod.Name)
klog.V(4).Infof("Orphan Pod %s updated.", curPod.Name)
labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
if labelChanged || controllerRefChanged {
for _, ds := range dss {
@@ -602,10 +602,10 @@ func (dsc *DaemonSetsController) requeueSuspendedDaemonPods(node string) {
dss := dsc.listSuspendedDaemonPods(node)
for _, dsKey := range dss {
if ns, name, err := cache.SplitMetaNamespaceKey(dsKey); err != nil {
glog.Errorf("Failed to get DaemonSet's namespace and name from %s: %v", dsKey, err)
klog.Errorf("Failed to get DaemonSet's namespace and name from %s: %v", dsKey, err)
continue
} else if ds, err := dsc.dsLister.DaemonSets(ns).Get(name); err != nil {
glog.Errorf("Failed to get DaemonSet %s/%s: %v", ns, name, err)
klog.Errorf("Failed to get DaemonSet %s/%s: %v", ns, name, err)
continue
} else {
dsc.enqueueDaemonSetRateLimited(ds)
@@ -682,7 +682,7 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
if err != nil {
return
}
glog.V(4).Infof("Pod %s deleted.", pod.Name)
klog.V(4).Infof("Pod %s deleted.", pod.Name)
dsc.expectations.DeletionObserved(dsKey)
dsc.enqueueDaemonSet(ds)
}
@@ -691,7 +691,7 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
dsList, err := dsc.dsLister.List(labels.Everything())
if err != nil {
glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
klog.V(4).Infof("Error enqueueing daemon sets: %v", err)
return
}
node := obj.(*v1.Node)
@@ -753,7 +753,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {

dsList, err := dsc.dsLister.List(labels.Everything())
if err != nil {
glog.V(4).Infof("Error listing daemon sets: %v", err)
klog.V(4).Infof("Error listing daemon sets: %v", err)
return
}
// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
@@ -820,7 +820,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[s
for _, pod := range claimedPods {
nodeName, err := util.GetTargetNodeName(pod)
if err != nil {
glog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v",
klog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v",
pod.Namespace, pod.Name, ds.Namespace, ds.Name)
continue
}
@@ -899,7 +899,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
inBackoff := dsc.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, now)
if inBackoff {
delay := dsc.failedPodsBackoff.Get(backoffKey)
glog.V(4).Infof("Deleting failed pod %s/%s on node %s has been limited by backoff - %v remaining",
klog.V(4).Infof("Deleting failed pod %s/%s on node %s has been limited by backoff - %v remaining",
pod.Namespace, pod.Name, node.Name, delay)
dsc.enqueueDaemonSetAfter(ds, delay)
continue
@@ -908,7 +908,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
dsc.failedPodsBackoff.Next(backoffKey, now)

msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name)
glog.V(2).Infof(msg)
klog.V(2).Infof(msg)
// Emit an event so that it's discoverable to users.
dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg)
podsToDelete = append(podsToDelete, pod.Name)
@@ -1003,7 +1003,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
// error channel to communicate back failures. make the buffer big enough to avoid any blocking
errCh := make(chan error, createDiff+deleteDiff)

glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
klog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
createWait := sync.WaitGroup{}
// If the returned error is not nil we have a parse error.
// The controller handles this via the hash.
@@ -1057,7 +1057,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
return
}
if err != nil {
glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
klog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
dsc.expectations.CreationObserved(dsKey)
errCh <- err
utilruntime.HandleError(err)
@@ -1068,7 +1068,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
// any skipped pods that we never attempted to start shouldn't be expected.
skippedPods := createDiff - batchSize
if errorCount < len(errCh) && skippedPods > 0 {
glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name)
klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name)
for i := 0; i < skippedPods; i++ {
dsc.expectations.CreationObserved(dsKey)
}
@@ -1078,14 +1078,14 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
}
}

glog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff)
klog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff)
deleteWait := sync.WaitGroup{}
deleteWait.Add(deleteDiff)
for i := 0; i < deleteDiff; i++ {
go func(ix int) {
defer deleteWait.Done()
if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[ix], ds); err != nil {
glog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
klog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
dsc.expectations.DeletionObserved(dsKey)
errCh <- err
utilruntime.HandleError(err)
@@ -1145,7 +1145,7 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
}

func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash string, updateObservedGen bool) error {
glog.V(4).Infof("Updating daemon set status")
klog.V(4).Infof("Updating daemon set status")
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
@@ -1208,7 +1208,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -1217,7 +1217,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
}
ds, err := dsc.dsLister.DaemonSets(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(3).Infof("daemon set has been deleted %v", key)
klog.V(3).Infof("daemon set has been deleted %v", key)
dsc.expectations.DeleteExpectations(key)
return nil
}
@@ -1340,7 +1340,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.

reasons, nodeInfo, err := dsc.simulate(newPod, node, ds)
if err != nil {
glog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err)
klog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err)
return false, false, false, err
}

@@ -1349,7 +1349,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
// into one result, e.g. selectedNode.
var insufficientResourceErr error
for _, r := range reasons {
glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
klog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
switch reason := r.(type) {
case *predicates.InsufficientResourceError:
insufficientResourceErr = reason
@@ -1392,10 +1392,10 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
case
predicates.ErrPodAffinityNotMatch,
predicates.ErrServiceAffinityViolated:
glog.Warningf("unexpected predicate failure reason: %s", reason.GetReason())
klog.Warningf("unexpected predicate failure reason: %s", reason.GetReason())
return false, false, false, fmt.Errorf("unexpected reason: DaemonSet Predicates should not return reason %s", reason.GetReason())
default:
glog.V(4).Infof("unknown predicate failure reason: %s", reason.GetReason())
klog.V(4).Infof("unknown predicate failure reason: %s", reason.GetReason())
wantToRun, shouldSchedule, shouldContinueRunning = false, false, false
emitEvent = true
}

@@ -22,7 +22,7 @@ import (
"reflect"
"sort"

"github.com/golang/glog"
"k8s.io/klog"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -55,23 +55,23 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, hash string)

// for oldPods delete all not running pods
var oldPodsToDelete []string
glog.V(4).Infof("Marking all unavailable old pods for deletion")
klog.V(4).Infof("Marking all unavailable old pods for deletion")
for _, pod := range oldUnavailablePods {
// Skip terminating pods. We won't delete them again
if pod.DeletionTimestamp != nil {
continue
}
glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
}

glog.V(4).Infof("Marking old pods for deletion")
klog.V(4).Infof("Marking old pods for deletion")
for _, pod := range oldAvailablePods {
if numUnavailable >= maxUnavailable {
glog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
klog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
break
}
glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
numUnavailable++
}
@@ -364,7 +364,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
if updateErr != nil {
return nil, updateErr
}
glog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount)
klog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount)
return nil, outerErr
}
return history, err
@@ -393,7 +393,7 @@ func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToD
}

func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
glog.V(4).Infof("Getting unavailable numbers")
klog.V(4).Infof("Getting unavailable numbers")
// TODO: get nodeList once in syncDaemonSet and pass it to other functions
nodeList, err := dsc.nodeLister.List(labels.Everything())
if err != nil {
@@ -432,7 +432,7 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeT
if err != nil {
return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
}
glog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
klog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
return maxUnavailable, numUnavailable, nil
}

@@ -42,7 +42,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/integer:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -25,7 +25,7 @@ import (
"reflect"
"time"

"github.com/golang/glog"
"k8s.io/klog"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -99,7 +99,7 @@ type DeploymentController struct {
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -149,8 +149,8 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dc.queue.ShutDown()

glog.Infof("Starting deployment controller")
defer glog.Infof("Shutting down deployment controller")
klog.Infof("Starting deployment controller")
defer klog.Infof("Shutting down deployment controller")

if !controller.WaitForCacheSync("deployment", stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
return
@@ -165,14 +165,14 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {

func (dc *DeploymentController) addDeployment(obj interface{}) {
d := obj.(*apps.Deployment)
glog.V(4).Infof("Adding deployment %s", d.Name)
klog.V(4).Infof("Adding deployment %s", d.Name)
dc.enqueueDeployment(d)
}

func (dc *DeploymentController) updateDeployment(old, cur interface{}) {
oldD := old.(*apps.Deployment)
curD := cur.(*apps.Deployment)
glog.V(4).Infof("Updating deployment %s", oldD.Name)
klog.V(4).Infof("Updating deployment %s", oldD.Name)
dc.enqueueDeployment(curD)
}

@@ -190,7 +190,7 @@ func (dc *DeploymentController) deleteDeployment(obj interface{}) {
return
}
}
glog.V(4).Infof("Deleting deployment %s", d.Name)
klog.V(4).Infof("Deleting deployment %s", d.Name)
dc.enqueueDeployment(d)
}

@@ -211,7 +211,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) {
if d == nil {
return
}
glog.V(4).Infof("ReplicaSet %s added.", rs.Name)
klog.V(4).Infof("ReplicaSet %s added.", rs.Name)
dc.enqueueDeployment(d)
return
}
@@ -222,7 +222,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) {
if len(ds) == 0 {
return
}
glog.V(4).Infof("Orphan ReplicaSet %s added.", rs.Name)
klog.V(4).Infof("Orphan ReplicaSet %s added.", rs.Name)
for _, d := range ds {
dc.enqueueDeployment(d)
}
@@ -242,7 +242,7 @@ func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *apps.ReplicaSet)
if len(deployments) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
glog.V(4).Infof("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s",
klog.V(4).Infof("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s",
rs.Namespace, rs.Name, rs.Labels, deployments[0].Namespace, deployments[0].Name)
}
return deployments
@@ -277,7 +277,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
if d == nil {
return
}
glog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
klog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
dc.enqueueDeployment(d)
return
}
@@ -290,7 +290,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
if len(ds) == 0 {
return
}
glog.V(4).Infof("Orphan ReplicaSet %s updated.", curRS.Name)
klog.V(4).Infof("Orphan ReplicaSet %s updated.", curRS.Name)
for _, d := range ds {
dc.enqueueDeployment(d)
}
@@ -329,7 +329,7 @@ func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
if d == nil {
return
}
glog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
klog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
dc.enqueueDeployment(d)
}

@@ -353,7 +353,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
return
}
}
glog.V(4).Infof("Pod %s deleted.", pod.Name)
klog.V(4).Infof("Pod %s deleted.", pod.Name)
if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
// Sync if this Deployment now has no more Pods.
rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.AppsV1()))
@@ -421,7 +421,7 @@ func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *apps.Deploymen
}
rs, err = dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
if err != nil || rs.UID != controllerRef.UID {
glog.V(4).Infof("Cannot get replicaset %q for pod %q: %v", controllerRef.Name, pod.Name, err)
klog.V(4).Infof("Cannot get replicaset %q for pod %q: %v", controllerRef.Name, pod.Name, err)
return nil
}

@@ -481,13 +481,13 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) {
}

if dc.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing deployment %v: %v", key, err)
klog.V(2).Infof("Error syncing deployment %v: %v", key, err)
dc.queue.AddRateLimited(key)
return
}

utilruntime.HandleError(err)
glog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
klog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
dc.queue.Forget(key)
}

@@ -559,9 +559,9 @@ func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsLis
// This function is not meant to be invoked concurrently with the same key.
func (dc *DeploymentController) syncDeployment(key string) error {
startTime := time.Now()
glog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime)
klog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime)
defer func() {
glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -570,7 +570,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
}
deployment, err := dc.dLister.Deployments(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(2).Infof("Deployment %v has been deleted", key)
klog.V(2).Infof("Deployment %v has been deleted", key)
return nil
}
if err != nil {

@@ -21,7 +21,7 @@ import (
"reflect"
"time"

"github.com/golang/glog"
"k8s.io/klog"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -186,11 +186,11 @@ func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newSt
// Make it ratelimited so we stay on the safe side, eventually the Deployment should
// transition either to a Complete or to a TimedOut condition.
if after < time.Second {
glog.V(4).Infof("Queueing up deployment %q for a progress check now", d.Name)
klog.V(4).Infof("Queueing up deployment %q for a progress check now", d.Name)
dc.enqueueRateLimited(d)
return time.Duration(0)
}
glog.V(4).Infof("Queueing up deployment %q for a progress check after %ds", d.Name, int(after.Seconds()))
klog.V(4).Infof("Queueing up deployment %q for a progress check after %ds", d.Name, int(after.Seconds()))
// Add a second to avoid milliseconds skew in AddAfter.
// See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
dc.enqueueAfter(d, after+time.Second)

@@ -20,7 +20,7 @@ import (
"fmt"
"strconv"

"github.com/golang/glog"
"k8s.io/klog"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -49,11 +49,11 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
for _, rs := range allRSs {
v, err := deploymentutil.Revision(rs)
if err != nil {
glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
klog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
continue
}
if v == rollbackTo.Revision {
glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
klog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
// rollback by copying podTemplate.Spec from the replica set
// revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
// no-op if the spec matches current deployment's podTemplate.Spec
@@ -75,7 +75,7 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) {
performedRollback := false
if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
glog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
klog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
deploymentutil.SetFromReplicaSetTemplate(d, rs.Spec.Template)
// set RS (the old RS we'll rolling back to) annotations back to the deployment;
// otherwise, the deployment's current annotations (should be the same as current new RS) will be copied to the RS after the rollback.
@@ -91,7 +91,7 @@ func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.
deploymentutil.SetDeploymentAnnotationsTo(d, rs)
performedRollback = true
} else {
glog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
klog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
eventMsg := fmt.Sprintf("The rollback revision contains the same template as current deployment %q", d.Name)
dc.emitRollbackWarningEvent(d, deploymentutil.RollbackTemplateUnchanged, eventMsg)
}
@@ -111,7 +111,7 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess
// It is assumed that the caller will have updated the deployment template appropriately (in case
// we want to rollback).
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
setRollbackTo(d, nil)
_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d)
return err

@@ -20,9 +20,9 @@ import (
"fmt"
"sort"

"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/client-go/util/integer"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)
@@ -91,7 +91,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
}

allPodsCount := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
glog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
klog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
maxUnavailable := deploymentutil.MaxUnavailable(*deployment)

// Check if we can scale down. We can scale down in the following 2 cases:
@@ -137,7 +137,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
if err != nil {
return false, nil
}
glog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)
klog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)

// Scale down old replica sets, need check maxUnavailable to ensure we can scale down
allRSs = append(oldRSs, newRS)
@@ -145,7 +145,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
if err != nil {
return false, nil
}
glog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)
klog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)

totalScaledDown := cleanupCount + scaledDownCount
return totalScaledDown > 0, nil
@@ -166,7 +166,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaS
// cannot scale down this replica set.
continue
}
glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
klog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
// no unhealthy replicas found, no scaling required.
continue
@@ -200,7 +200,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
// Cannot scale down.
return 0, nil
}
glog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)
klog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)

sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))

@@ -22,11 +22,11 @@ import (
"sort"
"strconv"

"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
@@ -248,7 +248,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
// error.
_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
if dErr == nil {
glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
}
return nil, err
case err != nil:
@@ -440,7 +440,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
}

sort.Sort(controller.ReplicaSetsByCreationTimestamp(cleanableRSes))
glog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)
klog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)

for i := int32(0); i < diff; i++ {
rs := cleanableRSes[i]
@@ -448,7 +448,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration || rs.DeletionTimestamp != nil {
continue
}
glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
// Return error instead of aggregating and continuing DELETEs on the theory
// that we may be overloading the api server.

@@ -24,7 +24,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/util/integer:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -24,7 +24,7 @@ import (
"strings"
"time"

"github.com/golang/glog"
"k8s.io/klog"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -186,7 +186,7 @@ func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip the replica sets when it failed to parse their revision information
glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
} else if v > max {
max = v
}
@@ -200,7 +200,7 @@ func LastRevision(allRSs []*apps.ReplicaSet) int64 {
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip the replica sets when it failed to parse their revision information
glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
} else if v >= max {
secMax = max
max = v
@@ -241,7 +241,7 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64)
if err != nil {
if oldRevision != "" {
glog.Warningf("Updating replica set revision OldRevision not int %s", err)
klog.Warningf("Updating replica set revision OldRevision not int %s", err)
return false
}
//If the RS annotation is empty then initialise it to 0
@@ -249,13 +249,13 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
}
newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
if err != nil {
glog.Warningf("Updating replica set revision NewRevision not int %s", err)
klog.Warningf("Updating replica set revision NewRevision not int %s", err)
return false
}
if oldRevisionInt < newRevisionInt {
newRS.Annotations[RevisionAnnotation] = newRevision
annotationChanged = true
glog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
klog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
}
// If a revision annotation already existed and this replica set was updated with a new revision
// then that means we are rolling back to this replica set. We need to preserve the old revisions
@@ -376,7 +376,7 @@ func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, boo
}
intValue, err := strconv.Atoi(annotationValue)
if err != nil {
glog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
klog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
return int32(0), false
}
return int32(intValue), true
@@ -787,7 +787,7 @@ func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentS
delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
timedOut := from.Add(delta).Before(now)

glog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
klog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
return timedOut
}

@@ -38,7 +38,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -49,7 +49,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"

"github.com/golang/glog"
"k8s.io/klog"
)

const statusUpdateRetries = 2
@@ -285,18 +285,18 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dc.queue.ShutDown()

glog.Infof("Starting disruption controller")
defer glog.Infof("Shutting down disruption controller")
klog.Infof("Starting disruption controller")
defer klog.Infof("Shutting down disruption controller")

if !controller.WaitForCacheSync("disruption", stopCh, dc.podListerSynced, dc.pdbListerSynced, dc.rcListerSynced, dc.rsListerSynced, dc.dListerSynced, dc.ssListerSynced) {
return
}

if dc.kubeClient != nil {
glog.Infof("Sending events to api server.")
klog.Infof("Sending events to api server.")
dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.kubeClient.CoreV1().Events("")})
} else {
glog.Infof("No api server defined - no events will be sent to API server.")
klog.Infof("No api server defined - no events will be sent to API server.")
}
go wait.Until(dc.worker, time.Second, stopCh)
go wait.Until(dc.recheckWorker, time.Second, stopCh)
@@ -306,44 +306,44 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {

func (dc *DisruptionController) addDb(obj interface{}) {
pdb := obj.(*policy.PodDisruptionBudget)
glog.V(4).Infof("add DB %q", pdb.Name)
klog.V(4).Infof("add DB %q", pdb.Name)
dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) updateDb(old, cur interface{}) {
// TODO(mml) ignore updates where 'old' is equivalent to 'cur'.
pdb := cur.(*policy.PodDisruptionBudget)
glog.V(4).Infof("update DB %q", pdb.Name)
klog.V(4).Infof("update DB %q", pdb.Name)
dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) removeDb(obj interface{}) {
pdb := obj.(*policy.PodDisruptionBudget)
glog.V(4).Infof("remove DB %q", pdb.Name)
klog.V(4).Infof("remove DB %q", pdb.Name)
dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) addPod(obj interface{}) {
pod := obj.(*v1.Pod)
glog.V(4).Infof("addPod called on pod %q", pod.Name)
klog.V(4).Infof("addPod called on pod %q", pod.Name)
pdb := dc.getPdbForPod(pod)
if pdb == nil {
glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
klog.V(4).Infof("No matching pdb for pod %q", pod.Name)
return
}
glog.V(4).Infof("addPod %q -> PDB %q", pod.Name, pdb.Name)
klog.V(4).Infof("addPod %q -> PDB %q", pod.Name, pdb.Name)
dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) updatePod(old, cur interface{}) {
pod := cur.(*v1.Pod)
glog.V(4).Infof("updatePod called on pod %q", pod.Name)
klog.V(4).Infof("updatePod called on pod %q", pod.Name)
pdb := dc.getPdbForPod(pod)
if pdb == nil {
glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
klog.V(4).Infof("No matching pdb for pod %q", pod.Name)
return
}
glog.V(4).Infof("updatePod %q -> PDB %q", pod.Name, pdb.Name)
klog.V(4).Infof("updatePod %q -> PDB %q", pod.Name, pdb.Name)
dc.enqueuePdb(pdb)
}

@@ -357,29 +357,29 @@ func (dc *DisruptionController) deletePod(obj interface{}) {
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %+v", obj)
klog.Errorf("Couldn't get object from tombstone %+v", obj)
return
}
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
klog.Errorf("Tombstone contained object that is not a pod %+v", obj)
return
}
}
glog.V(4).Infof("deletePod called on pod %q", pod.Name)
klog.V(4).Infof("deletePod called on pod %q", pod.Name)
pdb := dc.getPdbForPod(pod)
if pdb == nil {
glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
klog.V(4).Infof("No matching pdb for pod %q", pod.Name)
return
}
glog.V(4).Infof("deletePod %q -> PDB %q", pod.Name, pdb.Name)
klog.V(4).Infof("deletePod %q -> PDB %q", pod.Name, pdb.Name)
dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) enqueuePdb(pdb *policy.PodDisruptionBudget) {
key, err := controller.KeyFunc(pdb)
if err != nil {
glog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
klog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
return
}
dc.queue.Add(key)
@@ -388,7 +388,7 @@ func (dc *DisruptionController) enqueuePdb(pdb *policy.PodDisruptionBudget) {
func (dc *DisruptionController) enqueuePdbForRecheck(pdb *policy.PodDisruptionBudget, delay time.Duration) {
key, err := controller.KeyFunc(pdb)
if err != nil {
glog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
klog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
return
}
dc.recheckQueue.AddAfter(key, delay)
@@ -400,13 +400,13 @@ func (dc *DisruptionController) getPdbForPod(pod *v1.Pod) *policy.PodDisruptionB
// caller.
pdbs, err := dc.pdbLister.GetPodPodDisruptionBudgets(pod)
if err != nil {
glog.V(4).Infof("No PodDisruptionBudgets found for pod %v, PodDisruptionBudget controller will avoid syncing.", pod.Name)
klog.V(4).Infof("No PodDisruptionBudgets found for pod %v, PodDisruptionBudget controller will avoid syncing.", pod.Name)
return nil
}

if len(pdbs) > 1 {
msg := fmt.Sprintf("Pod %q/%q matches multiple PodDisruptionBudgets. Chose %q arbitrarily.", pod.Namespace, pod.Name, pdbs[0].Name)
glog.Warning(msg)
klog.Warning(msg)
dc.recorder.Event(pod, v1.EventTypeWarning, "MultiplePodDisruptionBudgets", msg)
}
return pdbs[0]
@@ -471,7 +471,7 @@ func (dc *DisruptionController) processNextRecheckWorkItem() bool {
func (dc *DisruptionController) sync(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -480,7 +480,7 @@ func (dc *DisruptionController) sync(key string) error {
}
pdb, err := dc.pdbLister.PodDisruptionBudgets(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(4).Infof("PodDisruptionBudget %q has been deleted", key)
klog.V(4).Infof("PodDisruptionBudget %q has been deleted", key)
return nil
}
if err != nil {
@@ -488,7 +488,7 @@ func (dc *DisruptionController) sync(key string) error {
}

if err := dc.trySync(pdb); err != nil {
glog.Errorf("Failed to sync pdb %s/%s: %v", pdb.Namespace, pdb.Name, err)
klog.Errorf("Failed to sync pdb %s/%s: %v", pdb.Namespace, pdb.Name, err)
return dc.failSafe(pdb)
}

@@ -656,7 +656,7 @@ func (dc *DisruptionController) buildDisruptedPodMap(pods []*v1.Pod, pdb *policy
}
expectedDeletion := disruptionTime.Time.Add(DeletionTimeout)
if expectedDeletion.Before(currentTime) {
glog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s",
klog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s",
pod.Namespace, pod.Name, disruptionTime.String(), pdb.Namespace, pdb.Name)
dc.recorder.Eventf(pod, v1.EventTypeWarning, "NotDeleted", "Pod was expected by PDB %s/%s to be deleted but it wasn't",
pdb.Namespace, pdb.Name)

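The deletePod hunk above shows the usual client-go tombstone handling: a delete notification may deliver the object itself or a cache.DeletedFinalStateUnknown wrapper when the watch missed the final state. A sketch of that unwrapping as a standalone helper (the helper name is hypothetical; the types are the real client-go ones):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// podFromDeleteEvent unwraps a delete notification into a *v1.Pod,
// handling the DeletedFinalStateUnknown tombstone case shown above.
func podFromDeleteEvent(obj interface{}) (*v1.Pod, error) {
	if pod, ok := obj.(*v1.Pod); ok {
		return pod, nil
	}
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil, fmt.Errorf("couldn't get object from tombstone %+v", obj)
	}
	pod, ok := tombstone.Obj.(*v1.Pod)
	if !ok {
		return nil, fmt.Errorf("tombstone contained object that is not a pod %+v", obj)
	}
	return pod, nil
}

func main() {
	pod, err := podFromDeleteEvent(cache.DeletedFinalStateUnknown{Key: "ns/name", Obj: &v1.Pod{}})
	fmt.Println(pod != nil, err) // true <nil>
}
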
@@ -33,7 +33,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -42,7 +42,7 @@ import (
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"

"github.com/golang/glog"
"k8s.io/klog"
)

const (
@@ -146,8 +146,8 @@ func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer e.queue.ShutDown()

glog.Infof("Starting endpoint controller")
defer glog.Infof("Shutting down endpoint controller")
klog.Infof("Starting endpoint controller")
defer klog.Infof("Shutting down endpoint controller")

if !controller.WaitForCacheSync("endpoint", stopCh, e.podsSynced, e.servicesSynced, e.endpointsSynced) {
return
@@ -324,7 +324,7 @@ func (e *EndpointController) deletePod(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Pod: %#v", obj))
return
}
glog.V(4).Infof("Enqueuing services of deleted pod %s/%s having final state unrecorded", pod.Namespace, pod.Name)
klog.V(4).Infof("Enqueuing services of deleted pod %s/%s having final state unrecorded", pod.Namespace, pod.Name)
e.addPod(pod)
}

@@ -368,12 +368,12 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
}

if e.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err)
klog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err)
e.queue.AddRateLimited(key)
return
}

glog.Warningf("Dropping service %q out of the queue: %v", key, err)
klog.Warningf("Dropping service %q out of the queue: %v", key, err)
e.queue.Forget(key)
utilruntime.HandleError(err)
}
@@ -381,7 +381,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
func (e *EndpointController) syncService(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -408,7 +408,7 @@ func (e *EndpointController) syncService(key string) error {
return nil
}

glog.V(5).Infof("About to update endpoints for service %q", key)
klog.V(5).Infof("About to update endpoints for service %q", key)
pods, err := e.podLister.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelectorPreValidated())
if err != nil {
// Since we're getting stuff from a local cache, it is
@@ -433,11 +433,11 @@ func (e *EndpointController) syncService(key string) error {

for _, pod := range pods {
if len(pod.Status.PodIP) == 0 {
glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
klog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
if !tolerateUnreadyEndpoints && pod.DeletionTimestamp != nil {
glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
klog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
continue
}

@@ -462,7 +462,7 @@ func (e *EndpointController) syncService(key string) error {
portProto := servicePort.Protocol
portNum, err := podutil.FindPort(pod, servicePort)
if err != nil {
glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
klog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
continue
}

@@ -496,7 +496,7 @@ func (e *EndpointController) syncService(key string) error {
if !createEndpoints &&
apiequality.Semantic.DeepEqual(currentEndpoints.Subsets, subsets) &&
apiequality.Semantic.DeepEqual(currentEndpoints.Labels, service.Labels) {
glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
klog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
return nil
}
newEndpoints := currentEndpoints.DeepCopy()
@@ -506,7 +506,7 @@ func (e *EndpointController) syncService(key string) error {
newEndpoints.Annotations = make(map[string]string)
}

glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
if createEndpoints {
// No previous endpoints, create them
_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(newEndpoints)
@@ -520,7 +520,7 @@ func (e *EndpointController) syncService(key string) error {
// 1. namespace is terminating, endpoint creation is not allowed by default.
// 2. policy is misconfigured, in which case no service would function anywhere.
// Given the frequency of 1, we log at a lower level.
glog.V(5).Infof("Forbidden from creating endpoints: %v", err)
klog.V(5).Infof("Forbidden from creating endpoints: %v", err)
}
return err
}
@@ -572,7 +572,7 @@ func addEndpointSubset(subsets []v1.EndpointSubset, pod *v1.Pod, epa v1.Endpoint
})
readyEps++
} else if shouldPodBeInEndpoints(pod) {
glog.V(5).Infof("Pod is out of service: %s/%s", pod.Namespace, pod.Name)
klog.V(5).Infof("Pod is out of service: %s/%s", pod.Namespace, pod.Name)
subsets = append(subsets, v1.EndpointSubset{
NotReadyAddresses: []v1.EndpointAddress{epa},
Ports: ports,

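The endpoints controller above keeps the same verbosity tiers under klog that it used under glog: V(2) for retryable errors, V(4) for per-sync summaries, V(5) for per-pod detail. A sketch of how those guards behave, assuming only the stock klog API (the expensiveDump helper is illustrative):

package main

import (
	"flag"
	"fmt"
	"strings"

	"k8s.io/klog"
)

// expensiveDump stands in for argument construction that is only worth
// doing when verbose logging is actually enabled.
func expensiveDump(items []string) string {
	return fmt.Sprintf("%d items: %s", len(items), strings.Join(items, ","))
}

func main() {
	klog.InitFlags(nil)
	flag.Parse()

	// klog.V(n) evaluates false unless the process runs with -v=n or
	// higher, so both forms below are cheap at low verbosity.
	klog.V(4).Infof("Finished syncing service %q endpoints.", "default/example")
	if klog.V(5) {
		klog.Infof("per-pod detail: %s", expensiveDump([]string{"a", "b"}))
	}
}
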
@@ -43,12 +43,12 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/golang/groupcache/lru:go_default_library",
"//vendor/gonum.org/v1/gonum/graph:go_default_library",
"//vendor/gonum.org/v1/gonum/graph/encoding:go_default_library",
"//vendor/gonum.org/v1/gonum/graph/encoding/dot:go_default_library",
"//vendor/gonum.org/v1/gonum/graph/simple:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -22,7 +22,7 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -130,8 +130,8 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
defer gc.attemptToOrphan.ShutDown()
defer gc.dependencyGraphBuilder.graphChanges.ShutDown()

glog.Infof("Starting garbage collector controller")
defer glog.Infof("Shutting down garbage collector controller")
klog.Infof("Starting garbage collector controller")
defer klog.Infof("Shutting down garbage collector controller")

go gc.dependencyGraphBuilder.Run(stopCh)

@@ -139,7 +139,7 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
return
}

glog.Infof("Garbage collector: all resource monitors have synced. Proceeding to collect garbage")
klog.Infof("Garbage collector: all resource monitors have synced. Proceeding to collect garbage")

// gc workers
for i := 0; i < workers; i++ {
@@ -172,13 +172,13 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf

// This can occur if there is an internal error in GetDeletableResources.
if len(newResources) == 0 {
glog.V(2).Infof("no resources reported by discovery, skipping garbage collector sync")
klog.V(2).Infof("no resources reported by discovery, skipping garbage collector sync")
return
}

// Decide whether discovery has reported a change.
if reflect.DeepEqual(oldResources, newResources) {
glog.V(5).Infof("no resource updates from discovery, skipping garbage collector sync")
klog.V(5).Infof("no resource updates from discovery, skipping garbage collector sync")
return
}

@@ -196,18 +196,18 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
if attempt > 1 {
newResources = GetDeletableResources(discoveryClient)
if len(newResources) == 0 {
glog.V(2).Infof("no resources reported by discovery (attempt %d)", attempt)
klog.V(2).Infof("no resources reported by discovery (attempt %d)", attempt)
return false, nil
}
}

glog.V(2).Infof("syncing garbage collector with updated resources from discovery (attempt %d): %s", attempt, printDiff(oldResources, newResources))
klog.V(2).Infof("syncing garbage collector with updated resources from discovery (attempt %d): %s", attempt, printDiff(oldResources, newResources))

// Resetting the REST mapper will also invalidate the underlying discovery
// client. This is a leaky abstraction and assumes behavior about the REST
// mapper, but we'll deal with it for now.
gc.restMapper.Reset()
glog.V(4).Infof("reset restmapper")
klog.V(4).Infof("reset restmapper")

// Perform the monitor resync and wait for controllers to report cache sync.
//
@@ -222,7 +222,7 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors (attempt %d): %v", attempt, err))
return false, nil
}
glog.V(4).Infof("resynced monitors")
klog.V(4).Infof("resynced monitors")

// wait for caches to fill for a while (our sync period) before attempting to rediscover resources and retry syncing.
// this protects us from deadlocks where available resources changed and one of our informer caches will never fill.
@@ -242,7 +242,7 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
// have succeeded to ensure we'll retry on subsequent syncs if an error
// occurred.
oldResources = newResources
glog.V(2).Infof("synced garbage collector")
klog.V(2).Infof("synced garbage collector")
}, period, stopCh)
}

@@ -308,7 +308,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
// have a way to distinguish this from a valid type we will recognize
// after the next discovery sync.
// For now, record the error and retry.
glog.V(5).Infof("error syncing item %s: %v", n, err)
klog.V(5).Infof("error syncing item %s: %v", n, err)
} else {
utilruntime.HandleError(fmt.Errorf("error syncing item %s: %v", n, err))
}
@@ -318,7 +318,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
// requeue if item hasn't been observed via an informer event yet.
// otherwise a virtual node for an item added AND removed during watch reestablishment can get stuck in the graph and never removed.
// see https://issue.k8s.io/56121
glog.V(5).Infof("item %s hasn't been observed via informer yet", n.identity)
klog.V(5).Infof("item %s hasn't been observed via informer yet", n.identity)
gc.attemptToDelete.AddRateLimited(item)
}
return true
@@ -330,7 +330,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *node) (
dangling bool, owner *unstructured.Unstructured, err error) {
if gc.absentOwnerCache.Has(reference.UID) {
glog.V(5).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
klog.V(5).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
return true, nil, nil
}
// TODO: we need to verify the reference resource is supported by the
@@ -351,14 +351,14 @@ func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *no
switch {
case errors.IsNotFound(err):
gc.absentOwnerCache.Add(reference.UID)
glog.V(5).Infof("object %s's owner %s/%s, %s is not found", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
klog.V(5).Infof("object %s's owner %s/%s, %s is not found", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
return true, nil, nil
case err != nil:
return false, nil, err
}

if owner.GetUID() != reference.UID {
glog.V(5).Infof("object %s's owner %s/%s, %s is not found, UID mismatch", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
klog.V(5).Infof("object %s's owner %s/%s, %s is not found, UID mismatch", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
gc.absentOwnerCache.Add(reference.UID)
return true, nil, nil
}
@@ -405,10 +405,10 @@ func ownerRefsToUIDs(refs []metav1.OwnerReference) []types.UID {
}

func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
glog.V(2).Infof("processing item %s", item.identity)
klog.V(2).Infof("processing item %s", item.identity)
// "being deleted" is a one-way trip to the final deletion. We'll just wait for the final deletion, and then process the object's dependents.
if item.isBeingDeleted() && !item.isDeletingDependents() {
glog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity)
klog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity)
return nil
}
// TODO: It's only necessary to talk to the API server if this is a
@@ -420,7 +420,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// the GraphBuilder can add "virtual" node for an owner that doesn't
// exist yet, so we need to enqueue a virtual Delete event to remove
// the virtual node from GraphBuilder.uidToNode.
glog.V(5).Infof("item %v not found, generating a virtual delete event", item.identity)
klog.V(5).Infof("item %v not found, generating a virtual delete event", item.identity)
gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
// since we're manually inserting a delete event to remove this node,
// we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete
@@ -431,7 +431,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
}

if latest.GetUID() != item.identity.UID {
glog.V(5).Infof("UID doesn't match, item %v not found, generating a virtual delete event", item.identity)
klog.V(5).Infof("UID doesn't match, item %v not found, generating a virtual delete event", item.identity)
gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
// since we're manually inserting a delete event to remove this node,
// we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete
@@ -448,7 +448,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// compute if we should delete the item
ownerReferences := latest.GetOwnerReferences()
if len(ownerReferences) == 0 {
glog.V(2).Infof("object %s's doesn't have an owner, continue on next item", item.identity)
klog.V(2).Infof("object %s doesn't have an owner, continue on next item", item.identity)
return nil
}

@@ -456,15 +456,15 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
if err != nil {
return err
}
glog.V(5).Infof("classify references of %s.\nsolid: %#v\ndangling: %#v\nwaitingForDependentsDeletion: %#v\n", item.identity, solid, dangling, waitingForDependentsDeletion)
klog.V(5).Infof("classify references of %s.\nsolid: %#v\ndangling: %#v\nwaitingForDependentsDeletion: %#v\n", item.identity, solid, dangling, waitingForDependentsDeletion)

switch {
case len(solid) != 0:
glog.V(2).Infof("object %#v has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
klog.V(2).Infof("object %#v has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
if len(dangling) == 0 && len(waitingForDependentsDeletion) == 0 {
return nil
}
glog.V(2).Infof("remove dangling references %#v and waiting references %#v for object %s", dangling, waitingForDependentsDeletion, item.identity)
klog.V(2).Infof("remove dangling references %#v and waiting references %#v for object %s", dangling, waitingForDependentsDeletion, item.identity)
// waitingForDependentsDeletion needs to be deleted from the
// ownerReferences, otherwise the referenced objects will be stuck with
// the FinalizerDeletingDependents and never get deleted.
@@ -483,7 +483,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// problem.
// there are multiple workers running attemptToDeleteItem in
// parallel, so the circle detection can fail in a race condition.
glog.V(2).Infof("processing object %s, some of its owners and its dependent [%s] have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the object is going to be deleted with Foreground", item.identity, dep.identity)
klog.V(2).Infof("processing object %s, some of its owners and its dependent [%s] have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the object is going to be deleted with Foreground", item.identity, dep.identity)
patch, err := item.unblockOwnerReferencesStrategicMergePatch()
if err != nil {
return err
@@ -494,7 +494,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
break
}
}
glog.V(2).Infof("at least one owner of object %s has FinalizerDeletingDependents, and the object itself has dependents, so it is going to be deleted in Foreground", item.identity)
klog.V(2).Infof("at least one owner of object %s has FinalizerDeletingDependents, and the object itself has dependents, so it is going to be deleted in Foreground", item.identity)
// the deletion event will be observed by the graphBuilder, so the item
// will be processed again in processDeletingDependentsItem. If it
// doesn't have dependents, the function will remove the
@@ -518,7 +518,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// otherwise, default to background.
policy = metav1.DeletePropagationBackground
}
glog.V(2).Infof("delete object %s with propagation policy %s", item.identity, policy)
klog.V(2).Infof("delete object %s with propagation policy %s", item.identity, policy)
return gc.deleteObject(item.identity, &policy)
}
}
@@ -527,12 +527,12 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
func (gc *GarbageCollector) processDeletingDependentsItem(item *node) error {
blockingDependents := item.blockingDependents()
if len(blockingDependents) == 0 {
glog.V(2).Infof("remove DeleteDependents finalizer for item %s", item.identity)
klog.V(2).Infof("remove DeleteDependents finalizer for item %s", item.identity)
return gc.removeFinalizer(item, metav1.FinalizerDeleteDependents)
}
for _, dep := range blockingDependents {
if !dep.isDeletingDependents() {
glog.V(2).Infof("adding %s to attemptToDelete, because its owner %s is deletingDependents", dep.identity, item.identity)
klog.V(2).Infof("adding %s to attemptToDelete, because its owner %s is deletingDependents", dep.identity, item.identity)
gc.attemptToDelete.Add(dep)
}
}
@@ -570,7 +570,7 @@ func (gc *GarbageCollector) orphanDependents(owner objectReference, dependents [
if len(errorsSlice) != 0 {
return fmt.Errorf("failed to orphan dependents of owner %s, got errors: %s", owner, utilerrors.NewAggregate(errorsSlice).Error())
}
glog.V(5).Infof("successfully updated all dependents of owner %s", owner)
klog.V(5).Infof("successfully updated all dependents of owner %s", owner)
return nil
}

@@ -644,9 +644,9 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m
preferredResources, err := discoveryClient.ServerPreferredResources()
if err != nil {
if discovery.IsGroupDiscoveryFailedError(err) {
glog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
klog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
} else {
glog.Warningf("failed to discover preferred resources: %v", err)
klog.Warningf("failed to discover preferred resources: %v", err)
}
}
if preferredResources == nil {
@@ -660,7 +660,7 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m
for _, rl := range deletableResources {
gv, err := schema.ParseGroupVersion(rl.GroupVersion)
if err != nil {
glog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
klog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
continue
}
for i := range rl.APIResources {

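All of the controllers touched here assume the binary's entry point wires up klog's flags, since klog, unlike glog, does not register them as an import side effect. A minimal sketch of that wiring (a hypothetical main, not code from this commit):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// glog registered -v, -logtostderr, -vmodule, etc. in an init() side
	// effect; klog requires an explicit call. Passing nil targets the
	// process-wide flag.CommandLine set.
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush() // flush buffered log I/O on exit, as glog.Flush did

	klog.Info("klog flags are now registered and parsed")
}
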
@@ -22,7 +22,7 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -176,16 +176,16 @@ func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind
}
shared, err := gb.sharedInformers.ForResource(resource)
if err == nil {
glog.V(4).Infof("using a shared informer for resource %q, kind %q", resource.String(), kind.String())
klog.V(4).Infof("using a shared informer for resource %q, kind %q", resource.String(), kind.String())
// need to clone because it's from a shared cache
shared.Informer().AddEventHandlerWithResyncPeriod(handlers, ResourceResyncTime)
return shared.Informer().GetController(), shared.Informer().GetStore(), nil
} else {
glog.V(4).Infof("unable to use a shared informer for resource %q, kind %q: %v", resource.String(), kind.String(), err)
klog.V(4).Infof("unable to use a shared informer for resource %q, kind %q: %v", resource.String(), kind.String(), err)
}

// TODO: consider store in one storage.
glog.V(5).Infof("create storage for resource %s", resource)
klog.V(5).Infof("create storage for resource %s", resource)
store, monitor := cache.NewInformer(
listWatcher(gb.dynamicClient, resource),
nil,
@@ -245,7 +245,7 @@ func (gb *GraphBuilder) syncMonitors(resources map[schema.GroupVersionResource]s
}
}

glog.V(4).Infof("synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
klog.V(4).Infof("synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
// NewAggregate returns nil if errs is 0-length
return utilerrors.NewAggregate(errs)
}
@@ -277,7 +277,7 @@ func (gb *GraphBuilder) startMonitors() {
started++
}
}
glog.V(4).Infof("started %d new monitors, %d currently running", started, len(monitors))
klog.V(4).Infof("started %d new monitors, %d currently running", started, len(monitors))
}

// IsSynced returns true if any monitors exist AND all those monitors'
@@ -289,13 +289,13 @@ func (gb *GraphBuilder) IsSynced() bool {
defer gb.monitorLock.Unlock()

if len(gb.monitors) == 0 {
glog.V(4).Info("garbage controller monitor not synced: no monitors")
klog.V(4).Info("garbage controller monitor not synced: no monitors")
return false
}

for resource, monitor := range gb.monitors {
if !monitor.controller.HasSynced() {
glog.V(4).Infof("garbage controller monitor not yet synced: %+v", resource)
klog.V(4).Infof("garbage controller monitor not yet synced: %+v", resource)
return false
}
}
@@ -305,8 +305,8 @@ func (gb *GraphBuilder) IsSynced() bool {
// Run sets the stop channel and starts monitor execution until stopCh is
// closed. Any running monitors will be stopped before Run returns.
func (gb *GraphBuilder) Run(stopCh <-chan struct{}) {
glog.Infof("GraphBuilder running")
defer glog.Infof("GraphBuilder stopping")
klog.Infof("GraphBuilder running")
defer klog.Infof("GraphBuilder stopping")

// Set up the stop channel.
gb.monitorLock.Lock()
@@ -333,7 +333,7 @@ func (gb *GraphBuilder) Run(stopCh <-chan struct{}) {

// reset monitors so that the graph builder can be safely re-run/synced.
gb.monitors = nil
glog.Infof("stopped %d of %d monitors", stopped, len(monitors))
klog.Infof("stopped %d of %d monitors", stopped, len(monitors))
}

var ignoredResources = map[schema.GroupResource]struct{}{
@@ -377,7 +377,7 @@ func (gb *GraphBuilder) addDependentToOwners(n *node, owners []metav1.OwnerRefer
dependents: make(map[*node]struct{}),
virtual: true,
}
glog.V(5).Infof("add virtual node.identity: %s\n\n", ownerNode.identity)
klog.V(5).Infof("add virtual node.identity: %s\n\n", ownerNode.identity)
gb.uidToNode.Write(ownerNode)
}
ownerNode.addDependent(n)
@@ -515,7 +515,7 @@ func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerRe
if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion {
node, found := gb.uidToNode.Read(ref.UID)
if !found {
glog.V(5).Infof("cannot find %s in uidToNode", ref.UID)
klog.V(5).Infof("cannot find %s in uidToNode", ref.UID)
continue
}
gb.attemptToDelete.Add(node)
@@ -527,7 +527,7 @@ func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerRe
if wasBlocked && isUnblocked {
node, found := gb.uidToNode.Read(c.newRef.UID)
if !found {
glog.V(5).Infof("cannot find %s in uidToNode", c.newRef.UID)
klog.V(5).Infof("cannot find %s in uidToNode", c.newRef.UID)
continue
}
gb.attemptToDelete.Add(node)
@@ -537,12 +537,12 @@ func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerRe

func (gb *GraphBuilder) processTransitions(oldObj interface{}, newAccessor metav1.Object, n *node) {
if startsWaitingForDependentsOrphaned(oldObj, newAccessor) {
glog.V(5).Infof("add %s to the attemptToOrphan", n.identity)
klog.V(5).Infof("add %s to the attemptToOrphan", n.identity)
gb.attemptToOrphan.Add(n)
return
}
if startsWaitingForDependentsDeleted(oldObj, newAccessor) {
glog.V(2).Infof("add %s to the attemptToDelete, because it's waiting for its dependents to be deleted", n.identity)
klog.V(2).Infof("add %s to the attemptToDelete, because it's waiting for its dependents to be deleted", n.identity)
// if the n is added as a "virtual" node, its deletingDependents field is not properly set, so always set it here.
n.markDeletingDependents()
for dep := range n.dependents {
@@ -575,7 +575,7 @@ func (gb *GraphBuilder) processGraphChanges() bool {
utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
return true
}
glog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, uid %s, event type %v", event.gvk.GroupVersion().String(), event.gvk.Kind, accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
klog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, uid %s, event type %v", event.gvk.GroupVersion().String(), event.gvk.Kind, accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
// Check if the node already exists
existingNode, found := gb.uidToNode.Read(accessor.GetUID())
if found {
@@ -627,7 +627,7 @@ func (gb *GraphBuilder) processGraphChanges() bool {
gb.processTransitions(event.oldObj, accessor, existingNode)
case event.eventType == deleteEvent:
if !found {
glog.V(5).Infof("%v doesn't exist in the graph, this shouldn't happen", accessor.GetUID())
klog.V(5).Infof("%v doesn't exist in the graph, this shouldn't happen", accessor.GetUID())
return true
}
// removeNode updates the graph

@@ -19,7 +19,7 @@ package garbagecollector
import (
"fmt"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -110,7 +110,7 @@ func (gc *GarbageCollector) removeFinalizer(owner *node, targetFinalizer string)
newFinalizers = append(newFinalizers, f)
}
if !found {
glog.V(5).Infof("the %s finalizer is already removed from object %s", targetFinalizer, owner.identity)
klog.V(5).Infof("the %s finalizer is already removed from object %s", targetFinalizer, owner.identity)
return nil
}
// remove the owner from dependent's OwnerReferences

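Because klog no longer registers flags implicitly, test binaries that relied on glog's side-effect registration need an explicit hook, which this commit adds from init() in the affected test files. A sketch of the pattern (the package and test names here are hypothetical):

package garbagecollector_test

import (
	"testing"

	"k8s.io/klog"
)

func init() {
	// Register klog's flags on the default FlagSet before the testing
	// package parses flags, so `go test -args -v=5` still controls verbosity.
	klog.InitFlags(nil)
}

func TestLoggingIsWiredUp(t *testing.T) {
	klog.V(4).Infof("emitted only when the test binary runs with -v=4 or higher")
}
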
@@ -35,7 +35,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/integer:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -45,7 +45,7 @@ import (
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"

"github.com/golang/glog"
"k8s.io/klog"
)

const statusUpdateRetries = 3
@@ -91,7 +91,7 @@ type JobController struct {

func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *JobController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})

if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -140,8 +140,8 @@ func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer jm.queue.ShutDown()

glog.Infof("Starting job controller")
defer glog.Infof("Shutting down job controller")
klog.Infof("Starting job controller")
defer klog.Infof("Shutting down job controller")

if !controller.WaitForCacheSync("job", stopCh, jm.podStoreSynced, jm.jobStoreSynced) {
return
@@ -343,7 +343,7 @@ func (jm *JobController) updateJob(old, cur interface{}) {
total := time.Duration(*curADS) * time.Second
// AddAfter will handle total < passed
jm.queue.AddAfter(key, total-passed)
glog.V(4).Infof("job ActiveDeadlineSeconds updated, will rsync after %d seconds", total-passed)
klog.V(4).Infof("job ActiveDeadlineSeconds updated, will resync after %d seconds", total-passed)
}
}
}
@@ -436,7 +436,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
func (jm *JobController) syncJob(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()

ns, name, err := cache.SplitMetaNamespaceKey(key)
@@ -449,7 +449,7 @@ func (jm *JobController) syncJob(key string) (bool, error) {
sharedJob, err := jm.jobLister.Jobs(ns).Get(name)
if err != nil {
if errors.IsNotFound(err) {
glog.V(4).Infof("Job has been deleted: %v", key)
klog.V(4).Infof("Job has been deleted: %v", key)
jm.expectations.DeleteExpectations(key)
return true, nil
}
@@ -485,7 +485,7 @@ func (jm *JobController) syncJob(key string) (bool, error) {
job.Status.StartTime = &now
// enqueue a sync to check if job past ActiveDeadlineSeconds
if job.Spec.ActiveDeadlineSeconds != nil {
glog.V(4).Infof("Job %s have ActiveDeadlineSeconds will sync after %d seconds",
klog.V(4).Infof("Job %s has ActiveDeadlineSeconds, will sync after %d seconds",
key, *job.Spec.ActiveDeadlineSeconds)
jm.queue.AddAfter(key, time.Duration(*job.Spec.ActiveDeadlineSeconds)*time.Second)
}
@@ -614,7 +614,7 @@ func (jm *JobController) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh cha
defer wait.Done()
if err := jm.podControl.DeletePod(job.Namespace, pods[ix].Name, job); err != nil {
defer utilruntime.HandleError(err)
glog.V(2).Infof("Failed to delete %v, job %q/%q deadline exceeded", pods[ix].Name, job.Namespace, job.Name)
klog.V(2).Infof("Failed to delete %v, job %q/%q deadline exceeded", pods[ix].Name, job.Namespace, job.Name)
errCh <- err
}
}(i)
@@ -697,7 +697,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
diff := active - parallelism
errCh = make(chan error, diff)
jm.expectations.ExpectDeletions(jobKey, int(diff))
glog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff)
klog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff)
// Sort the pods in the order such that not-ready < ready, unscheduled
// < scheduled, and pending < running. This ensures that we delete pods
// in the earlier stages whenever possible.
@@ -712,7 +712,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, job); err != nil {
defer utilruntime.HandleError(err)
// Decrement the expected number of deletes because the informer won't observe this deletion
glog.V(2).Infof("Failed to delete %v, decrementing expectations for job %q/%q", activePods[ix].Name, job.Namespace, job.Name)
klog.V(2).Infof("Failed to delete %v, decrementing expectations for job %q/%q", activePods[ix].Name, job.Namespace, job.Name)
jm.expectations.DeletionObserved(jobKey)
activeLock.Lock()
active++
@@ -749,7 +749,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
}
jm.expectations.ExpectCreations(jobKey, int(diff))
errCh = make(chan error, diff)
glog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, wantActive, diff)
klog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, wantActive, diff)

active += diff
wait := sync.WaitGroup{}
@@ -782,7 +782,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
if err != nil {
defer utilruntime.HandleError(err)
// Decrement the expected number of creates because the informer won't observe this pod
glog.V(2).Infof("Failed creation, decrementing expectations for job %q/%q", job.Namespace, job.Name)
klog.V(2).Infof("Failed creation, decrementing expectations for job %q/%q", job.Namespace, job.Name)
jm.expectations.CreationObserved(jobKey)
activeLock.Lock()
active--
@@ -795,7 +795,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
// any skipped pods that we never attempted to start shouldn't be expected.
skippedPods := diff - batchSize
if errorCount < len(errCh) && skippedPods > 0 {
glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for job %q/%q", skippedPods, job.Namespace, job.Name)
klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for job %q/%q", skippedPods, job.Namespace, job.Name)
active -= skippedPods
for i := int32(0); i < skippedPods; i++ {
// Decrement the expected number of creates because the informer won't observe this pod

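NewJobController above passes klog.Infof straight into StartLogging. That works because record.EventBroadcaster.StartLogging accepts any func(format string, args ...interface{}), a signature klog.Infof shares with the glog.Infof it replaces. A small sketch of the same wiring in isolation:

package main

import (
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

func main() {
	// klog.Infof satisfies StartLogging's func(format string, args
	// ...interface{}) parameter, so the migration here is a one-token swap.
	broadcaster := record.NewBroadcaster()
	watcher := broadcaster.StartLogging(klog.Infof)
	defer watcher.Stop()

	klog.Info("events on this broadcaster are now logged through klog")
}
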
@@ -27,7 +27,7 @@ go_library(
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -22,7 +22,7 @@ go_library(
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -22,7 +22,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -110,7 +110,7 @@ func (d *namespacedResourcesDeleter) Delete(nsName string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.V(5).Infof("namespace controller - syncNamespace - namespace: %s, finalizerToken: %s", namespace.Name, d.finalizerToken)
|
||||
klog.V(5).Infof("namespace controller - syncNamespace - namespace: %s, finalizerToken: %s", namespace.Name, d.finalizerToken)
|
||||
|
||||
// ensure that the status is up to date on the namespace
|
||||
// if we get a not found error, we assume the namespace is truly gone
|
||||
@@ -169,13 +169,13 @@ func (d *namespacedResourcesDeleter) initOpCache() {
|
||||
utilruntime.HandleError(fmt.Errorf("unable to get all supported resources from server: %v", err))
|
||||
}
|
||||
if len(resources) == 0 {
|
||||
glog.Fatalf("Unable to get any supported resources from server: %v", err)
|
||||
klog.Fatalf("Unable to get any supported resources from server: %v", err)
|
||||
}
|
||||
deletableGroupVersionResources := []schema.GroupVersionResource{}
|
||||
for _, rl := range resources {
|
||||
gv, err := schema.ParseGroupVersion(rl.GroupVersion)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to parse GroupVersion %q, skipping: %v", rl.GroupVersion, err)
|
||||
klog.Errorf("Failed to parse GroupVersion %q, skipping: %v", rl.GroupVersion, err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -184,7 +184,7 @@ func (d *namespacedResourcesDeleter) initOpCache() {
|
||||
verbs := sets.NewString([]string(r.Verbs)...)
|
||||
|
||||
if !verbs.Has("delete") {
|
||||
glog.V(6).Infof("Skipping resource %v because it cannot be deleted.", gvr)
|
||||
klog.V(6).Infof("Skipping resource %v because it cannot be deleted.", gvr)
|
||||
}
|
||||
|
||||
for _, op := range []operation{operationList, operationDeleteCollection} {
|
||||
@@ -329,11 +329,11 @@ func (d *namespacedResourcesDeleter) finalizeNamespace(namespace *v1.Namespace)
|
||||
// it returns true if the operation was supported on the server.
|
||||
// it returns an error if the operation was supported on the server but was unable to complete.
|
||||
func (d *namespacedResourcesDeleter) deleteCollection(gvr schema.GroupVersionResource, namespace string) (bool, error) {
|
||||
glog.V(5).Infof("namespace controller - deleteCollection - namespace: %s, gvr: %v", namespace, gvr)
|
||||
klog.V(5).Infof("namespace controller - deleteCollection - namespace: %s, gvr: %v", namespace, gvr)
|
||||
|
||||
key := operationKey{operation: operationDeleteCollection, gvr: gvr}
|
||||
if !d.opCache.isSupported(key) {
|
||||
glog.V(5).Infof("namespace controller - deleteCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr)
|
||||
klog.V(5).Infof("namespace controller - deleteCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -355,12 +355,12 @@ func (d *namespacedResourcesDeleter) deleteCollection(gvr schema.GroupVersionRes
|
||||
// when working with this resource type, we will get a literal not found error rather than expected method not supported
|
||||
// remember next time that this resource does not support delete collection...
|
||||
if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) {
|
||||
glog.V(5).Infof("namespace controller - deleteCollection not supported - namespace: %s, gvr: %v", namespace, gvr)
|
||||
klog.V(5).Infof("namespace controller - deleteCollection not supported - namespace: %s, gvr: %v", namespace, gvr)
|
||||
d.opCache.setNotSupported(key)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
glog.V(5).Infof("namespace controller - deleteCollection unexpected error - namespace: %s, gvr: %v, error: %v", namespace, gvr, err)
|
||||
klog.V(5).Infof("namespace controller - deleteCollection unexpected error - namespace: %s, gvr: %v, error: %v", namespace, gvr, err)
|
||||
return true, err
|
||||
}
|
||||
|
||||
@@ -370,11 +370,11 @@ func (d *namespacedResourcesDeleter) deleteCollection(gvr schema.GroupVersionRes
|
||||
// a boolean if the operation is supported
|
||||
// an error if the operation is supported but could not be completed.
|
||||
func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResource, namespace string) (*unstructured.UnstructuredList, bool, error) {
|
||||
glog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr)
|
||||
klog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr)
|
||||
|
||||
key := operationKey{operation: operationList, gvr: gvr}
|
||||
if !d.opCache.isSupported(key) {
|
||||
glog.V(5).Infof("namespace controller - listCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr)
|
||||
klog.V(5).Infof("namespace controller - listCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr)
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
@@ -390,7 +390,7 @@ func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResou
|
||||
// when working with this resource type, we will get a literal not found error rather than expected method not supported
|
||||
// remember next time that this resource does not support delete collection...
|
||||
if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) {
|
||||
glog.V(5).Infof("namespace controller - listCollection not supported - namespace: %s, gvr: %v", namespace, gvr)
|
||||
klog.V(5).Infof("namespace controller - listCollection not supported - namespace: %s, gvr: %v", namespace, gvr)
|
||||
d.opCache.setNotSupported(key)
|
||||
return nil, false, nil
|
||||
}
|
||||
@@ -400,7 +400,7 @@ func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResou
|
||||
|
||||
// deleteEachItem is a helper function that will list the collection of resources and delete each item 1 by 1.
|
||||
func (d *namespacedResourcesDeleter) deleteEachItem(gvr schema.GroupVersionResource, namespace string) error {
|
||||
glog.V(5).Infof("namespace controller - deleteEachItem - namespace: %s, gvr: %v", namespace, gvr)
|
||||
klog.V(5).Infof("namespace controller - deleteEachItem - namespace: %s, gvr: %v", namespace, gvr)
|
||||
|
||||
unstructuredList, listSupported, err := d.listCollection(gvr, namespace)
|
||||
if err != nil {
|
||||
@@ -425,15 +425,15 @@ func (d *namespacedResourcesDeleter) deleteEachItem(gvr schema.GroupVersionResou
|
||||
func (d *namespacedResourcesDeleter) deleteAllContentForGroupVersionResource(
|
||||
gvr schema.GroupVersionResource, namespace string,
|
||||
namespaceDeletedAt metav1.Time) (int64, error) {
|
||||
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - namespace: %s, gvr: %v", namespace, gvr)
|
||||
klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - namespace: %s, gvr: %v", namespace, gvr)
|
||||
|
||||
// estimate how long it will take for the resource to be deleted (needed for objects that support graceful delete)
|
||||
estimate, err := d.estimateGracefulTermination(gvr, namespace, namespaceDeletedAt)
|
||||
if err != nil {
|
||||
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to estimate - namespace: %s, gvr: %v, err: %v", namespace, gvr, err)
|
||||
klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to estimate - namespace: %s, gvr: %v, err: %v", namespace, gvr, err)
|
||||
return estimate, err
|
||||
}
|
||||
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - estimate - namespace: %s, gvr: %v, estimate: %v", namespace, gvr, estimate)
|
||||
klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - estimate - namespace: %s, gvr: %v, estimate: %v", namespace, gvr, estimate)
|
||||
|
||||
// first try to delete the entire collection
|
||||
deleteCollectionSupported, err := d.deleteCollection(gvr, namespace)
|
||||
@@ -451,21 +451,21 @@ func (d *namespacedResourcesDeleter) deleteAllContentForGroupVersionResource(
|
||||
|
||||
// verify there are no more remaining items
|
||||
// it is not an error condition for there to be remaining items if local estimate is non-zero
|
||||
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - checking for no more items in namespace: %s, gvr: %v", namespace, gvr)
|
||||
klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - checking for no more items in namespace: %s, gvr: %v", namespace, gvr)
|
||||
unstructuredList, listSupported, err := d.listCollection(gvr, namespace)
|
||||
if err != nil {
|
||||
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - error verifying no items in namespace: %s, gvr: %v, err: %v", namespace, gvr, err)
|
||||
klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - error verifying no items in namespace: %s, gvr: %v, err: %v", namespace, gvr, err)
|
||||
return estimate, err
|
||||
}
|
||||
if !listSupported {
|
||||
return estimate, nil
|
||||
}
|
||||
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining - namespace: %s, gvr: %v, items: %v", namespace, gvr, len(unstructuredList.Items))
|
||||
klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining - namespace: %s, gvr: %v, items: %v", namespace, gvr, len(unstructuredList.Items))
|
||||
if len(unstructuredList.Items) != 0 && estimate == int64(0) {
|
||||
// if any item has a finalizer, we treat that as a normal condition, and use a default estimation to allow for GC to complete.
|
||||
for _, item := range unstructuredList.Items {
|
||||
if len(item.GetFinalizers()) > 0 {
|
||||
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining with finalizers - namespace: %s, gvr: %v, finalizers: %v", namespace, gvr, item.GetFinalizers())
|
||||
klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining with finalizers - namespace: %s, gvr: %v, finalizers: %v", namespace, gvr, item.GetFinalizers())
|
||||
return finalizerEstimateSeconds, nil
|
||||
}
|
||||
}
|
||||
@@ -480,7 +480,7 @@ func (d *namespacedResourcesDeleter) deleteAllContentForGroupVersionResource(
|
||||
// If estimate > 0, not all resources are guaranteed to be gone.
|
||||
func (d *namespacedResourcesDeleter) deleteAllContent(namespace string, namespaceDeletedAt metav1.Time) (int64, error) {
|
||||
estimate := int64(0)
|
||||
glog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s", namespace)
|
||||
klog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s", namespace)
|
||||
resources, err := d.discoverResourcesFn()
|
||||
if err != nil {
|
||||
return estimate, err
|
||||
@@ -506,14 +506,14 @@ func (d *namespacedResourcesDeleter) deleteAllContent(namespace string, namespac
|
||||
if len(errs) > 0 {
|
||||
return estimate, utilerrors.NewAggregate(errs)
|
||||
}
|
||||
glog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s, estimate: %v", namespace, estimate)
|
||||
klog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s, estimate: %v", namespace, estimate)
|
||||
return estimate, nil
|
||||
}
|
||||
|
||||
// estimateGrracefulTermination will estimate the graceful termination required for the specific entity in the namespace
|
||||
func (d *namespacedResourcesDeleter) estimateGracefulTermination(gvr schema.GroupVersionResource, ns string, namespaceDeletedAt metav1.Time) (int64, error) {
groupResource := gvr.GroupResource()
glog.V(5).Infof("namespace controller - estimateGracefulTermination - group %s, resource: %s", groupResource.Group, groupResource.Resource)
klog.V(5).Infof("namespace controller - estimateGracefulTermination - group %s, resource: %s", groupResource.Group, groupResource.Resource)
estimate := int64(0)
var err error
switch groupResource {
@@ -534,7 +534,7 @@ func (d *namespacedResourcesDeleter) estimateGracefulTermination(gvr schema.Grou

// estimateGracefulTerminationForPods determines the graceful termination period for pods in the namespace
func (d *namespacedResourcesDeleter) estimateGracefulTerminationForPods(ns string) (int64, error) {
glog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns)
klog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns)
estimate := int64(0)
podsGetter := d.podsGetter
if podsGetter == nil || reflect.ValueOf(podsGetter).IsNil() {
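The hunks above are representative of the whole patch: the import swaps from github.com/golang/glog to k8s.io/klog and each call site is renamed, since klog intentionally mirrors glog's logging surface. The main behavioral difference is flag registration, which klog leaves to the caller. A minimal sketch of that wiring, assuming a standalone toy main rather than any component in this diff:

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        // klog.InitFlags registers -v, -logtostderr, etc. on the default
        // FlagSet; unlike glog, this is not done implicitly in an init().
        klog.InitFlags(nil)
        flag.Parse()

        // The call surface matches glog, which is why the hunks above are
        // a mechanical s/glog/klog/ at each call site.
        klog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s", "demo")
        klog.Flush()
    }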
@@ -35,7 +35,7 @@ import (
"k8s.io/kubernetes/pkg/controller/namespace/deletion"
"k8s.io/kubernetes/pkg/util/metrics"

"github.com/golang/glog"
"k8s.io/klog"
)

const (
@@ -140,7 +140,7 @@ func (nm *NamespaceController) worker() {

if estimate, ok := err.(*deletion.ResourcesRemainingError); ok {
t := estimate.Estimate/2 + 1
glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", key, t)
klog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", key, t)
nm.queue.AddAfter(key, time.Duration(t)*time.Second)
} else {
// rather than wait for a full resync, re-add the namespace to the queue to be processed
@@ -163,12 +163,12 @@ func (nm *NamespaceController) worker() {
func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
}()

namespace, err := nm.lister.Get(key)
if errors.IsNotFound(err) {
glog.Infof("Namespace has been deleted %v", key)
klog.Infof("Namespace has been deleted %v", key)
return nil
}
if err != nil {
@@ -183,14 +183,14 @@ func (nm *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer nm.queue.ShutDown()

glog.Infof("Starting namespace controller")
defer glog.Infof("Shutting down namespace controller")
klog.Infof("Starting namespace controller")
defer klog.Infof("Shutting down namespace controller")

if !controller.WaitForCacheSync("namespace", stopCh, nm.listerSynced) {
return
}

glog.V(5).Info("Starting workers of namespace controller")
klog.V(5).Info("Starting workers of namespace controller")
for i := 0; i < workers; i++ {
go wait.Until(nm.worker, time.Second, stopCh)
}
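In the worker hunk above, a namespace whose resources are still draining is requeued after roughly half of the remaining-time estimate. A compact sketch of that backoff computation, with the error type reduced to a stand-in for deletion.ResourcesRemainingError:

    package main

    import (
        "fmt"
        "time"
    )

    // resourcesRemainingError stands in for deletion.ResourcesRemainingError.
    type resourcesRemainingError struct{ Estimate int64 }

    func (e *resourcesRemainingError) Error() string {
        return fmt.Sprintf("%d seconds estimated", e.Estimate)
    }

    // requeueAfter mirrors the hunk: wait about half the estimate, plus one
    // second so a zero estimate still produces a non-zero delay.
    func requeueAfter(err error) (time.Duration, bool) {
        if estimate, ok := err.(*resourcesRemainingError); ok {
            t := estimate.Estimate/2 + 1
            return time.Duration(t) * time.Second, true
        }
        return 0, false
    }

    func main() {
        if d, ok := requeueAfter(&resourcesRemainingError{Estimate: 15}); ok {
            fmt.Println("requeue after", d) // prints: requeue after 8s
        }
    }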
@@ -44,7 +44,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -69,7 +69,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/scheme:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -21,7 +21,7 @@ import (
"encoding/json"
"net"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -48,9 +48,9 @@ func newAdapter(k8s clientset.Interface, cloud *gce.Cloud) *adapter {
}

broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartLogging(klog.Infof)
ret.recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"})
glog.V(0).Infof("Sending events to api server.")
klog.V(0).Infof("Sending events to api server.")
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
Interface: k8s.CoreV1().Events(""),
})
@@ -70,7 +70,7 @@ func (a *adapter) Alias(ctx context.Context, nodeName string) (*net.IPNet, error
case 1:
break
default:
glog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", nodeName, cidrs)
klog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", nodeName, cidrs)
}

_, cidrRange, err := net.ParseCIDR(cidrs[0])
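newAdapter above passes klog.Infof directly to the event broadcaster. StartLogging accepts any printf-shaped function, which is why the glog-to-klog swap there is a one-token change. A tiny sketch of why the types line up (startLogging is a stand-in for the client-go method, not the real API):

    package main

    import "k8s.io/klog"

    // startLogging mimics the record.EventBroadcaster.StartLogging contract:
    // any printf-shaped function fits, so glog.Infof and klog.Infof are
    // interchangeable here.
    func startLogging(logf func(format string, args ...interface{})) {
        logf("Sending events to api server.")
    }

    func main() {
        klog.InitFlags(nil)
        startLogging(klog.Infof)
        klog.Flush()
    }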
@@ -21,7 +21,7 @@ import (
"net"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -121,7 +121,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
LabelSelector: labels.Everything().String(),
})
if err != nil {
glog.Errorf("Failed to list all nodes: %v", err)
klog.Errorf("Failed to list all nodes: %v", err)
return false, nil
}
return true, nil
@@ -10,7 +10,7 @@ go_test(
name = "go_default_test",
srcs = ["cidr_set_test.go"],
embed = [":go_default_library"],
deps = ["//vendor/github.com/golang/glog:go_default_library"],
deps = ["//vendor/k8s.io/klog:go_default_library"],
)

go_library(
@@ -22,7 +22,7 @@ import (
"reflect"
"testing"

"github.com/golang/glog"
"k8s.io/klog"
)

func TestCIDRSetFullyAllocated(t *testing.T) {
@@ -478,17 +478,17 @@ func TestGetBitforCIDR(t *testing.T) {

got, err := cs.getIndexForCIDR(subnetCIDR)
if err == nil && tc.expectErr {
glog.Errorf("expected error but got null for %v", tc.description)
klog.Errorf("expected error but got null for %v", tc.description)
continue
}

if err != nil && !tc.expectErr {
glog.Errorf("unexpected error: %v for %v", err, tc.description)
klog.Errorf("unexpected error: %v for %v", err, tc.description)
continue
}

if got != tc.expectedBit {
glog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description)
klog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description)
}
}
}
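Tests like cidr_set_test.go log through klog, and the commit wires klog's flags into tests explicitly. A plausible shape for that wiring (the init() and TestLoggingWired below are illustrative, not part of this diff):

    package cidrset

    import (
        "flag"
        "testing"

        "k8s.io/klog"
    )

    func init() {
        // Register klog's flags before the testing framework parses the
        // command line; without this, -v and friends are unknown flags.
        klog.InitFlags(nil)
    }

    func TestLoggingWired(t *testing.T) {
        if err := flag.Set("v", "5"); err != nil {
            t.Fatalf("setting -v: %v", err)
        }
        klog.V(5).Infof("verbose logging enabled for %s", t.Name())
    }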
@@ -23,7 +23,7 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -84,13 +84,13 @@ var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)
// NewCloudCIDRAllocator creates a new cloud CIDR allocator.
func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
if client == nil {
glog.Fatalf("kubeClient is nil when starting NodeController")
klog.Fatalf("kubeClient is nil when starting NodeController")
}

eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartLogging(klog.Infof)
klog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

gceCloud, ok := cloud.(*gce.Cloud)
@@ -127,15 +127,15 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
})

glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
klog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
return ca, nil
}

func (ca *cloudCIDRAllocator) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()

glog.Infof("Starting cloud CIDR allocator")
defer glog.Infof("Shutting down cloud CIDR allocator")
klog.Infof("Starting cloud CIDR allocator")
defer klog.Infof("Shutting down cloud CIDR allocator")

if !controller.WaitForCacheSync("cidrallocator", stopCh, ca.nodesSynced) {
return
@@ -153,22 +153,22 @@ func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
select {
case workItem, ok := <-ca.nodeUpdateChannel:
if !ok {
glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
return
}
if err := ca.updateCIDRAllocation(workItem); err == nil {
glog.V(3).Infof("Updated CIDR for %q", workItem)
klog.V(3).Infof("Updated CIDR for %q", workItem)
} else {
glog.Errorf("Error updating CIDR for %q: %v", workItem, err)
klog.Errorf("Error updating CIDR for %q: %v", workItem, err)
if canRetry, timeout := ca.retryParams(workItem); canRetry {
glog.V(2).Infof("Retrying update for %q after %v", workItem, timeout)
klog.V(2).Infof("Retrying update for %q after %v", workItem, timeout)
time.AfterFunc(timeout, func() {
// Requeue the failed node for update again.
ca.nodeUpdateChannel <- workItem
})
continue
}
glog.Errorf("Exceeded retry count for %q, dropping from queue", workItem)
klog.Errorf("Exceeded retry count for %q, dropping from queue", workItem)
}
ca.removeNodeFromProcessing(workItem)
case <-stopChan:
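The worker above retries a failed CIDR update by re-sending the node name to the channel from a time.AfterFunc callback, so the worker goroutine itself never blocks on the backoff. The same shape in isolation (the channel payloads and the failing-then-succeeding process function are invented for the demo):

    package main

    import (
        "fmt"
        "time"
    )

    var attempts int

    // process fails once and then succeeds, to exercise the retry path.
    func process(item string) error {
        attempts++
        if attempts < 2 {
            return fmt.Errorf("transient failure for %s", item)
        }
        return nil
    }

    func main() {
        updates := make(chan string, 1)
        done := make(chan struct{})

        go func() {
            for item := range updates {
                if err := process(item); err != nil {
                    // Requeue after a delay without blocking the worker,
                    // the same shape used in the hunk above.
                    time.AfterFunc(50*time.Millisecond, func() { updates <- item })
                    continue
                }
                fmt.Println("processed", item)
                close(done)
                return
            }
        }()

        updates <- "node-a"
        <-done
    }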
@@ -193,7 +193,7 @@ func (ca *cloudCIDRAllocator) retryParams(nodeName string) (bool, time.Duration)

entry, ok := ca.nodesInProcessing[nodeName]
if !ok {
glog.Errorf("Cannot get retryParams for %q as entry does not exist", nodeName)
klog.Errorf("Cannot get retryParams for %q as entry does not exist", nodeName)
return false, 0
}

@@ -231,11 +231,11 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
return nil
}
if !ca.insertNodeToProcessing(node.Name) {
glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
return nil
}

glog.V(4).Infof("Putting node %s into the work queue", node.Name)
klog.V(4).Infof("Putting node %s into the work queue", node.Name)
ca.nodeUpdateChannel <- node.Name
return nil
}
@@ -247,7 +247,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
if errors.IsNotFound(err) {
return nil // node no longer available, skip processing
}
glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
return err
}

@@ -267,11 +267,11 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
podCIDR := cidr.String()

if node.Spec.PodCIDR == podCIDR {
glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
// We don't return here, in order to set the NetworkUnavailable condition later below.
} else {
if node.Spec.PodCIDR != "" {
glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR)
klog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR)
// We fall through and set the CIDR despite this error. This
// implements the same logic as implemented in the
// rangeAllocator.
@@ -280,14 +280,14 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
}
for i := 0; i < cidrUpdateRetries; i++ {
if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
break
}
}
}
if err != nil {
nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
return err
}

@@ -299,13 +299,13 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
LastTransitionTime: metav1.Now(),
})
if err != nil {
glog.Errorf("Error setting route status for node %v: %v", node.Name, err)
klog.Errorf("Error setting route status for node %v: %v", node.Name, err)
}
return err
}

func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error {
glog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)",
klog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)",
node.Name, node.Spec.PodCIDR)
return nil
}
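updateCIDRAllocation above retries the PodCIDR patch a bounded number of times and only records failure once the loop is exhausted. Reduced to its skeleton (patchNodeCIDR is a stub standing in for utilnode.PatchNodeCIDR):

    package main

    import (
        "errors"
        "fmt"
    )

    const cidrUpdateRetries = 3

    var calls int

    // patchNodeCIDR stands in for utilnode.PatchNodeCIDR; it fails twice so
    // the bounded retry loop is exercised.
    func patchNodeCIDR(name, podCIDR string) error {
        calls++
        if calls < 3 {
            return errors.New("conflict")
        }
        return nil
    }

    func main() {
        var err error
        for i := 0; i < cidrUpdateRetries; i++ {
            if err = patchNodeCIDR("node-a", "10.0.0.0/24"); err == nil {
                fmt.Println("set PodCIDR on attempt", i+1)
                break
            }
        }
        if err != nil {
            fmt.Println("failed after multiple attempts:", err)
        }
    }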
@@ -22,7 +22,7 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
informers "k8s.io/client-go/informers/core/v1"
@@ -99,7 +99,7 @@ func NewController(
// registers the informers for node changes. This will start synchronization
// of the node and cloud CIDR range allocations.
func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
glog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config)
klog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config)

nodes, err := listNodes(c.adapter.k8s)
if err != nil {
@@ -110,9 +110,9 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
_, cidrRange, err := net.ParseCIDR(node.Spec.PodCIDR)
if err == nil {
c.set.Occupy(cidrRange)
glog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR)
klog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR)
} else {
glog.Errorf("Node %q has an invalid CIDR (%q): %v", node.Name, node.Spec.PodCIDR, err)
klog.Errorf("Node %q has an invalid CIDR (%q): %v", node.Name, node.Spec.PodCIDR, err)
}
}

@@ -180,7 +180,7 @@ func (c *Controller) onAdd(node *v1.Node) error {
c.syncers[node.Name] = syncer
go syncer.Loop(nil)
} else {
glog.Warningf("Add for node %q that already exists", node.Name)
klog.Warningf("Add for node %q that already exists", node.Name)
}
syncer.Update(node)

@@ -194,7 +194,7 @@ func (c *Controller) onUpdate(_, node *v1.Node) error {
if sync, ok := c.syncers[node.Name]; ok {
sync.Update(node)
} else {
glog.Errorf("Received update for non-existent node %q", node.Name)
klog.Errorf("Received update for non-existent node %q", node.Name)
return fmt.Errorf("unknown node %q", node.Name)
}

@@ -209,7 +209,7 @@ func (c *Controller) onDelete(node *v1.Node) error {
syncer.Delete(node)
delete(c.syncers, node.Name)
} else {
glog.Warningf("Node %q was already deleted", node.Name)
klog.Warningf("Node %q was already deleted", node.Name)
}

return nil
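The onAdd/onUpdate/onDelete hunks keep one syncer per node in a map keyed by node name, creating it on add and dropping it on delete. A minimal sketch of that registry pattern (the syncer type here is a stand-in, and the real controller guards the map with its own locking):

    package main

    import (
        "fmt"
        "sync"
    )

    type syncer struct{ name string }

    func (s *syncer) update() { fmt.Println("update", s.name) }

    type controller struct {
        mu      sync.Mutex
        syncers map[string]*syncer
    }

    func (c *controller) onAdd(name string) {
        c.mu.Lock()
        defer c.mu.Unlock()
        // Create the per-node syncer only once, as the hunk above does.
        if _, ok := c.syncers[name]; !ok {
            c.syncers[name] = &syncer{name: name}
        }
        c.syncers[name].update()
    }

    func (c *controller) onDelete(name string) {
        c.mu.Lock()
        defer c.mu.Unlock()
        delete(c.syncers, name)
    }

    func main() {
        c := &controller{syncers: map[string]*syncer{}}
        c.onAdd("node-a")
        c.onDelete("node-a")
    }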
@@ -21,7 +21,7 @@ import (
"net"
"sync"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -69,13 +69,13 @@ type rangeAllocator struct {
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
if client == nil {
glog.Fatalf("kubeClient is nil when starting NodeController")
klog.Fatalf("kubeClient is nil when starting NodeController")
}

eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartLogging(klog.Infof)
klog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

set, err := cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize)
@@ -96,16 +96,16 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
if serviceCIDR != nil {
ra.filterOutServiceRange(serviceCIDR)
} else {
glog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
klog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
}

if nodeList != nil {
for _, node := range nodeList.Items {
if node.Spec.PodCIDR == "" {
glog.Infof("Node %v has no CIDR, ignoring", node.Name)
klog.Infof("Node %v has no CIDR, ignoring", node.Name)
continue
} else {
glog.Infof("Node %v has CIDR %s, occupying it in CIDR map",
klog.Infof("Node %v has CIDR %s, occupying it in CIDR map",
node.Name, node.Spec.PodCIDR)
}
if err := ra.occupyCIDR(&node); err != nil {
@@ -154,8 +154,8 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
func (r *rangeAllocator) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()

glog.Infof("Starting range CIDR allocator")
defer glog.Infof("Shutting down range CIDR allocator")
klog.Infof("Starting range CIDR allocator")
defer klog.Infof("Shutting down range CIDR allocator")

if !controller.WaitForCacheSync("cidrallocator", stopCh, r.nodesSynced) {
return
@@ -173,7 +173,7 @@ func (r *rangeAllocator) worker(stopChan <-chan struct{}) {
select {
case workItem, ok := <-r.nodeCIDRUpdateChannel:
if !ok {
glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
return
}
if err := r.updateCIDRAllocation(workItem); err != nil {
@@ -225,7 +225,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
return nil
}
if !r.insertNodeToProcessing(node.Name) {
glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
return nil
}
if node.Spec.PodCIDR != "" {
@@ -238,7 +238,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
return fmt.Errorf("failed to allocate cidr: %v", err)
}

glog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
klog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
r.nodeCIDRUpdateChannel <- nodeAndCIDR{
nodeName: node.Name,
cidr: podCIDR,
@@ -255,7 +255,7 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err)
}

glog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
klog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
if err = r.cidrs.Release(podCIDR); err != nil {
return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err)
}
@@ -275,7 +275,7 @@ func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
}

if err := r.cidrs.Occupy(serviceCIDR); err != nil {
glog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
klog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
}
}

@@ -289,37 +289,37 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {

node, err = r.nodeLister.Get(data.nodeName)
if err != nil {
glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err)
klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err)
return err
}

if node.Spec.PodCIDR == podCIDR {
glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
return nil
}
if node.Spec.PodCIDR != "" {
glog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR)
klog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR)
if err := r.cidrs.Release(data.cidr); err != nil {
glog.Errorf("Error when releasing CIDR %v", podCIDR)
klog.Errorf("Error when releasing CIDR %v", podCIDR)
}
return nil
}
// If we reached here, it means that the node has no CIDR currently assigned. So we set it.
for i := 0; i < cidrUpdateRetries; i++ {
if err = utilnode.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil {
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
return nil
}
}
glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
// We accept that we may leak CIDRs here. This is safer than releasing
// them when we don't know whether the request went through.
// NodeController restart will return all falsely allocated CIDRs to the pool.
if !apierrors.IsServerTimeout(err) {
glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err)
klog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err)
if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil {
glog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
klog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
}
}
return err
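Throughout the range allocator, the CIDR set is the single source of truth: ranges are occupied as they are discovered and released on failure or node deletion. A toy version of that bookkeeping (the real cidrset indexes a bitmap by subnet position within the cluster CIDR; a map keyed by the CIDR string is enough to show the contract):

    package main

    import (
        "fmt"
        "net"
    )

    // toySet tracks occupied CIDRs by string key.
    type toySet struct{ used map[string]bool }

    func (s *toySet) Occupy(c *net.IPNet) error {
        if s.used[c.String()] {
            return fmt.Errorf("cidr %v already occupied", c)
        }
        s.used[c.String()] = true
        return nil
    }

    func (s *toySet) Release(c *net.IPNet) {
        delete(s.used, c.String())
    }

    func main() {
        s := &toySet{used: map[string]bool{}}
        _, cidr, _ := net.ParseCIDR("10.10.0.0/24")
        if err := s.Occupy(cidr); err != nil {
            fmt.Println(err)
        }
        // Mirrors the failure path in updateCIDRAllocation: on a failed
        // patch, the tentatively allocated range goes back to the pool.
        s.Release(cidr)
    }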
@@ -8,7 +8,7 @@ go_library(
deps = [
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -21,7 +21,7 @@ go_test(
"//pkg/controller/nodeipam/ipam/test:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -22,7 +22,7 @@ import (
"net"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
@@ -120,7 +120,7 @@ func New(c controller, cloudAlias cloudAlias, kubeAPI kubeAPI, mode NodeSyncMode
// Loop runs the sync loop for a given node. done is an optional channel that
// is closed when the Loop() returns.
func (sync *NodeSync) Loop(done chan struct{}) {
glog.V(2).Infof("Starting sync loop for node %q", sync.nodeName)
klog.V(2).Infof("Starting sync loop for node %q", sync.nodeName)

defer func() {
if done != nil {
@@ -130,13 +130,13 @@ func (sync *NodeSync) Loop(done chan struct{}) {

timeout := sync.c.ResyncTimeout()
delayTimer := time.NewTimer(timeout)
glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
klog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)

for {
select {
case op, more := <-sync.opChan:
if !more {
glog.V(2).Infof("Stopping sync loop")
klog.V(2).Infof("Stopping sync loop")
return
}
sync.c.ReportResult(op.run(sync))
@@ -144,13 +144,13 @@ func (sync *NodeSync) Loop(done chan struct{}) {
<-delayTimer.C
}
case <-delayTimer.C:
glog.V(4).Infof("Running resync for node %q", sync.nodeName)
klog.V(4).Infof("Running resync for node %q", sync.nodeName)
sync.c.ReportResult((&updateOp{}).run(sync))
}

timeout := sync.c.ResyncTimeout()
delayTimer.Reset(timeout)
glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
klog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
}
}
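Loop above multiplexes an op channel with a resync timer, draining and resetting the timer after each piece of work so the resync deadline always measures from the most recent activity. The distilled select shape:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        ops := make(chan string)
        resync := 20 * time.Millisecond
        delayTimer := time.NewTimer(resync)

        go func() {
            ops <- "update"
            close(ops)
        }()

        for {
            select {
            case op, more := <-ops:
                if !more {
                    fmt.Println("stopping sync loop")
                    return
                }
                fmt.Println("ran op:", op)
                // Stop and drain before Reset, as Loop does, so the
                // timer can be reused safely.
                if !delayTimer.Stop() {
                    <-delayTimer.C
                }
            case <-delayTimer.C:
                fmt.Println("running periodic resync")
            }
            delayTimer.Reset(resync)
        }
    }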
@@ -190,15 +190,15 @@ func (op *updateOp) String() string {
}

func (op *updateOp) run(sync *NodeSync) error {
glog.V(3).Infof("Running updateOp %+v", op)
klog.V(3).Infof("Running updateOp %+v", op)

ctx := context.Background()

if op.node == nil {
glog.V(3).Infof("Getting node spec for %q", sync.nodeName)
klog.V(3).Infof("Getting node spec for %q", sync.nodeName)
node, err := sync.kubeAPI.Node(ctx, sync.nodeName)
if err != nil {
glog.Errorf("Error getting node %q spec: %v", sync.nodeName, err)
klog.Errorf("Error getting node %q spec: %v", sync.nodeName, err)
return err
}
op.node = node
@@ -206,7 +206,7 @@ func (op *updateOp) run(sync *NodeSync) error {

aliasRange, err := sync.cloudAlias.Alias(ctx, sync.nodeName)
if err != nil {
glog.Errorf("Error getting cloud alias for node %q: %v", sync.nodeName, err)
klog.Errorf("Error getting cloud alias for node %q: %v", sync.nodeName, err)
return err
}

@@ -228,14 +228,14 @@ func (op *updateOp) run(sync *NodeSync) error {
// match.
func (op *updateOp) validateRange(ctx context.Context, sync *NodeSync, node *v1.Node, aliasRange *net.IPNet) error {
if node.Spec.PodCIDR != aliasRange.String() {
glog.Errorf("Inconsistency detected between node PodCIDR and node alias (%v != %v)",
klog.Errorf("Inconsistency detected between node PodCIDR and node alias (%v != %v)",
node.Spec.PodCIDR, aliasRange)
sync.kubeAPI.EmitNodeWarningEvent(node.Name, MismatchEvent,
"Node.Spec.PodCIDR != cloud alias (%v != %v)", node.Spec.PodCIDR, aliasRange)
// User intervention is required in this case, as this is most likely due
// to the user mucking around with their VM aliases on the side.
} else {
glog.V(4).Infof("Node %q CIDR range %v is matches cloud assignment", node.Name, node.Spec.PodCIDR)
|
||||
klog.V(4).Infof("Node %q CIDR range %v is matches cloud assignment", node.Name, node.Spec.PodCIDR)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -249,26 +249,26 @@ func (op *updateOp) updateNodeFromAlias(ctx context.Context, sync *NodeSync, nod
return fmt.Errorf("cannot sync from cloud in mode %q", sync.mode)
}

glog.V(2).Infof("Updating node spec with alias range, node.PodCIDR = %v", aliasRange)
klog.V(2).Infof("Updating node spec with alias range, node.PodCIDR = %v", aliasRange)

if err := sync.set.Occupy(aliasRange); err != nil {
glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
klog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
return err
}

if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, aliasRange); err != nil {
glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, aliasRange, err)
klog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, aliasRange, err)
return err
}

glog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange)
klog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange)

if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
return err
}

glog.V(2).Infof("Updated node %q PodCIDR from cloud alias %v", node.Name, aliasRange)
klog.V(2).Infof("Updated node %q PodCIDR from cloud alias %v", node.Name, aliasRange)

return nil
}
@@ -283,27 +283,27 @@ func (op *updateOp) updateAliasFromNode(ctx context.Context, sync *NodeSync, nod

_, aliasRange, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
glog.Errorf("Could not parse PodCIDR (%q) for node %q: %v",
klog.Errorf("Could not parse PodCIDR (%q) for node %q: %v",
node.Spec.PodCIDR, node.Name, err)
return err
}

if err := sync.set.Occupy(aliasRange); err != nil {
glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
klog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
return err
}

if err := sync.cloudAlias.AddAlias(ctx, node.Name, aliasRange); err != nil {
glog.Errorf("Could not add alias %v for node %q: %v", aliasRange, node.Name, err)
klog.Errorf("Could not add alias %v for node %q: %v", aliasRange, node.Name, err)
return err
}

if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
return err
}

glog.V(2).Infof("Updated node %q cloud alias with node spec, node.PodCIDR = %v",
klog.V(2).Infof("Updated node %q cloud alias with node spec, node.PodCIDR = %v",
node.Name, node.Spec.PodCIDR)

return nil
@@ -326,21 +326,21 @@ func (op *updateOp) allocateRange(ctx context.Context, sync *NodeSync, node *v1.
// is no durable record of the range. The missing space will be
// recovered on the next restart of the controller.
if err := sync.cloudAlias.AddAlias(ctx, node.Name, cidrRange); err != nil {
glog.Errorf("Could not add alias %v for node %q: %v", cidrRange, node.Name, err)
klog.Errorf("Could not add alias %v for node %q: %v", cidrRange, node.Name, err)
return err
}

if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, cidrRange); err != nil {
glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, cidrRange, err)
klog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, cidrRange, err)
return err
}

if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
return err
}

glog.V(2).Infof("Allocated PodCIDR %v for node %q", cidrRange, node.Name)
klog.V(2).Infof("Allocated PodCIDR %v for node %q", cidrRange, node.Name)

return nil
}
@@ -358,15 +358,15 @@ func (op *deleteOp) String() string {
}

func (op *deleteOp) run(sync *NodeSync) error {
glog.V(3).Infof("Running deleteOp %+v", op)
klog.V(3).Infof("Running deleteOp %+v", op)
if op.node.Spec.PodCIDR == "" {
glog.V(2).Infof("Node %q was deleted, node had no PodCIDR range assigned", op.node.Name)
klog.V(2).Infof("Node %q was deleted, node had no PodCIDR range assigned", op.node.Name)
return nil
}

_, cidrRange, err := net.ParseCIDR(op.node.Spec.PodCIDR)
if err != nil {
glog.Errorf("Deleted node %q has an invalid podCIDR %q: %v",
klog.Errorf("Deleted node %q has an invalid podCIDR %q: %v",
op.node.Name, op.node.Spec.PodCIDR, err)
sync.kubeAPI.EmitNodeWarningEvent(op.node.Name, InvalidPodCIDR,
"Node %q has an invalid PodCIDR: %q", op.node.Name, op.node.Spec.PodCIDR)
@@ -374,7 +374,7 @@ func (op *deleteOp) run(sync *NodeSync) error {
}

sync.set.Release(cidrRange)
glog.V(2).Infof("Node %q was deleted, releasing CIDR range %v",
klog.V(2).Infof("Node %q was deleted, releasing CIDR range %v",
op.node.Name, op.node.Spec.PodCIDR)

return nil
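Several of these functions parse node.Spec.PodCIDR with net.ParseCIDR and treat a parse failure as a warning-event condition rather than a hard stop. A small standalone version of that validation shape (validatePodCIDR is an illustrative helper, not a function from the diff):

    package main

    import (
        "fmt"
        "net"
    )

    // validatePodCIDR mirrors the parse-then-act shape used by deleteOp.run:
    // an empty CIDR is a no-op, a malformed one is reported, and a valid one
    // is returned for occupy/release bookkeeping.
    func validatePodCIDR(podCIDR string) (*net.IPNet, error) {
        if podCIDR == "" {
            return nil, nil // nothing assigned; nothing to do
        }
        _, cidrRange, err := net.ParseCIDR(podCIDR)
        if err != nil {
            return nil, fmt.Errorf("invalid podCIDR %q: %v", podCIDR, err)
        }
        return cidrRange, nil
    }

    func main() {
        for _, c := range []string{"", "10.0.0.0/24", "not-a-cidr"} {
            r, err := validatePodCIDR(c)
            fmt.Printf("%q -> %v, %v\n", c, r, err)
        }
    }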
@@ -24,8 +24,8 @@ import (
"testing"
"time"

"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"

@@ -88,7 +88,7 @@ func (f *fakeAPIs) EmitNodeWarningEvent(nodeName, reason, fmtStr string, args ..
}

func (f *fakeAPIs) ReportResult(err error) {
glog.V(2).Infof("ReportResult %v", err)
klog.V(2).Infof("ReportResult %v", err)
f.results = append(f.results, err)
if f.reportChan != nil {
f.reportChan <- struct{}{}
@@ -104,7 +104,7 @@ func (f *fakeAPIs) ResyncTimeout() time.Duration {

func (f *fakeAPIs) dumpTrace() {
for i, x := range f.calls {
glog.Infof("trace %v: %v", i, x)
klog.Infof("trace %v: %v", i, x)
}
}

|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
|
||||
@@ -90,13 +90,13 @@ func NewNodeIpamController(
|
||||
allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
|
||||
|
||||
if kubeClient == nil {
|
||||
glog.Fatalf("kubeClient is nil when starting Controller")
|
||||
klog.Fatalf("kubeClient is nil when starting Controller")
|
||||
}
|
||||
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventBroadcaster.StartLogging(klog.Infof)
|
||||
|
||||
glog.Infof("Sending events to api server.")
|
||||
klog.Infof("Sending events to api server.")
|
||||
eventBroadcaster.StartRecordingToSink(
|
||||
&v1core.EventSinkImpl{
|
||||
Interface: kubeClient.CoreV1().Events(""),
|
||||
@@ -107,13 +107,13 @@ func NewNodeIpamController(
|
||||
}
|
||||
|
||||
if clusterCIDR == nil {
|
||||
glog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
|
||||
klog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
|
||||
}
|
||||
mask := clusterCIDR.Mask
|
||||
if allocatorType != ipam.CloudAllocatorType {
|
||||
// Cloud CIDR allocator does not rely on clusterCIDR or nodeCIDRMaskSize for allocation.
|
||||
if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
|
||||
glog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
|
||||
klog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -141,10 +141,10 @@ func NewNodeIpamController(
|
||||
}
|
||||
ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating ipam controller: %v", err)
|
||||
klog.Fatalf("Error creating ipam controller: %v", err)
|
||||
}
|
||||
if err := ipamc.Start(nodeInformer); err != nil {
|
||||
glog.Fatalf("Error trying to Init(): %v", err)
|
||||
klog.Fatalf("Error trying to Init(): %v", err)
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
@@ -165,8 +165,8 @@ func NewNodeIpamController(
|
||||
func (nc *Controller) Run(stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
|
||||
glog.Infof("Starting ipam controller")
|
||||
defer glog.Infof("Shutting down ipam controller")
|
||||
klog.Infof("Starting ipam controller")
|
||||
defer klog.Infof("Shutting down ipam controller")
|
||||
|
||||
if !controller.WaitForCacheSync("node", stopCh, nc.nodeInformerSynced) {
|
||||
return
|
||||
|
@@ -43,8 +43,8 @@ go_library(
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -29,7 +29,7 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/klog"

coordv1beta1 "k8s.io/api/coordination/v1beta1"
"k8s.io/api/core/v1"
@@ -262,14 +262,14 @@ func NewNodeLifecycleController(
taintNodeByCondition bool) (*Controller, error) {

if kubeClient == nil {
glog.Fatalf("kubeClient is nil when starting Controller")
klog.Fatalf("kubeClient is nil when starting Controller")
}

eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"})
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)

glog.Infof("Sending events to api server.")
klog.Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(
&v1core.EventSinkImpl{
Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""),
@@ -309,7 +309,7 @@ func NewNodeLifecycleController(
nodeUpdateQueue: workqueue.New(),
}
if useTaintBasedEvictions {
glog.Infof("Controller is using taint based evictions.")
klog.Infof("Controller is using taint based evictions.")
}

nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc
@@ -336,12 +336,12 @@ func NewNodeLifecycleController(
if !isPod {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Received unexpected object: %v", obj)
klog.Errorf("Received unexpected object: %v", obj)
return
}
pod, ok = deletedState.Obj.(*v1.Pod)
if !ok {
glog.Errorf("DeletedFinalStateUnknown contained non-Pod object: %v", deletedState.Obj)
klog.Errorf("DeletedFinalStateUnknown contained non-Pod object: %v", deletedState.Obj)
return
}
}
@@ -375,7 +375,7 @@ func NewNodeLifecycleController(
}

if nc.taintNodeByCondition {
glog.Infof("Controller will taint node by condition.")
klog.Infof("Controller will taint node by condition.")
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error {
nc.nodeUpdateQueue.Add(node.Name)
@@ -420,8 +420,8 @@ func NewNodeLifecycleController(
func (nc *Controller) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()

glog.Infof("Starting node controller")
defer glog.Infof("Shutting down node controller")
klog.Infof("Starting node controller")
defer klog.Infof("Shutting down node controller")

if !controller.WaitForCacheSync("taint", stopCh, nc.leaseInformerSynced, nc.nodeInformerSynced, nc.podInformerSynced, nc.daemonSetInformerSynced) {
return
@@ -459,7 +459,7 @@ func (nc *Controller) Run(stopCh <-chan struct{}) {
// Incorporate the results of node health signal pushed from kubelet to master.
go wait.Until(func() {
if err := nc.monitorNodeHealth(); err != nil {
glog.Errorf("Error monitoring node health: %v", err)
klog.Errorf("Error monitoring node health: %v", err)
}
}, nc.nodeMonitorPeriod, stopCh)

@@ -495,7 +495,7 @@ func (nc *Controller) doFixDeprecatedTaintKeyPass(node *v1.Node) error {
return nil
}

glog.Warningf("Detected deprecated taint keys: %v on node: %v, will substitute them with %v",
klog.Warningf("Detected deprecated taint keys: %v on node: %v, will substitute them with %v",
taintsToDel, node.GetName(), taintsToAdd)

if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, taintsToAdd, taintsToDel, node) {
@@ -516,7 +516,7 @@ func (nc *Controller) doNoScheduleTaintingPassWorker() {

if err := nc.doNoScheduleTaintingPass(nodeName); err != nil {
// TODO (k82cn): Add nodeName back to the queue.
glog.Errorf("Failed to taint NoSchedule on node <%s>, requeue it: %v", nodeName, err)
klog.Errorf("Failed to taint NoSchedule on node <%s>, requeue it: %v", nodeName, err)
}
nc.nodeUpdateQueue.Done(nodeName)
}
@@ -585,10 +585,10 @@ func (nc *Controller) doNoExecuteTaintingPass() {
nc.zoneNoExecuteTainter[k].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
node, err := nc.nodeLister.Get(value.Value)
if apierrors.IsNotFound(err) {
glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
klog.Warningf("Node %v no longer present in nodeLister!", value.Value)
return true, 0
} else if err != nil {
glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
klog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
// retry in 50 milliseconds
return false, 50 * time.Millisecond
} else {
@@ -607,7 +607,7 @@ func (nc *Controller) doNoExecuteTaintingPass() {
oppositeTaint = *NotReadyTaintTemplate
} else {
// It seems that the Node is ready again, so there's no need to taint it.
glog.V(4).Infof("Node %v was in a taint queue, but it's ready now. Ignoring taint request.", value.Value)
klog.V(4).Infof("Node %v was in a taint queue, but it's ready now. Ignoring taint request.", value.Value)
return true, 0
}

@@ -624,9 +624,9 @@ func (nc *Controller) doEvictionPass() {
nc.zonePodEvictor[k].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
node, err := nc.nodeLister.Get(value.Value)
if apierrors.IsNotFound(err) {
glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
klog.Warningf("Node %v no longer present in nodeLister!", value.Value)
} else if err != nil {
glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
klog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
} else {
zone := utilnode.GetZoneKey(node)
evictionsNumber.WithLabelValues(zone).Inc()
@@ -638,7 +638,7 @@ func (nc *Controller) doEvictionPass() {
return false, 0
}
if remaining {
glog.Infof("Pods awaiting deletion due to Controller eviction")
klog.Infof("Pods awaiting deletion due to Controller eviction")
}
return true, 0
})
@@ -662,7 +662,7 @@ func (nc *Controller) monitorNodeHealth() error {
}

for i := range added {
glog.V(1).Infof("Controller observed a new Node: %#v", added[i].Name)
klog.V(1).Infof("Controller observed a new Node: %#v", added[i].Name)
nodeutil.RecordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in Controller", added[i].Name))
nc.knownNodeSet[added[i].Name] = added[i]
nc.addPodEvictorForNewZone(added[i])
@@ -674,7 +674,7 @@ func (nc *Controller) monitorNodeHealth() error {
}

for i := range deleted {
glog.V(1).Infof("Controller observed a Node deletion: %v", deleted[i].Name)
klog.V(1).Infof("Controller observed a Node deletion: %v", deleted[i].Name)
nodeutil.RecordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from Controller", deleted[i].Name))
delete(nc.knownNodeSet, deleted[i].Name)
}
@@ -693,12 +693,12 @@ func (nc *Controller) monitorNodeHealth() error {
name := node.Name
node, err = nc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
glog.Errorf("Failed while getting a Node to retry updating node health. Probably Node %s was deleted.", name)
klog.Errorf("Failed while getting a Node to retry updating node health. Probably Node %s was deleted.", name)
return false, err
}
return false, nil
}); err != nil {
glog.Errorf("Update health of Node '%v' from Controller error: %v. "+
klog.Errorf("Update health of Node '%v' from Controller error: %v. "+
"Skipping - no pods will be evicted.", node.Name, err)
continue
}
@@ -717,10 +717,10 @@ func (nc *Controller) monitorNodeHealth() error {
if taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) {
taintToAdd := *NotReadyTaintTemplate
if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) {
glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
klog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
}
} else if nc.markNodeForTainting(node) {
glog.V(2).Infof("Node %v is NotReady as of %v. Adding it to the Taint queue.",
klog.V(2).Infof("Node %v is NotReady as of %v. Adding it to the Taint queue.",
node.Name,
decisionTimestamp,
)
@@ -728,7 +728,7 @@ func (nc *Controller) monitorNodeHealth() error {
} else {
if decisionTimestamp.After(nc.nodeHealthMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
if nc.evictPods(node) {
glog.V(2).Infof("Node is NotReady. Adding Pods on Node %s to eviction queue: %v is later than %v + %v",
klog.V(2).Infof("Node is NotReady. Adding Pods on Node %s to eviction queue: %v is later than %v + %v",
node.Name,
decisionTimestamp,
nc.nodeHealthMap[node.Name].readyTransitionTimestamp,
@@ -744,10 +744,10 @@ func (nc *Controller) monitorNodeHealth() error {
if taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) {
taintToAdd := *UnreachableTaintTemplate
if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) {
glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
klog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
}
} else if nc.markNodeForTainting(node) {
glog.V(2).Infof("Node %v is unresponsive as of %v. Adding it to the Taint queue.",
klog.V(2).Infof("Node %v is unresponsive as of %v. Adding it to the Taint queue.",
node.Name,
decisionTimestamp,
)
@@ -755,7 +755,7 @@ func (nc *Controller) monitorNodeHealth() error {
} else {
if decisionTimestamp.After(nc.nodeHealthMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout)) {
if nc.evictPods(node) {
glog.V(2).Infof("Node is unresponsive. Adding Pods on Node %s to eviction queues: %v is later than %v + %v",
klog.V(2).Infof("Node is unresponsive. Adding Pods on Node %s to eviction queues: %v is later than %v + %v",
node.Name,
decisionTimestamp,
nc.nodeHealthMap[node.Name].readyTransitionTimestamp,
@@ -769,20 +769,20 @@ func (nc *Controller) monitorNodeHealth() error {
if nc.useTaintBasedEvictions {
removed, err := nc.markNodeAsReachable(node)
if err != nil {
glog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name)
klog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name)
}
if removed {
glog.V(2).Infof("Node %s is healthy again, removing all taints", node.Name)
klog.V(2).Infof("Node %s is healthy again, removing all taints", node.Name)
}
} else {
if nc.cancelPodEviction(node) {
glog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name)
klog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name)
}
}
// Remove the shutdown taint; this is always needed, whether or not taint-based evictions are in use.
err := nc.markNodeAsNotShutdown(node)
if err != nil {
glog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name)
klog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name)
}
}

@@ -800,23 +800,23 @@ func (nc *Controller) monitorNodeHealth() error {
// Check whether the node has been shut down; if so, do not delete it, but add a taint instead.
shutdown, err := nc.nodeShutdownInCloudProvider(context.TODO(), node)
if err != nil {
glog.Errorf("Error determining if node %v shutdown in cloud: %v", node.Name, err)
klog.Errorf("Error determining if node %v shutdown in cloud: %v", node.Name, err)
}
// node shutdown
if shutdown && err == nil {
err = controller.AddOrUpdateTaintOnNode(nc.kubeClient, node.Name, controller.ShutdownTaint)
if err != nil {
glog.Errorf("Error patching node taints: %v", err)
klog.Errorf("Error patching node taints: %v", err)
}
continue
}
exists, err := nc.nodeExistsInCloudProvider(types.NodeName(node.Name))
if err != nil {
glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err)
klog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err)
continue
}
if !exists {
glog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name)
klog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name)
nodeutil.RecordNodeEvent(nc.recorder, node.Name, string(node.UID), v1.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
go func(nodeName string) {
defer utilruntime.HandleCrash()
@@ -824,7 +824,7 @@ func (nc *Controller) monitorNodeHealth() error {
// is gone. Delete it without worrying about grace
// periods.
if err := nodeutil.ForcefullyDeleteNode(nc.kubeClient, nodeName); err != nil {
glog.Errorf("Unable to forcefully delete node %q: %v", nodeName, err)
klog.Errorf("Unable to forcefully delete node %q: %v", nodeName, err)
}
}(node.Name)
}
@@ -892,21 +892,21 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
}
_, observedCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady)
if !found {
glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name)
klog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name)
savedNodeHealth = &nodeHealthData{
status: &node.Status,
probeTimestamp: nc.now(),
readyTransitionTimestamp: nc.now(),
}
} else if savedCondition == nil && observedCondition != nil {
glog.V(1).Infof("Creating timestamp entry for newly observed Node %s", node.Name)
klog.V(1).Infof("Creating timestamp entry for newly observed Node %s", node.Name)
savedNodeHealth = &nodeHealthData{
status: &node.Status,
probeTimestamp: nc.now(),
readyTransitionTimestamp: nc.now(),
}
} else if savedCondition != nil && observedCondition == nil {
glog.Errorf("ReadyCondition was removed from Status of Node %s", node.Name)
klog.Errorf("ReadyCondition was removed from Status of Node %s", node.Name)
// TODO: figure out what to do in this case. For now we do the same thing as above.
savedNodeHealth = &nodeHealthData{
status: &node.Status,
@@ -918,15 +918,15 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
// If ReadyCondition changed since the last time we checked, we update the transition timestamp to "now",
// otherwise we leave it as it is.
if savedCondition.LastTransitionTime != observedCondition.LastTransitionTime {
glog.V(3).Infof("ReadyCondition for Node %s transitioned from %v to %v", node.Name, savedCondition, observedCondition)
klog.V(3).Infof("ReadyCondition for Node %s transitioned from %v to %v", node.Name, savedCondition, observedCondition)
transitionTime = nc.now()
} else {
transitionTime = savedNodeHealth.readyTransitionTimestamp
}
if glog.V(5) {
glog.V(5).Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, savedNodeHealth.status, node.Status)
if klog.V(5) {
klog.V(5).Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, savedNodeHealth.status, node.Status)
} else {
glog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp.", node.Name)
klog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp.", node.Name)
}
savedNodeHealth = &nodeHealthData{
status: &node.Status,
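The @@ -918 hunk preserves the "if glog.V(5)" guard as "if klog.V(5)": V returns a Verbose value usable in a boolean context, so the expensive %+v formatting of two Status structs is only paid at verbosity 5 and above. A small sketch of the guard, using an invented payload for the demo:

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        klog.InitFlags(nil)
        flag.Set("v", "3")
        flag.Set("logtostderr", "true")
        flag.Parse()

        // Verbose acts like a bool here, mirroring the guard in the hunk:
        // the detailed branch is skipped entirely below verbosity 5.
        if klog.V(5) {
            klog.V(5).Infof("detailed status dump: %+v", struct{ A, B int }{1, 2})
        } else {
            klog.V(3).Infof("status updated")
        }
        klog.Flush()
    }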
@@ -952,7 +952,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
// NodeReady condition or lease was last set longer ago than gracePeriod, so
// update it to Unknown (regardless of its current value) in the master.
if currentReadyCondition == nil {
glog.V(2).Infof("node %v is never updated by kubelet", node.Name)
klog.V(2).Infof("node %v is never updated by kubelet", node.Name)
node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
@@ -962,7 +962,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
LastTransitionTime: nc.now(),
})
} else {
glog.V(4).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v",
klog.V(4).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v",
node.Name, nc.now().Time.Sub(savedNodeHealth.probeTimestamp.Time), observedReadyCondition)
if observedReadyCondition.Status != v1.ConditionUnknown {
currentReadyCondition.Status = v1.ConditionUnknown
@@ -988,7 +988,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
for _, nodeConditionType := range remainingNodeConditionTypes {
_, currentCondition := v1node.GetNodeCondition(&node.Status, nodeConditionType)
if currentCondition == nil {
glog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name)
klog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name)
node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
Type: nodeConditionType,
Status: v1.ConditionUnknown,
@@ -998,7 +998,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
LastTransitionTime: nowTimestamp,
})
} else {
glog.V(4).Infof("node %v hasn't been updated for %+v. Last %v is: %+v",
klog.V(4).Infof("node %v hasn't been updated for %+v. Last %v is: %+v",
node.Name, nc.now().Time.Sub(savedNodeHealth.probeTimestamp.Time), nodeConditionType, currentCondition)
if currentCondition.Status != v1.ConditionUnknown {
currentCondition.Status = v1.ConditionUnknown
@@ -1012,7 +1012,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
_, currentCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady)
if !apiequality.Semantic.DeepEqual(currentCondition, &observedReadyCondition) {
if _, err = nc.kubeClient.CoreV1().Nodes().UpdateStatus(node); err != nil {
glog.Errorf("Error updating node %s: %v", node.Name, err)
klog.Errorf("Error updating node %s: %v", node.Name, err)
return gracePeriod, observedReadyCondition, currentReadyCondition, err
}
nc.nodeHealthMap[node.Name] = &nodeHealthData{
@@ -1041,7 +1041,7 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod
}
newZoneStates[k] = newState
if _, had := nc.zoneStates[k]; !had {
glog.Errorf("Setting initial state for unseen zone: %v", k)
klog.Errorf("Setting initial state for unseen zone: %v", k)
nc.zoneStates[k] = stateInitial
}
}
@@ -1069,12 +1069,12 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod
if !allAreFullyDisrupted || !allWasFullyDisrupted {
// We're switching to full disruption mode
if allAreFullyDisrupted {
glog.V(0).Info("Controller detected that all Nodes are not-Ready. Entering master disruption mode.")
klog.V(0).Info("Controller detected that all Nodes are not-Ready. Entering master disruption mode.")
for i := range nodes {
if nc.useTaintBasedEvictions {
_, err := nc.markNodeAsReachable(nodes[i])
if err != nil {
glog.Errorf("Failed to remove taints from Node %v", nodes[i].Name)
klog.Errorf("Failed to remove taints from Node %v", nodes[i].Name)
}
} else {
nc.cancelPodEviction(nodes[i])
@@ -1096,7 +1096,7 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod
}
// We're exiting full disruption mode
if allWasFullyDisrupted {
glog.V(0).Info("Controller detected that some Nodes are Ready. Exiting master disruption mode.")
|
||||
klog.V(0).Info("Controller detected that some Nodes are Ready. Exiting master disruption mode.")
|
||||
// When exiting disruption mode update probe timestamps on all Nodes.
|
||||
now := nc.now()
|
||||
for i := range nodes {
|
||||
@@ -1119,7 +1119,7 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod
|
||||
if v == newState {
|
||||
continue
|
||||
}
|
||||
glog.V(0).Infof("Controller detected that zone %v is now in state %v.", k, newState)
|
||||
klog.V(0).Infof("Controller detected that zone %v is now in state %v.", k, newState)
|
||||
nc.setLimiterInZone(k, len(zoneToNodeConditions[k]), newState)
|
||||
nc.zoneStates[k] = newState
|
||||
}
|
||||
@@ -1219,7 +1219,7 @@ func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) {
|
||||
flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, scheduler.EvictionRateLimiterBurst))
|
||||
}
|
||||
// Init the metric for the new zone.
|
||||
glog.Infof("Initializing eviction metric for zone: %v", zone)
|
||||
klog.Infof("Initializing eviction metric for zone: %v", zone)
|
||||
evictionsNumber.WithLabelValues(zone).Add(0)
|
||||
}
|
||||
}
|
||||
@@ -1232,7 +1232,7 @@ func (nc *Controller) cancelPodEviction(node *v1.Node) bool {
|
||||
defer nc.evictorLock.Unlock()
|
||||
wasDeleting := nc.zonePodEvictor[zone].Remove(node.Name)
|
||||
if wasDeleting {
|
||||
glog.V(2).Infof("Cancelling pod Eviction on Node: %v", node.Name)
|
||||
klog.V(2).Infof("Cancelling pod Eviction on Node: %v", node.Name)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
@@ -1257,12 +1257,12 @@ func (nc *Controller) markNodeAsReachable(node *v1.Node) (bool, error) {
|
||||
defer nc.evictorLock.Unlock()
|
||||
err := controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, UnreachableTaintTemplate)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
|
||||
klog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
|
||||
return false, err
|
||||
}
|
||||
err = controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, NotReadyTaintTemplate)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
|
||||
klog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
|
||||
return false, err
|
||||
}
|
||||
return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name), nil
|
||||
@@ -1273,7 +1273,7 @@ func (nc *Controller) markNodeAsNotShutdown(node *v1.Node) error {
|
||||
defer nc.evictorLock.Unlock()
|
||||
err := controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, controller.ShutdownTaint)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
|
||||
klog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
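
The tryUpdateNodeHealth hunk above relies on klog preserving glog's Verbose-guard idiom: klog.V(n) returns a Verbose (a boolean type with logging methods), so it can gate expensive %+v formatting directly in an if. A minimal sketch, assuming only the k8s.io/klog package; the logNodeHealth helper below is illustrative, not code from this commit:

package nodehealth

import "k8s.io/klog"

// logNodeHealth mirrors the guard above: the full status dump is only
// formatted when running with -v>=5, otherwise a cheap one-liner is
// logged at -v>=3.
func logNodeHealth(name string, status interface{}) {
	if klog.V(5) {
		klog.V(5).Infof("Node %s ReadyCondition updated. Updating timestamp: %+v", name, status)
	} else {
		klog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp.", name)
	}
}
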
@@ -26,7 +26,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
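
The vendor swap above pairs with a behavioral difference between the two libraries: glog registers its flags on flag.CommandLine in its init(), while klog only registers them when the caller asks via klog.InitFlags. A minimal sketch of a binary wiring that up, assuming just the standard flag package and k8s.io/klog:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // nil registers -v, -vmodule, -logtostderr, ... on flag.CommandLine
	flag.Parse()
	defer klog.Flush()

	klog.Info("klog flags wired up")
	klog.V(4).Infof("visible only with -v=4 or higher")
}
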
@@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/flowcontrol"

"github.com/golang/glog"
"k8s.io/klog"
)

const (
@@ -236,7 +236,7 @@ func (q *RateLimitedTimedQueue) Try(fn ActionFunc) {
for ok {
// rate limit the queue checking
if !q.limiter.TryAccept() {
glog.V(10).Infof("Try rate limited for value: %v", val)
klog.V(10).Infof("Try rate limited for value: %v", val)
// Try again later
break
}
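
The Try loop above backs off through a token-bucket limiter rather than blocking: TryAccept is non-blocking and simply reports whether a token was available. A sketch of the same shape using client-go's flowcontrol package; the QPS and burst values here are illustrative:

package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
	"k8s.io/klog"
)

func main() {
	limiter := flowcontrol.NewTokenBucketRateLimiter(10, 1) // 10 QPS, burst of 1

	for _, val := range []string{"node-a", "node-b", "node-c"} {
		if !limiter.TryAccept() {
			// No token right now: stop draining and retry on the next
			// pass instead of blocking the caller.
			klog.V(10).Infof("Try rate limited for value: %v", val)
			break
		}
		fmt.Println("processing", val)
	}
}
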
@@ -38,7 +38,7 @@ import (
"k8s.io/kubernetes/pkg/apis/core/helper"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"

"github.com/golang/glog"
"k8s.io/klog"
)

const (
@@ -99,7 +99,7 @@ func deletePodHandler(c clientset.Interface, emitEventFunc func(types.Namespaced
return func(args *WorkArgs) error {
ns := args.NamespacedName.Namespace
name := args.NamespacedName.Name
glog.V(0).Infof("NoExecuteTaintManager is deleting Pod: %v", args.NamespacedName.String())
klog.V(0).Infof("NoExecuteTaintManager is deleting Pod: %v", args.NamespacedName.String())
if emitEventFunc != nil {
emitEventFunc(args.NamespacedName)
}
@@ -170,12 +170,12 @@ func getMinTolerationTime(tolerations []v1.Toleration) time.Duration {
func NewNoExecuteTaintManager(c clientset.Interface, getPod GetPodFunc, getNode GetNodeFunc) *NoExecuteTaintManager {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "taint-controller"})
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
if c != nil {
glog.V(0).Infof("Sending events to api server.")
klog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.CoreV1().Events("")})
} else {
glog.Fatalf("kubeClient is nil when starting NodeController")
klog.Fatalf("kubeClient is nil when starting NodeController")
}

tm := &NoExecuteTaintManager{
@@ -195,7 +195,7 @@ func NewNoExecuteTaintManager(c clientset.Interface, getPod GetPodFunc, getNode

// Run starts NoExecuteTaintManager which will run in loop until `stopCh` is closed.
func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
glog.V(0).Infof("Starting NoExecuteTaintManager")
klog.V(0).Infof("Starting NoExecuteTaintManager")

for i := 0; i < UpdateWorkerSize; i++ {
tc.nodeUpdateChannels = append(tc.nodeUpdateChannels, make(chan nodeUpdateItem, NodeUpdateChannelSize))
@@ -356,7 +356,7 @@ func (tc *NoExecuteTaintManager) processPodOnNode(
}
allTolerated, usedTolerations := v1helper.GetMatchingTolerations(taints, tolerations)
if !allTolerated {
glog.V(2).Infof("Not all taints are tolerated after update for Pod %v on %v", podNamespacedName.String(), nodeName)
klog.V(2).Infof("Not all taints are tolerated after update for Pod %v on %v", podNamespacedName.String(), nodeName)
// We're canceling scheduled work (if any), as we're going to delete the Pod right away.
tc.cancelWorkWithEvent(podNamespacedName)
tc.taintEvictionQueue.AddWork(NewWorkArgs(podNamespacedName.Name, podNamespacedName.Namespace), time.Now(), time.Now())
@@ -365,7 +365,7 @@ func (tc *NoExecuteTaintManager) processPodOnNode(
minTolerationTime := getMinTolerationTime(usedTolerations)
// getMinTolerationTime returns negative value to denote infinite toleration.
if minTolerationTime < 0 {
glog.V(4).Infof("New tolerations for %v tolerate forever. Scheduled deletion won't be cancelled if already scheduled.", podNamespacedName.String())
klog.V(4).Infof("New tolerations for %v tolerate forever. Scheduled deletion won't be cancelled if already scheduled.", podNamespacedName.String())
return
}

@@ -388,7 +388,7 @@ func (tc *NoExecuteTaintManager) handlePodUpdate(podUpdate podUpdateItem) {
if apierrors.IsNotFound(err) {
// Delete
podNamespacedName := types.NamespacedName{Namespace: podUpdate.podNamespace, Name: podUpdate.podName}
glog.V(4).Infof("Noticed pod deletion: %#v", podNamespacedName)
klog.V(4).Infof("Noticed pod deletion: %#v", podNamespacedName)
tc.cancelWorkWithEvent(podNamespacedName)
return
}
@@ -403,7 +403,7 @@ func (tc *NoExecuteTaintManager) handlePodUpdate(podUpdate podUpdateItem) {

// Create or Update
podNamespacedName := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}
glog.V(4).Infof("Noticed pod update: %#v", podNamespacedName)
klog.V(4).Infof("Noticed pod update: %#v", podNamespacedName)
nodeName := pod.Spec.NodeName
if nodeName == "" {
return
@@ -427,7 +427,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) {
if err != nil {
if apierrors.IsNotFound(err) {
// Delete
glog.V(4).Infof("Noticed node deletion: %#v", nodeUpdate.nodeName)
klog.V(4).Infof("Noticed node deletion: %#v", nodeUpdate.nodeName)
tc.taintedNodesLock.Lock()
defer tc.taintedNodesLock.Unlock()
delete(tc.taintedNodes, nodeUpdate.nodeName)
@@ -438,12 +438,12 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) {
}

// Create or Update
glog.V(4).Infof("Noticed node update: %#v", nodeUpdate)
klog.V(4).Infof("Noticed node update: %#v", nodeUpdate)
taints := getNoExecuteTaints(node.Spec.Taints)
func() {
tc.taintedNodesLock.Lock()
defer tc.taintedNodesLock.Unlock()
glog.V(4).Infof("Updating known taints on node %v: %v", node.Name, taints)
klog.V(4).Infof("Updating known taints on node %v: %v", node.Name, taints)
if len(taints) == 0 {
delete(tc.taintedNodes, node.Name)
} else {
@@ -452,7 +452,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) {
}()
pods, err := getPodsAssignedToNode(tc.client, node.Name)
if err != nil {
glog.Errorf(err.Error())
klog.Errorf(err.Error())
return
}
if len(pods) == 0 {
@@ -460,7 +460,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) {
}
// Short circuit, to make this controller a bit faster.
if len(taints) == 0 {
glog.V(4).Infof("All taints were removed from the Node %v. Cancelling all evictions...", node.Name)
klog.V(4).Infof("All taints were removed from the Node %v. Cancelling all evictions...", node.Name)
for i := range pods {
tc.cancelWorkWithEvent(types.NamespacedName{Namespace: pods[i].Namespace, Name: pods[i].Name})
}
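
The StartLogging swap in NewNoExecuteTaintManager works because klog.Infof has the func(format string, args ...interface{}) shape that record.EventBroadcaster.StartLogging expects, so the change is a one-identifier edit. A minimal sketch of that wiring, reusing the component name from the hunk above:

package main

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

func main() {
	eventBroadcaster := record.NewBroadcaster()
	// klog.Infof satisfies StartLogging's func(format string, args ...interface{}) parameter.
	eventBroadcaster.StartLogging(klog.Infof)
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "taint-controller"})
	recorder.Eventf(&v1.ObjectReference{Kind: "Node", Name: "node-a"}, v1.EventTypeNormal, "Example", "event routed to klog")
	klog.Flush()
}
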
@@ -22,7 +22,7 @@ import (

"k8s.io/apimachinery/pkg/types"

"github.com/golang/glog"
"k8s.io/klog"
)

// WorkArgs keeps arguments that will be passed to the function executed by the worker.
@@ -107,12 +107,12 @@ func (q *TimedWorkerQueue) getWrappedWorkerFunc(key string) func(args *WorkArgs)
// AddWork adds a work to the WorkerQueue which will be executed not earlier than `fireAt`.
func (q *TimedWorkerQueue) AddWork(args *WorkArgs, createdAt time.Time, fireAt time.Time) {
key := args.KeyFromWorkArgs()
glog.V(4).Infof("Adding TimedWorkerQueue item %v at %v to be fired at %v", key, createdAt, fireAt)
klog.V(4).Infof("Adding TimedWorkerQueue item %v at %v to be fired at %v", key, createdAt, fireAt)

q.Lock()
defer q.Unlock()
if _, exists := q.workers[key]; exists {
glog.Warningf("Trying to add already existing work for %+v. Skipping.", args)
klog.Warningf("Trying to add already existing work for %+v. Skipping.", args)
return
}
worker := CreateWorker(args, createdAt, fireAt, q.getWrappedWorkerFunc(key))
@@ -126,7 +126,7 @@ func (q *TimedWorkerQueue) CancelWork(key string) bool {
worker, found := q.workers[key]
result := false
if found {
glog.V(4).Infof("Cancelling TimedWorkerQueue item %v at %v", key, time.Now())
klog.V(4).Infof("Cancelling TimedWorkerQueue item %v at %v", key, time.Now())
if worker != nil {
result = true
worker.Cancel()
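
For readers outside the tree, the AddWork/CancelWork pair above amounts to a keyed, deduplicated timer map. A stripped-down analogue using only the standard library and klog; the timedQueue type and its method names are illustrative, not the commit's actual implementation:

package main

import (
	"sync"
	"time"

	"k8s.io/klog"
)

type timedQueue struct {
	sync.Mutex
	workers map[string]*time.Timer
}

// addWork schedules fn to fire at fireAt unless work for key already exists,
// mirroring the "already existing work" warning in the hunk above.
func (q *timedQueue) addWork(key string, fireAt time.Time, fn func()) {
	q.Lock()
	defer q.Unlock()
	if _, exists := q.workers[key]; exists {
		klog.Warningf("Trying to add already existing work for %q. Skipping.", key)
		return
	}
	q.workers[key] = time.AfterFunc(time.Until(fireAt), fn)
}

// cancelWork stops a pending timer and reports whether one was cancelled.
func (q *timedQueue) cancelWork(key string) bool {
	q.Lock()
	defer q.Unlock()
	if t, found := q.workers[key]; found {
		klog.V(4).Infof("Cancelling item %v at %v", key, time.Now())
		delete(q.workers, key)
		return t.Stop()
	}
	return false
}

func main() {
	q := &timedQueue{workers: map[string]*time.Timer{}}
	q.addWork("default/pod-1", time.Now().Add(50*time.Millisecond), func() { klog.Info("evicting default/pod-1") })
	q.addWork("default/pod-1", time.Now(), func() {}) // duplicate key: skipped
	time.Sleep(100 * time.Millisecond)
	klog.Flush()
}
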
@@ -40,7 +40,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -21,7 +21,6 @@ import (
"math"
"time"

"github.com/golang/glog"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
@@ -46,6 +45,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/controller"
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
@@ -108,7 +108,7 @@ func NewHorizontalController(

) *HorizontalController {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartLogging(klog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})

@@ -153,8 +153,8 @@ func (a *HorizontalController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer a.queue.ShutDown()

glog.Infof("Starting HPA controller")
defer glog.Infof("Shutting down HPA controller")
klog.Infof("Starting HPA controller")
defer klog.Infof("Shutting down HPA controller")

if !controller.WaitForCacheSync("HPA", stopCh, a.hpaListerSynced, a.podListerSynced) {
return
@@ -197,7 +197,7 @@ func (a *HorizontalController) deleteHPA(obj interface{}) {
func (a *HorizontalController) worker() {
for a.processNextWorkItem() {
}
glog.Infof("horizontal pod autoscaler controller worker shutting down")
klog.Infof("horizontal pod autoscaler controller worker shutting down")
}

func (a *HorizontalController) processNextWorkItem() bool {
@@ -306,7 +306,7 @@ func (a *HorizontalController) reconcileKey(key string) error {

hpa, err := a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name)
if errors.IsNotFound(err) {
glog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace)
klog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace)
delete(a.recommendations, key)
return nil
}
@@ -553,7 +553,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
return fmt.Errorf("failed to compute desired number of replicas based on listed metrics for %s: %v", reference, err)
}

glog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, timestamp, reference)
klog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, timestamp, reference)

rescaleMetric := ""
if metricDesiredReplicas > desiredReplicas {
@@ -585,10 +585,10 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
}
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededRescale", "the HPA controller was able to update the target scale to %d", desiredReplicas)
a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
glog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s",
klog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s",
hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
} else {
glog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime)
klog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime)
desiredReplicas = currentReplicas
}

@@ -770,7 +770,7 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
}
glog.V(2).Infof("Successfully updated status for %s", hpa.Name)
klog.V(2).Infof("Successfully updated status for %s", hpa.Name)
return nil
}
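
The HPA's worker/processNextWorkItem pair above follows the standard client-go workqueue loop; the klog line simply marks the loop exiting once the queue shuts down. A reduced sketch of that loop, with a hypothetical reconcile function standing in for reconcileKey:

package main

import (
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
)

func processNextWorkItem(queue workqueue.RateLimitingInterface) bool {
	key, quit := queue.Get()
	if quit {
		return false
	}
	defer queue.Done(key)

	if err := reconcile(key.(string)); err != nil {
		queue.AddRateLimited(key) // requeue with back-off on failure
		return true
	}
	queue.Forget(key) // clear back-off state on success
	return true
}

func reconcile(key string) error {
	klog.V(4).Infof("reconciling %s", key)
	return nil
}

func main() {
	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "horizontalpodautoscaler")
	queue.Add("default/my-hpa")
	queue.ShutDown() // Get still drains the queued item, then reports quit

	for processNextWorkItem(queue) {
	}
	klog.Infof("horizontal pod autoscaler controller worker shutting down")
	klog.Flush()
}
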
@@ -23,8 +23,8 @@ go_library(
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -22,8 +22,8 @@ import (
"strings"
"time"

"github.com/golang/glog"
heapster "k8s.io/heapster/metrics/api/v1/types"
"k8s.io/klog"
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

autoscaling "k8s.io/api/autoscaling/v2beta2"
@@ -73,7 +73,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
return nil, time.Time{}, fmt.Errorf("failed to get pod resource metrics: %v", err)
}

glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
klog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

metrics := metricsapi.PodMetricsList{}
err = json.Unmarshal(resultRaw, &metrics)
@@ -94,7 +94,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
resValue, found := c.Usage[v1.ResourceName(resource)]
if !found {
missing = true
glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
continue
}
podSum += resValue.MilliValue()
@@ -150,7 +150,7 @@ func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string
return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
}

glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
klog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

if len(metrics.Items) != len(podNames) {
// if we get too many metrics or two few metrics, we have no way of knowing which metric goes to which pod
@@ -20,7 +20,7 @@ import (
"fmt"
"time"

"github.com/golang/glog"
"k8s.io/klog"

autoscaling "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
@@ -81,7 +81,7 @@ func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, name
resValue, found := c.Usage[v1.ResourceName(resource)]
if !found {
missing = true
glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
break // containers loop
}
podSum += resValue.MilliValue()
@@ -26,7 +26,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -34,7 +34,7 @@ import (
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"

"github.com/golang/glog"
"k8s.io/klog"
)

const (
@@ -59,7 +59,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor
kubeClient: kubeClient,
terminatedPodThreshold: terminatedPodThreshold,
deletePod: func(namespace, name string) error {
glog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name)
klog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name)
return kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
},
}
@@ -73,8 +73,8 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor
func (gcc *PodGCController) Run(stop <-chan struct{}) {
defer utilruntime.HandleCrash()

glog.Infof("Starting GC controller")
defer glog.Infof("Shutting down GC controller")
klog.Infof("Starting GC controller")
defer klog.Infof("Shutting down GC controller")

if !controller.WaitForCacheSync("GC", stop, gcc.podListerSynced) {
return
@@ -88,7 +88,7 @@ func (gcc *PodGCController) Run(stop <-chan struct{}) {
func (gcc *PodGCController) gc() {
pods, err := gcc.podLister.List(labels.Everything())
if err != nil {
glog.Errorf("Error while listing all Pods: %v", err)
klog.Errorf("Error while listing all Pods: %v", err)
return
}
if gcc.terminatedPodThreshold > 0 {
@@ -122,7 +122,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
deleteCount = terminatedPodCount
}
if deleteCount > 0 {
glog.Infof("garbage collecting %v pods", deleteCount)
klog.Infof("garbage collecting %v pods", deleteCount)
}

var wait sync.WaitGroup
@@ -141,7 +141,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {

// gcOrphaned deletes pods that are bound to nodes that don't exist.
func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) {
glog.V(4).Infof("GC'ing orphaned")
klog.V(4).Infof("GC'ing orphaned")
// We want to get list of Nodes from the etcd, to make sure that it's as fresh as possible.
nodes, err := gcc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
@@ -159,29 +159,29 @@ func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) {
if nodeNames.Has(pod.Spec.NodeName) {
continue
}
glog.V(2).Infof("Found orphaned Pod %v/%v assigned to the Node %v. Deleting.", pod.Namespace, pod.Name, pod.Spec.NodeName)
klog.V(2).Infof("Found orphaned Pod %v/%v assigned to the Node %v. Deleting.", pod.Namespace, pod.Name, pod.Spec.NodeName)
if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
utilruntime.HandleError(err)
} else {
glog.V(0).Infof("Forced deletion of orphaned Pod %v/%v succeeded", pod.Namespace, pod.Name)
klog.V(0).Infof("Forced deletion of orphaned Pod %v/%v succeeded", pod.Namespace, pod.Name)
}
}
}

// gcUnscheduledTerminating deletes pods that are terminating and haven't been scheduled to a particular node.
func (gcc *PodGCController) gcUnscheduledTerminating(pods []*v1.Pod) {
glog.V(4).Infof("GC'ing unscheduled pods which are terminating.")
klog.V(4).Infof("GC'ing unscheduled pods which are terminating.")

for _, pod := range pods {
if pod.DeletionTimestamp == nil || len(pod.Spec.NodeName) > 0 {
continue
}

glog.V(2).Infof("Found unscheduled terminating Pod %v/%v not assigned to any Node. Deleting.", pod.Namespace, pod.Name)
klog.V(2).Infof("Found unscheduled terminating Pod %v/%v not assigned to any Node. Deleting.", pod.Namespace, pod.Name)
if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
utilruntime.HandleError(err)
} else {
glog.V(0).Infof("Forced deletion of unscheduled terminating Pod %v/%v succeeded", pod.Namespace, pod.Name)
klog.V(0).Infof("Forced deletion of unscheduled terminating Pod %v/%v succeeded", pod.Namespace, pod.Name)
}
}
}
@@ -38,7 +38,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/integer:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -35,7 +35,6 @@ import (
"sync"
"time"

"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -55,6 +54,7 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/integer"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"
@@ -108,7 +108,7 @@ type ReplicaSetController struct {
// NewReplicaSetController configures a replica set controller with the specified event recorder
func NewReplicaSetController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
return NewBaseController(rsInformer, podInformer, kubeClient, burstReplicas,
apps.SchemeGroupVersion.WithKind("ReplicaSet"),
@@ -179,8 +179,8 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
defer rsc.queue.ShutDown()

controllerName := strings.ToLower(rsc.Kind)
glog.Infof("Starting %v controller", controllerName)
defer glog.Infof("Shutting down %v controller", controllerName)
klog.Infof("Starting %v controller", controllerName)
defer klog.Infof("Shutting down %v controller", controllerName)

if !controller.WaitForCacheSync(rsc.Kind, stopCh, rsc.podListerSynced, rsc.rsListerSynced) {
return
@@ -246,7 +246,7 @@ func (rsc *ReplicaSetController) updateRS(old, cur interface{}) {
// that bad as ReplicaSets that haven't met expectations yet won't
// sync, and all the listing is done using local stores.
if *(oldRS.Spec.Replicas) != *(curRS.Spec.Replicas) {
glog.V(4).Infof("%v %v updated. Desired pod count change: %d->%d", rsc.Kind, curRS.Name, *(oldRS.Spec.Replicas), *(curRS.Spec.Replicas))
klog.V(4).Infof("%v %v updated. Desired pod count change: %d->%d", rsc.Kind, curRS.Name, *(oldRS.Spec.Replicas), *(curRS.Spec.Replicas))
}
rsc.enqueueReplicaSet(cur)
}
@@ -272,7 +272,7 @@ func (rsc *ReplicaSetController) addPod(obj interface{}) {
if err != nil {
return
}
glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)
klog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)
rsc.expectations.CreationObserved(rsKey)
rsc.enqueueReplicaSet(rs)
return
@@ -286,7 +286,7 @@ func (rsc *ReplicaSetController) addPod(obj interface{}) {
if len(rss) == 0 {
return
}
glog.V(4).Infof("Orphan Pod %s created: %#v.", pod.Name, pod)
klog.V(4).Infof("Orphan Pod %s created: %#v.", pod.Name, pod)
for _, rs := range rss {
rsc.enqueueReplicaSet(rs)
}
@@ -335,7 +335,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
if rs == nil {
return
}
glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
klog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
rsc.enqueueReplicaSet(rs)
// TODO: MinReadySeconds in the Pod will generate an Available condition to be added in
// the Pod status which in turn will trigger a requeue of the owning replica set thus
@@ -345,7 +345,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
// Note that this still suffers from #29229, we are just moving the problem one level
// "closer" to kubelet (from the deployment to the replica set controller).
if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rs.Spec.MinReadySeconds > 0 {
glog.V(2).Infof("%v %q will be enqueued after %ds for availability check", rsc.Kind, rs.Name, rs.Spec.MinReadySeconds)
klog.V(2).Infof("%v %q will be enqueued after %ds for availability check", rsc.Kind, rs.Name, rs.Spec.MinReadySeconds)
// Add a second to avoid milliseconds skew in AddAfter.
// See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
rsc.enqueueReplicaSetAfter(rs, (time.Duration(rs.Spec.MinReadySeconds)*time.Second)+time.Second)
@@ -360,7 +360,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
if len(rss) == 0 {
return
}
glog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
klog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
for _, rs := range rss {
rsc.enqueueReplicaSet(rs)
}
@@ -402,7 +402,7 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) {
if err != nil {
return
}
glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod)
klog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod)
rsc.expectations.DeletionObserved(rsKey, controller.PodKey(pod))
rsc.enqueueReplicaSet(rs)
}
@@ -474,7 +474,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
// into a performance bottleneck. We should generate a UID for the pod
// beforehand and store it via ExpectCreations.
rsc.expectations.ExpectCreations(rsKey, diff)
glog.V(2).Infof("Too few replicas for %v %s/%s, need %d, creating %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
klog.V(2).Infof("Too few replicas for %v %s/%s, need %d, creating %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
// and double with each successful iteration in a kind of "slow start".
// This handles attempts to start large numbers of pods that would
@@ -511,7 +511,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
// The skipped pods will be retried later. The next controller resync will
// retry the slow start process.
if skippedPods := diff - successfulCreations; skippedPods > 0 {
glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for %v %v/%v", skippedPods, rsc.Kind, rs.Namespace, rs.Name)
klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for %v %v/%v", skippedPods, rsc.Kind, rs.Namespace, rs.Name)
for i := 0; i < skippedPods; i++ {
// Decrement the expected number of creates because the informer won't observe this pod
rsc.expectations.CreationObserved(rsKey)
@@ -522,7 +522,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
if diff > rsc.burstReplicas {
diff = rsc.burstReplicas
}
glog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
klog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)

// Choose which Pods to delete, preferring those in earlier phases of startup.
podsToDelete := getPodsToDelete(filteredPods, diff)
@@ -544,7 +544,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
if err := rsc.podControl.DeletePod(rs.Namespace, targetPod.Name, rs); err != nil {
// Decrement the expected number of deletes because the informer won't observe this deletion
podKey := controller.PodKey(targetPod)
glog.V(2).Infof("Failed to delete %v, decrementing expectations for %v %s/%s", podKey, rsc.Kind, rs.Namespace, rs.Name)
klog.V(2).Infof("Failed to delete %v, decrementing expectations for %v %s/%s", podKey, rsc.Kind, rs.Namespace, rs.Name)
rsc.expectations.DeletionObserved(rsKey, podKey)
errCh <- err
}
@@ -572,7 +572,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {

startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing %v %q (%v)", rsc.Kind, key, time.Since(startTime))
klog.V(4).Infof("Finished syncing %v %q (%v)", rsc.Kind, key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -581,7 +581,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
}
rs, err := rsc.rsLister.ReplicaSets(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(4).Infof("%v %v has been deleted", rsc.Kind, key)
klog.V(4).Infof("%v %v has been deleted", rsc.Kind, key)
rsc.expectations.DeleteExpectations(key)
return nil
}
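
The manageReplicas comments above describe creating pods in "slow start" batches. A simplified sketch of that batching, under the assumption that only the batch-size doubling and the early stop matter here; the real helper also aggregates the individual errors:

package main

import (
	"fmt"
	"sync"
)

// slowStartBatch issues count calls to fn in batches that start at
// initialBatchSize and double after every fully successful batch; it stops
// scheduling further batches after the first failure, which is what the
// "Slow-start failure" log line above records. It returns the number of
// successful calls.
func slowStartBatch(count, initialBatchSize int, fn func() error) int {
	remaining := count
	successes := 0
	for batchSize := min(remaining, initialBatchSize); batchSize > 0; batchSize = min(2*batchSize, remaining) {
		var wg sync.WaitGroup
		errCh := make(chan error, batchSize)
		for i := 0; i < batchSize; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				if err := fn(); err != nil {
					errCh <- err
				}
			}()
		}
		wg.Wait()
		successes += batchSize - len(errCh)
		if len(errCh) > 0 {
			return successes // skip the remaining batches
		}
		remaining -= batchSize
	}
	return successes
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	created := slowStartBatch(10, 1, func() error { return nil }) // batches of 1, 2, 4, 3
	fmt.Println("created", created, "pods")
}
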
@@ -22,7 +22,7 @@ import (
"fmt"
"reflect"

"github.com/golang/glog"
"k8s.io/klog"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -55,7 +55,7 @@ func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSe
var getErr, updateErr error
var updatedRS *apps.ReplicaSet
for i, rs := 0, rs; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating status for %v: %s/%s, ", rs.Kind, rs.Namespace, rs.Name) +
klog.V(4).Infof(fmt.Sprintf("Updating status for %v: %s/%s, ", rs.Kind, rs.Namespace, rs.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) +
fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
fmt.Sprintf("readyReplicas %d->%d, ", rs.Status.ReadyReplicas, newStatus.ReadyReplicas) +

@@ -38,7 +38,7 @@ go_library(
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -26,13 +26,13 @@ limitations under the License.
package replication

import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/replicaset"
)
@@ -51,7 +51,7 @@ type ReplicationManager struct {
// NewReplicationManager configures a replication manager with the specified event recorder
func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
return &ReplicationManager{
*replicaset.NewBaseController(informerAdapter{rcInformer}, podInformer, clientsetAdapter{kubeClient}, burstReplicas,
@@ -37,7 +37,7 @@ go_library(
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -22,7 +22,7 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
@@ -180,7 +180,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*Resou

// enqueueAll is called at the fullResyncPeriod interval to force a full recalculation of quota usage statistics
func (rq *ResourceQuotaController) enqueueAll() {
defer glog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
defer klog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
rqs, err := rq.rqLister.List(labels.Everything())
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to enqueue all - error listing resource quotas: %v", err))
@@ -200,7 +200,7 @@ func (rq *ResourceQuotaController) enqueueAll() {
func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
klog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}
rq.queue.Add(key)
@@ -209,7 +209,7 @@ func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
func (rq *ResourceQuotaController) addQuota(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
klog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}

@@ -261,7 +261,7 @@ func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface)
return func() {
for {
if quit := workFunc(); quit {
glog.Infof("resource quota controller worker shutting down")
klog.Infof("resource quota controller worker shutting down")
return
}
}
@@ -273,8 +273,8 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer rq.queue.ShutDown()

glog.Infof("Starting resource quota controller")
defer glog.Infof("Shutting down resource quota controller")
klog.Infof("Starting resource quota controller")
defer klog.Infof("Shutting down resource quota controller")

if rq.quotaMonitor != nil {
go rq.quotaMonitor.Run(stopCh)
@@ -298,7 +298,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -307,11 +307,11 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
}
quota, err := rq.rqLister.ResourceQuotas(namespace).Get(name)
if errors.IsNotFound(err) {
glog.Infof("Resource quota has been deleted %v", key)
klog.Infof("Resource quota has been deleted %v", key)
return nil
}
if err != nil {
glog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
klog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
return err
}
return rq.syncResourceQuota(quota)
@@ -426,12 +426,12 @@ func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, p

// Decide whether discovery has reported a change.
if reflect.DeepEqual(oldResources, newResources) {
glog.V(4).Infof("no resource updates from discovery, skipping resource quota sync")
klog.V(4).Infof("no resource updates from discovery, skipping resource quota sync")
return
}

// Something has changed, so track the new state and perform a sync.
glog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources)
klog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources)
oldResources = newResources

// Ensure workers are paused to avoid processing events before informers
@@ -21,7 +21,7 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
@@ -173,11 +173,11 @@ func (qm *QuotaMonitor) controllerFor(resource schema.GroupVersionResource) (cac
}
shared, err := qm.informerFactory.ForResource(resource)
if err == nil {
glog.V(4).Infof("QuotaMonitor using a shared informer for resource %q", resource.String())
klog.V(4).Infof("QuotaMonitor using a shared informer for resource %q", resource.String())
shared.Informer().AddEventHandlerWithResyncPeriod(handlers, qm.resyncPeriod())
return shared.Informer().GetController(), nil
}
glog.V(4).Infof("QuotaMonitor unable to use a shared informer for resource %q: %v", resource.String(), err)
klog.V(4).Infof("QuotaMonitor unable to use a shared informer for resource %q: %v", resource.String(), err)

// TODO: if we can share storage with garbage collector, it may make sense to support other resources
// until that time, aggregated api servers will have to run their own controller to reconcile their own quota.
@@ -225,7 +225,7 @@ func (qm *QuotaMonitor) SyncMonitors(resources map[schema.GroupVersionResource]s
listResourceFunc := generic.ListResourceUsingListerFunc(listerFunc, resource)
evaluator = generic.NewObjectCountEvaluator(resource.GroupResource(), listResourceFunc, "")
qm.registry.Add(evaluator)
glog.Infof("QuotaMonitor created object count evaluator for %s", resource.GroupResource())
klog.Infof("QuotaMonitor created object count evaluator for %s", resource.GroupResource())
}

// track the monitor
@@ -240,7 +240,7 @@ func (qm *QuotaMonitor) SyncMonitors(resources map[schema.GroupVersionResource]s
}
}

glog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
klog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
// NewAggregate returns nil if errs is 0-length
return utilerrors.NewAggregate(errs)
}
@@ -272,7 +272,7 @@ func (qm *QuotaMonitor) StartMonitors() {
started++
}
}
glog.V(4).Infof("QuotaMonitor started %d new monitors, %d currently running", started, len(monitors))
klog.V(4).Infof("QuotaMonitor started %d new monitors, %d currently running", started, len(monitors))
}

// IsSynced returns true if any monitors exist AND all those monitors'
@@ -298,8 +298,8 @@ func (qm *QuotaMonitor) IsSynced() bool {
// Run sets the stop channel and starts monitor execution until stopCh is
// closed. Any running monitors will be stopped before Run returns.
func (qm *QuotaMonitor) Run(stopCh <-chan struct{}) {
glog.Infof("QuotaMonitor running")
defer glog.Infof("QuotaMonitor stopping")
klog.Infof("QuotaMonitor running")
defer klog.Infof("QuotaMonitor stopping")

// Set up the stop channel.
qm.monitorLock.Lock()
@@ -323,7 +323,7 @@ func (qm *QuotaMonitor) Run(stopCh <-chan struct{}) {
close(monitor.stopCh)
}
}
glog.Infof("QuotaMonitor stopped %d of %d monitors", stopped, len(monitors))
klog.Infof("QuotaMonitor stopped %d of %d monitors", stopped, len(monitors))
}

func (qm *QuotaMonitor) runProcessResourceChanges() {
@@ -349,7 +349,7 @@ func (qm *QuotaMonitor) processResourceChanges() bool {
utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
return true
}
glog.V(4).Infof("QuotaMonitor process object: %s, namespace %s, name %s, uid %s, event type %v", event.gvr.String(), accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
klog.V(4).Infof("QuotaMonitor process object: %s, namespace %s, name %s, uid %s, event type %v", event.gvr.String(), accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
qm.replenishmentFunc(event.gvr.GroupResource(), accessor.GetNamespace())
return true
}
@@ -33,7 +33,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -23,7 +23,7 @@ import (
"sync"
"time"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -75,11 +75,11 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInform
}

if clusterCIDR == nil {
glog.Fatal("RouteController: Must specify clusterCIDR.")
klog.Fatal("RouteController: Must specify clusterCIDR.")
}

eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "route_controller"})

rc := &RouteController{
@@ -99,8 +99,8 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInform
func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration) {
defer utilruntime.HandleCrash()

glog.Info("Starting route controller")
defer glog.Info("Shutting down route controller")
klog.Info("Starting route controller")
defer klog.Info("Shutting down route controller")

if !controller.WaitForCacheSync("route", stopCh, rc.nodeListerSynced) {
return
@@ -117,7 +117,7 @@ func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration)
// trigger reconciliation for that node.
go wait.NonSlidingUntil(func() {
if err := rc.reconcileNodeRoutes(); err != nil {
glog.Errorf("Couldn't reconcile node routes: %v", err)
klog.Errorf("Couldn't reconcile node routes: %v", err)
}
}, syncPeriod, stopCh)

@@ -173,7 +173,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
// Ensure that we don't have more than maxConcurrentRouteCreations
// CreateRoute calls in flight.
rateLimiter <- struct{}{}
glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
klog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
err := rc.routes.CreateRoute(context.TODO(), rc.clusterName, nameHint, route)
<-rateLimiter

@@ -189,14 +189,14 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
Namespace: "",
}, v1.EventTypeWarning, "FailedToCreateRoute", msg)
}
glog.V(4).Infof(msg)
klog.V(4).Infof(msg)
return err
}
glog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
klog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
return nil
})
if err != nil {
glog.Errorf("Could not create route %s %s for node %s: %v", nameHint, route.DestinationCIDR, nodeName, err)
klog.Errorf("Could not create route %s %s for node %s: %v", nameHint, route.DestinationCIDR, nodeName, err)
}
}(nodeName, nameHint, route)
} else {
@@ -216,11 +216,11 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
// Delete the route.
go func(route *cloudprovider.Route, startTime time.Time) {
defer wg.Done()
glog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR)
klog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR)
if err := rc.routes.DeleteRoute(context.TODO(), rc.clusterName, route); err != nil {
glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Since(startTime), err)
klog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Since(startTime), err)
} else {
glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Since(startTime))
klog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Since(startTime))
}
}(route, time.Now())
}
@@ -254,13 +254,13 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro
})
}
if err != nil {
glog.V(4).Infof("Error updating node %s, retrying: %v", nodeName, err)
klog.V(4).Infof("Error updating node %s, retrying: %v", nodeName, err)
}
return err
})

if err != nil {
glog.Errorf("Error updating node %s: %v", nodeName, err)
klog.Errorf("Error updating node %s: %v", nodeName, err)
}

return err
@@ -269,7 +269,7 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro
func (rc *RouteController) isResponsibleForRoute(route *cloudprovider.Route) bool {
_, cidr, err := net.ParseCIDR(route.DestinationCIDR)
if err != nil {
glog.Errorf("Ignoring route %s, unparsable CIDR: %v", route.Name, err)
klog.Errorf("Ignoring route %s, unparsable CIDR: %v", route.Name, err)
return false
}
// Not responsible if this route's CIDR is not within our clusterCIDR
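
One detail in the reconcile hunk above is easy to miss: rateLimiter is a buffered channel used as a counting semaphore, so the send blocks once maxConcurrentRouteCreations CreateRoute calls are in flight. A standalone sketch of that pattern; the constant's value and the createRoute stub are illustrative:

package main

import (
	"fmt"
	"sync"
	"time"
)

const maxConcurrentRouteCreations = 2

func main() {
	sem := make(chan struct{}, maxConcurrentRouteCreations)
	var wg sync.WaitGroup

	for _, node := range []string{"node-a", "node-b", "node-c", "node-d"} {
		wg.Add(1)
		go func(node string) {
			defer wg.Done()
			sem <- struct{}{} // blocks while two creations are already in flight
			defer func() { <-sem }()
			createRoute(node)
		}(node)
	}
	wg.Wait()
}

func createRoute(node string) {
	fmt.Println("creating route for", node)
	time.Sleep(10 * time.Millisecond) // stand-in for the cloud API call
}
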
@@ -33,7 +33,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -24,7 +24,6 @@ import (

"reflect"

"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/runtime"
@@ -40,6 +39,7 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/controller"
kubefeatures "k8s.io/kubernetes/pkg/features"
@@ -110,7 +110,7 @@ func New(
clusterName string,
) (*ServiceController, error) {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartLogging(klog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "service-controller"})

@@ -160,7 +160,7 @@ func New(
func (s *ServiceController) enqueueService(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %#v: %v", obj, err)
klog.Errorf("Couldn't get key for object %#v: %v", obj, err)
return
}
s.queue.Add(key)
@@ -180,8 +180,8 @@ func (s *ServiceController) Run(stopCh <-chan struct{}, workers int) {
defer runtime.HandleCrash()
defer s.queue.ShutDown()

glog.Info("Starting service controller")
defer glog.Info("Shutting down service controller")
klog.Info("Starting service controller")
defer klog.Info("Shutting down service controller")

if !controller.WaitForCacheSync("service", stopCh, s.serviceListerSynced, s.nodeListerSynced) {
return
@@ -287,7 +287,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S
return fmt.Errorf("error getting LB for service %s: %v", key, err)
}
if exists {
glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key)
klog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key)
s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
if err := s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service); err != nil {
return err
@@ -297,7 +297,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S

newState = &v1.LoadBalancerStatus{}
} else {
glog.V(2).Infof("Ensuring LB for service %s", key)
klog.V(2).Infof("Ensuring LB for service %s", key)

// TODO: We could do a dry-run here if wanted to avoid the spurious cloud-calls & events when we restart

@@ -327,7 +327,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S
return nil
}
} else {
glog.V(2).Infof("Not persisting unchanged LoadBalancerStatus for service %s to registry.", key)
klog.V(2).Infof("Not persisting unchanged LoadBalancerStatus for service %s to registry.", key)
}

return nil
@@ -344,7 +344,7 @@ func (s *ServiceController) persistUpdate(service *v1.Service) error {
// out so that we can process the delete, which we should soon be receiving
// if we haven't already.
if errors.IsNotFound(err) {
glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
klog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
service.Namespace, service.Name, err)
return nil
}
@@ -353,7 +353,7 @@ func (s *ServiceController) persistUpdate(service *v1.Service) error {
if errors.IsConflict(err) {
return err
}
glog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v",
klog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v",
service.Namespace, service.Name, err)
time.Sleep(clientRetryInterval)
}
@@ -613,7 +613,7 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate {
// We consider the node for load balancing only when its NodeReady condition status
// is ConditionTrue
if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
return false
}
}
@@ -626,7 +626,7 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate {
func (s *ServiceController) nodeSyncLoop() {
newHosts, err := s.nodeLister.ListWithPredicate(getNodeConditionPredicate())
if err != nil {
glog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err)
klog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err)
return
}
if nodeSlicesEqualForLB(newHosts, s.knownHosts) {
@@ -636,7 +636,7 @@ func (s *ServiceController) nodeSyncLoop() {
return
}

glog.Infof("Detected change in list of current cluster nodes. New node set: %v",
klog.Infof("Detected change in list of current cluster nodes. New node set: %v",
nodeNames(newHosts))

// Try updating all services, and save the ones that fail to try again next
@@ -644,7 +644,7 @@ func (s *ServiceController) nodeSyncLoop() {
s.servicesToUpdate = s.cache.allServices()
numServices := len(s.servicesToUpdate)
s.servicesToUpdate = s.updateLoadBalancerHosts(s.servicesToUpdate, newHosts)
glog.Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes",
klog.Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes",
numServices-len(s.servicesToUpdate), numServices)

s.knownHosts = newHosts
@@ -660,7 +660,7 @@ func (s *ServiceController) updateLoadBalancerHosts(services []*v1.Service, host
return
}
if err := s.lockedUpdateLoadBalancerHosts(service, hosts); err != nil {
glog.Errorf("External error while updating load balancer: %v.", err)
klog.Errorf("External error while updating load balancer: %v.", err)
servicesToRetry = append(servicesToRetry, service)
}
}()
@@ -689,7 +689,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *v1.Service, h

// It's only an actual error if the load balancer still exists.
if _, exists, err := s.balancer.GetLoadBalancer(context.TODO(), s.clusterName, service); err != nil {
glog.Errorf("External error while checking if load balancer %q exists: name, %v", s.balancer.GetLoadBalancerName(context.TODO(), s.clusterName, service), err)
klog.Errorf("External error while checking if load balancer %q exists: name, %v", s.balancer.GetLoadBalancerName(context.TODO(), s.clusterName, service), err)
} else if !exists {
return nil
}
@@ -713,7 +713,7 @@ func (s *ServiceController) syncService(key string) error {
startTime := time.Now()
var cachedService *cachedService
defer func() {
glog.V(4).Infof("Finished syncing service %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing service %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -726,10 +726,10 @@ func (s *ServiceController) syncService(key string) error {
switch {
case errors.IsNotFound(err):
// service absence in store means watcher caught the deletion, ensure LB info is cleaned
glog.Infof("Service has been deleted %v. Attempting to cleanup load balancer resources", key)
klog.Infof("Service has been deleted %v. Attempting to cleanup load balancer resources", key)
err = s.processServiceDeletion(key)
case err != nil:
glog.Infof("Unable to retrieve service %v from store: %v", key, err)
klog.Infof("Unable to retrieve service %v from store: %v", key, err)
default:
cachedService = s.cache.getOrCreate(key)
err = s.processServiceUpdate(cachedService, service, key)
@@ -744,7 +744,7 @@ func (s *ServiceController) syncService(key string) error {
func (s *ServiceController) processServiceDeletion(key string) error {
cachedService, ok := s.cache.get(key)
if !ok {
glog.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion", key)
klog.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion", key)
return nil
}
return s.processLoadBalancerDelete(cachedService, key)

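In the New() hunk above, the logger handed to the event broadcaster is the only thing that changes: StartLogging accepts any func(format string, args ...interface{}), and klog.Infof satisfies that shape exactly as glog.Infof did, so no adapter is needed. A minimal sketch of the pattern in isolation; the component name is illustrative and not from this commit.

package main

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)

	// Every event the broadcaster sees is also written to the log
	// through the Printf-shaped callback given to StartLogging.
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(klog.Infof)

	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
	_ = recorder // a real controller emits events through recorder.Event(...)
}
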
@@ -34,7 +34,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -59,8 +59,8 @@ go_test(
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -20,7 +20,6 @@ import (
"fmt"
"time"

"github.com/golang/glog"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -32,6 +31,7 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"
)
@@ -112,8 +112,8 @@ func (c *ServiceAccountsController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()

glog.Infof("Starting service account controller")
defer glog.Infof("Shutting down service account controller")
klog.Infof("Starting service account controller")
defer klog.Infof("Shutting down service account controller")

if !controller.WaitForCacheSync("service account", stopCh, c.saListerSynced, c.nsListerSynced) {
return
@@ -183,7 +183,7 @@ func (c *ServiceAccountsController) processNextWorkItem() bool {
func (c *ServiceAccountsController) syncNamespace(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
}()

ns, err := c.nsLister.Get(key)

@@ -21,7 +21,6 @@ import (
"fmt"
"time"

"github.com/golang/glog"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,6 +35,7 @@ import (
"k8s.io/client-go/tools/cache"
clientretry "k8s.io/client-go/util/retry"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/registry/core/secret"
"k8s.io/kubernetes/pkg/serviceaccount"
@@ -173,13 +173,13 @@ func (e *TokensController) Run(workers int, stopCh <-chan struct{}) {
return
}

glog.V(5).Infof("Starting workers")
klog.V(5).Infof("Starting workers")
for i := 0; i < workers; i++ {
go wait.Until(e.syncServiceAccount, 0, stopCh)
go wait.Until(e.syncSecret, 0, stopCh)
}
<-stopCh
glog.V(1).Infof("Shutting down")
klog.V(1).Infof("Shutting down")
}

func (e *TokensController) queueServiceAccountSync(obj interface{}) {
@@ -207,7 +207,7 @@ func (e *TokensController) retryOrForget(queue workqueue.RateLimitingInterface,
return
}

glog.V(4).Infof("retried %d times: %#v", requeueCount, key)
klog.V(4).Infof("retried %d times: %#v", requeueCount, key)
queue.Forget(key)
}

@@ -237,28 +237,28 @@ func (e *TokensController) syncServiceAccount() {

saInfo, err := parseServiceAccountKey(key)
if err != nil {
glog.Error(err)
klog.Error(err)
return
}

sa, err := e.getServiceAccount(saInfo.namespace, saInfo.name, saInfo.uid, false)
switch {
case err != nil:
glog.Error(err)
klog.Error(err)
retry = true
case sa == nil:
// service account no longer exists, so delete related tokens
glog.V(4).Infof("syncServiceAccount(%s/%s), service account deleted, removing tokens", saInfo.namespace, saInfo.name)
klog.V(4).Infof("syncServiceAccount(%s/%s), service account deleted, removing tokens", saInfo.namespace, saInfo.name)
sa = &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: saInfo.namespace, Name: saInfo.name, UID: saInfo.uid}}
retry, err = e.deleteTokens(sa)
if err != nil {
glog.Errorf("error deleting serviceaccount tokens for %s/%s: %v", saInfo.namespace, saInfo.name, err)
klog.Errorf("error deleting serviceaccount tokens for %s/%s: %v", saInfo.namespace, saInfo.name, err)
}
default:
// ensure a token exists and is referenced by this service account
retry, err = e.ensureReferencedToken(sa)
if err != nil {
glog.Errorf("error synchronizing serviceaccount %s/%s: %v", saInfo.namespace, saInfo.name, err)
klog.Errorf("error synchronizing serviceaccount %s/%s: %v", saInfo.namespace, saInfo.name, err)
}
}
}
@@ -278,14 +278,14 @@ func (e *TokensController) syncSecret() {

secretInfo, err := parseSecretQueueKey(key)
if err != nil {
glog.Error(err)
klog.Error(err)
return
}

secret, err := e.getSecret(secretInfo.namespace, secretInfo.name, secretInfo.uid, false)
switch {
case err != nil:
glog.Error(err)
klog.Error(err)
retry = true
case secret == nil:
// If the service account exists
@@ -294,7 +294,7 @@ func (e *TokensController) syncSecret() {
if err := clientretry.RetryOnConflict(RemoveTokenBackoff, func() error {
return e.removeSecretReference(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, secretInfo.name)
}); err != nil {
glog.Error(err)
klog.Error(err)
}
}
default:
@@ -302,19 +302,19 @@ func (e *TokensController) syncSecret() {
sa, saErr := e.getServiceAccount(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, true)
switch {
case saErr != nil:
glog.Error(saErr)
klog.Error(saErr)
retry = true
case sa == nil:
// Delete token
glog.V(4).Infof("syncSecret(%s/%s), service account does not exist, deleting token", secretInfo.namespace, secretInfo.name)
klog.V(4).Infof("syncSecret(%s/%s), service account does not exist, deleting token", secretInfo.namespace, secretInfo.name)
if retriable, err := e.deleteToken(secretInfo.namespace, secretInfo.name, secretInfo.uid); err != nil {
glog.Errorf("error deleting serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err)
klog.Errorf("error deleting serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err)
retry = retriable
}
default:
// Update token if needed
if retriable, err := e.generateTokenIfNeeded(sa, secret); err != nil {
glog.Errorf("error populating serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err)
klog.Errorf("error populating serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err)
retry = retriable
}
}
@@ -376,7 +376,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
}
if liveServiceAccount.ResourceVersion != serviceAccount.ResourceVersion {
// Retry if our liveServiceAccount doesn't match our cache's resourceVersion (either the live lookup or our cache are stale)
glog.V(4).Infof("liveServiceAccount.ResourceVersion (%s) does not match cache (%s), retrying", liveServiceAccount.ResourceVersion, serviceAccount.ResourceVersion)
klog.V(4).Infof("liveServiceAccount.ResourceVersion (%s) does not match cache (%s), retrying", liveServiceAccount.ResourceVersion, serviceAccount.ResourceVersion)
return true, nil
}

@@ -455,10 +455,10 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou

if !addedReference {
// we weren't able to use the token, try to clean it up.
glog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
klog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}}
if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil {
glog.Error(deleteErr) // if we fail, just log it
klog.Error(deleteErr) // if we fail, just log it
}
}

@@ -524,7 +524,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou
if liveSecret.ResourceVersion != cachedSecret.ResourceVersion {
// our view of the secret is not up to date
// we'll get notified of an update event later and get to try again
glog.V(2).Infof("secret %s/%s is not up to date, skipping token population", liveSecret.Namespace, liveSecret.Name)
klog.V(2).Infof("secret %s/%s is not up to date, skipping token population", liveSecret.Namespace, liveSecret.Name)
return false, nil
}

@@ -23,8 +23,8 @@ import (
"time"

"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
"gopkg.in/square/go-jose.v2/jwt"
"k8s.io/klog"

"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -568,7 +568,7 @@ func TestTokenCreation(t *testing.T) {
}

for k, tc := range testcases {
glog.Infof(k)
klog.Infof(k)

// Re-seed to reset name generation
utilrand.Seed(1)

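Because flag registration is now explicit, test binaries that exercise verbosity-gated code paths need the same wiring; one way to do it is a single InitFlags call in the test package's init(). A minimal sketch under that assumption; the package and test names are illustrative, not from this commit.

package example_test

import (
	"flag"
	"testing"

	"k8s.io/klog"
)

func init() {
	// Register klog's flags so they can be set from the test command
	// line, e.g. go test ./... -args -v=4
	klog.InitFlags(nil)
}

func TestVerboseLogging(t *testing.T) {
	flag.Set("v", "4") // raise verbosity for this run

	// Emitted only because verbosity was raised to 4 above.
	klog.V(4).Infof("running %s at high verbosity", t.Name())
}
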
@@ -41,7 +41,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -41,7 +41,7 @@ import (
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/history"

"github.com/golang/glog"
"k8s.io/klog"
)

const (
@@ -86,7 +86,7 @@ func NewStatefulSetController(
kubeClient clientset.Interface,
) *StatefulSetController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset-controller"})

@@ -128,7 +128,7 @@ func NewStatefulSetController(
oldPS := old.(*apps.StatefulSet)
curPS := cur.(*apps.StatefulSet)
if oldPS.Status.Replicas != curPS.Status.Replicas {
glog.V(4).Infof("Observed updated replica count for StatefulSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
klog.V(4).Infof("Observed updated replica count for StatefulSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
}
ssc.enqueueStatefulSet(cur)
},
@@ -148,8 +148,8 @@ func (ssc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer ssc.queue.ShutDown()

glog.Infof("Starting stateful set controller")
defer glog.Infof("Shutting down statefulset controller")
klog.Infof("Starting stateful set controller")
defer klog.Infof("Shutting down statefulset controller")

if !controller.WaitForCacheSync("stateful set", stopCh, ssc.podListerSynced, ssc.setListerSynced, ssc.pvcListerSynced, ssc.revListerSynced) {
return
@@ -179,7 +179,7 @@ func (ssc *StatefulSetController) addPod(obj interface{}) {
if set == nil {
return
}
glog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels)
klog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels)
ssc.enqueueStatefulSet(set)
return
}
@@ -190,7 +190,7 @@ func (ssc *StatefulSetController) addPod(obj interface{}) {
if len(sets) == 0 {
return
}
glog.V(4).Infof("Orphan Pod %s created, labels: %+v", pod.Name, pod.Labels)
klog.V(4).Infof("Orphan Pod %s created, labels: %+v", pod.Name, pod.Labels)
for _, set := range sets {
ssc.enqueueStatefulSet(set)
}
@@ -224,7 +224,7 @@ func (ssc *StatefulSetController) updatePod(old, cur interface{}) {
if set == nil {
return
}
glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
klog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
ssc.enqueueStatefulSet(set)
return
}
@@ -236,7 +236,7 @@ func (ssc *StatefulSetController) updatePod(old, cur interface{}) {
if len(sets) == 0 {
return
}
glog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
klog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
for _, set := range sets {
ssc.enqueueStatefulSet(set)
}
@@ -273,7 +273,7 @@ func (ssc *StatefulSetController) deletePod(obj interface{}) {
if set == nil {
return
}
glog.V(4).Infof("Pod %s/%s deleted through %v.", pod.Namespace, pod.Name, utilruntime.GetCaller())
klog.V(4).Infof("Pod %s/%s deleted through %v.", pod.Namespace, pod.Name, utilruntime.GetCaller())
ssc.enqueueStatefulSet(set)
}

@@ -415,7 +415,7 @@ func (ssc *StatefulSetController) worker() {
func (ssc *StatefulSetController) sync(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime))
klog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -424,7 +424,7 @@ func (ssc *StatefulSetController) sync(key string) error {
}
set, err := ssc.setLister.StatefulSets(namespace).Get(name)
if errors.IsNotFound(err) {
glog.Infof("StatefulSet has been deleted %v", key)
klog.Infof("StatefulSet has been deleted %v", key)
return nil
}
if err != nil {
@@ -453,11 +453,11 @@ func (ssc *StatefulSetController) sync(key string) error {

// syncStatefulSet syncs a tuple of (statefulset, []*v1.Pod).
func (ssc *StatefulSetController) syncStatefulSet(set *apps.StatefulSet, pods []*v1.Pod) error {
glog.V(4).Infof("Syncing StatefulSet %v/%v with %d pods", set.Namespace, set.Name, len(pods))
klog.V(4).Infof("Syncing StatefulSet %v/%v with %d pods", set.Namespace, set.Name, len(pods))
// TODO: investigate where we mutate the set during the update as it is not obvious.
if err := ssc.control.UpdateStatefulSet(set.DeepCopy(), pods); err != nil {
return err
}
glog.V(4).Infof("Successfully synced StatefulSet %s/%s successful", set.Namespace, set.Name)
klog.V(4).Infof("Successfully synced StatefulSet %s/%s successful", set.Namespace, set.Name)
return nil
}

@@ -20,7 +20,7 @@ import (
"math"
"sort"

"github.com/golang/glog"
"k8s.io/klog"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -99,7 +99,7 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, p
return err
}

glog.V(4).Infof("StatefulSet %s/%s pod status replicas=%d ready=%d current=%d updated=%d",
klog.V(4).Infof("StatefulSet %s/%s pod status replicas=%d ready=%d current=%d updated=%d",
set.Namespace,
set.Name,
status.Replicas,
@@ -107,7 +107,7 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, p
status.CurrentReplicas,
status.UpdatedReplicas)

glog.V(4).Infof("StatefulSet %s/%s revisions current=%s update=%s",
klog.V(4).Infof("StatefulSet %s/%s revisions current=%s update=%s",
set.Namespace,
set.Name,
status.CurrentRevision,
@@ -351,7 +351,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
}

if unhealthy > 0 {
glog.V(4).Infof("StatefulSet %s/%s has %d unhealthy Pods starting with %s",
klog.V(4).Infof("StatefulSet %s/%s has %d unhealthy Pods starting with %s",
set.Namespace,
set.Name,
unhealthy,
@@ -415,7 +415,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
// If we find a Pod that is currently terminating, we must wait until graceful deletion
// completes before we continue to make progress.
if isTerminating(replicas[i]) && monotonic {
glog.V(4).Infof(
klog.V(4).Infof(
"StatefulSet %s/%s is waiting for Pod %s to Terminate",
set.Namespace,
set.Name,
@@ -426,7 +426,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
// We must ensure that all for each Pod, when we create it, all of its predecessors, with respect to its
// ordinal, are Running and Ready.
if !isRunningAndReady(replicas[i]) && monotonic {
glog.V(4).Infof(
klog.V(4).Infof(
"StatefulSet %s/%s is waiting for Pod %s to be Running and Ready",
set.Namespace,
set.Name,
@@ -452,7 +452,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
for target := len(condemned) - 1; target >= 0; target-- {
// wait for terminating pods to expire
if isTerminating(condemned[target]) {
glog.V(4).Infof(
klog.V(4).Infof(
"StatefulSet %s/%s is waiting for Pod %s to Terminate prior to scale down",
set.Namespace,
set.Name,
@@ -465,14 +465,14 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
}
// if we are in monotonic mode and the condemned target is not the first unhealthy Pod block
if !isRunningAndReady(condemned[target]) && monotonic && condemned[target] != firstUnhealthyPod {
glog.V(4).Infof(
klog.V(4).Infof(
"StatefulSet %s/%s is waiting for Pod %s to be Running and Ready prior to scale down",
set.Namespace,
set.Name,
firstUnhealthyPod.Name)
return &status, nil
}
glog.V(2).Infof("StatefulSet %s/%s terminating Pod %s for scale down",
klog.V(2).Infof("StatefulSet %s/%s terminating Pod %s for scale down",
set.Namespace,
set.Name,
condemned[target].Name)
@@ -506,7 +506,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(

// delete the Pod if it is not already terminating and does not match the update revision.
if getPodRevision(replicas[target]) != updateRevision.Name && !isTerminating(replicas[target]) {
glog.V(2).Infof("StatefulSet %s/%s terminating Pod %s for update",
klog.V(2).Infof("StatefulSet %s/%s terminating Pod %s for update",
set.Namespace,
set.Name,
replicas[target].Name)
@@ -517,7 +517,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(

// wait for unhealthy Pods on update
if !isHealthy(replicas[target]) {
glog.V(4).Infof(
klog.V(4).Infof(
"StatefulSet %s/%s is waiting for Pod %s to update",
set.Namespace,
set.Name,

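The StatefulSet hunks also show the verbosity conventions surviving the rename: routine actions such as terminating a Pod log at V(2), while per-Pod wait states log at V(4). A minimal sketch of how those levels behave at runtime; the flag value and Pod name are illustrative.

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	flag.Set("v", "2") // as if the process were started with -v=2

	// Only the first line below is emitted at -v=2; a V(n) call is a
	// no-op whenever the configured verbosity is below n.
	klog.V(2).Infof("terminating Pod %s for scale down", "web-2")
	klog.V(4).Infof("waiting for Pod %s to Terminate", "web-2")

	klog.Flush()
}
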
@@ -28,7 +28,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/github.com/evanphx/json-patch:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -46,7 +46,7 @@ import (
utilnode "k8s.io/kubernetes/pkg/util/node"

jsonpatch "github.com/evanphx/json-patch"
"github.com/golang/glog"
"k8s.io/klog"
)

var (
@@ -299,12 +299,12 @@ func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, su

originalObjJS, err := json.Marshal(nodeCopy)
if err != nil {
glog.Errorf("Failed to marshal %v", nodeCopy)
klog.Errorf("Failed to marshal %v", nodeCopy)
return nil, nil
}
var originalNode v1.Node
if err = json.Unmarshal(originalObjJS, &originalNode); err != nil {
glog.Errorf("Failed to unmarshal original object: %v", err)
klog.Errorf("Failed to unmarshal original object: %v", err)
return nil, nil
}

@@ -313,31 +313,31 @@ func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, su
case types.JSONPatchType:
patchObj, err := jsonpatch.DecodePatch(data)
if err != nil {
glog.Error(err.Error())
klog.Error(err.Error())
return nil, nil
}
if patchedObjJS, err = patchObj.Apply(originalObjJS); err != nil {
glog.Error(err.Error())
klog.Error(err.Error())
return nil, nil
}
case types.MergePatchType:
if patchedObjJS, err = jsonpatch.MergePatch(originalObjJS, data); err != nil {
glog.Error(err.Error())
klog.Error(err.Error())
return nil, nil
}
case types.StrategicMergePatchType:
if patchedObjJS, err = strategicpatch.StrategicMergePatch(originalObjJS, data, originalNode); err != nil {
glog.Error(err.Error())
klog.Error(err.Error())
return nil, nil
}
default:
glog.Errorf("unknown Content-Type header for patch: %v", pt)
klog.Errorf("unknown Content-Type header for patch: %v", pt)
return nil, nil
}

var updatedNode v1.Node
if err = json.Unmarshal(patchedObjJS, &updatedNode); err != nil {
glog.Errorf("Failed to unmarshal patched object: %v", err)
klog.Errorf("Failed to unmarshal patched object: %v", err)
return nil, nil
}

@@ -382,7 +382,7 @@ func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time,
defer f.Unlock()
ref, err := ref.GetReference(legacyscheme.Scheme, obj)
if err != nil {
glog.Errorf("Encountered error while getting reference: %v", err)
klog.Errorf("Encountered error while getting reference: %v", err)
return
}
event := f.makeEvent(ref, eventtype, reason, message)

@@ -24,7 +24,7 @@ go_library(
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -47,7 +47,7 @@ import (
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"

"github.com/golang/glog"
"k8s.io/klog"
)

type TTLController struct {
@@ -113,8 +113,8 @@ func (ttlc *TTLController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer ttlc.queue.ShutDown()

glog.Infof("Starting TTL controller")
defer glog.Infof("Shutting down TTL controller")
klog.Infof("Starting TTL controller")
defer klog.Infof("Shutting down TTL controller")

if !controller.WaitForCacheSync("TTL", stopCh, ttlc.hasSynced) {
return
@@ -190,7 +190,7 @@ func (ttlc *TTLController) deleteNode(obj interface{}) {
func (ttlc *TTLController) enqueueNode(node *v1.Node) {
key, err := controller.KeyFunc(node)
if err != nil {
glog.Errorf("Couldn't get key for object %+v", node)
klog.Errorf("Couldn't get key for object %+v", node)
return
}
ttlc.queue.Add(key)
@@ -235,7 +235,7 @@ func getIntFromAnnotation(node *v1.Node, annotationKey string) (int, bool) {
}
intValue, err := strconv.Atoi(annotationValue)
if err != nil {
glog.Warningf("Cannot convert the value %q with annotation key %q for the node %q",
klog.Warningf("Cannot convert the value %q with annotation key %q for the node %q",
annotationValue, annotationKey, node.Name)
return 0, false
}
@@ -265,10 +265,10 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey
}
_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
if err != nil {
glog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err)
klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err)
return err
}
glog.V(2).Infof("Changed ttl annotation for node %s to %d seconds", node.Name, value)
klog.V(2).Infof("Changed ttl annotation for node %s to %d seconds", node.Name, value)
return nil
}

@@ -24,7 +24,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -20,7 +20,7 @@ import (
"fmt"
"time"

"github.com/golang/glog"
"k8s.io/klog"

batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
@@ -71,7 +71,7 @@ type Controller struct {
// New creates an instance of Controller
func New(jobInformer batchinformers.JobInformer, client clientset.Interface) *Controller {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -102,8 +102,8 @@ func (tc *Controller) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer tc.queue.ShutDown()

glog.Infof("Starting TTL after finished controller")
defer glog.Infof("Shutting down TTL after finished controller")
klog.Infof("Starting TTL after finished controller")
defer klog.Infof("Shutting down TTL after finished controller")

if !controller.WaitForCacheSync("TTL after finished", stopCh, tc.jListerSynced) {
return
@@ -118,7 +118,7 @@ func (tc *Controller) Run(workers int, stopCh <-chan struct{}) {

func (tc *Controller) addJob(obj interface{}) {
job := obj.(*batch.Job)
glog.V(4).Infof("Adding job %s/%s", job.Namespace, job.Name)
klog.V(4).Infof("Adding job %s/%s", job.Namespace, job.Name)

if job.DeletionTimestamp == nil && needsCleanup(job) {
tc.enqueue(job)
@@ -127,7 +127,7 @@ func (tc *Controller) addJob(obj interface{}) {

func (tc *Controller) updateJob(old, cur interface{}) {
job := cur.(*batch.Job)
glog.V(4).Infof("Updating job %s/%s", job.Namespace, job.Name)
klog.V(4).Infof("Updating job %s/%s", job.Namespace, job.Name)

if job.DeletionTimestamp == nil && needsCleanup(job) {
tc.enqueue(job)
@@ -135,7 +135,7 @@ func (tc *Controller) updateJob(old, cur interface{}) {
}

func (tc *Controller) enqueue(job *batch.Job) {
glog.V(4).Infof("Add job %s/%s to cleanup", job.Namespace, job.Name)
klog.V(4).Infof("Add job %s/%s to cleanup", job.Namespace, job.Name)
key, err := controller.KeyFunc(job)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", job, err))
@@ -194,7 +194,7 @@ func (tc *Controller) processJob(key string) error {
return err
}

glog.V(4).Infof("Checking if Job %s/%s is ready for cleanup", namespace, name)
klog.V(4).Infof("Checking if Job %s/%s is ready for cleanup", namespace, name)
// Ignore the Jobs that are already deleted or being deleted, or the ones that don't need clean up.
job, err := tc.jLister.Jobs(namespace).Get(name)
if errors.IsNotFound(err) {
@@ -233,7 +233,7 @@ func (tc *Controller) processJob(key string) error {
PropagationPolicy: &policy,
Preconditions: &metav1.Preconditions{UID: &fresh.UID},
}
glog.V(4).Infof("Cleaning up Job %s/%s", namespace, name)
klog.V(4).Infof("Cleaning up Job %s/%s", namespace, name)
return tc.client.BatchV1().Jobs(fresh.Namespace).Delete(fresh.Name, options)
}

@@ -284,10 +284,10 @@ func timeLeft(j *batch.Job, since *time.Time) (*time.Duration, error) {
return nil, err
}
if finishAt.UTC().After(since.UTC()) {
glog.Warningf("Warning: Found Job %s/%s finished in the future. This is likely due to time skew in the cluster. Job cleanup will be deferred.", j.Namespace, j.Name)
klog.Warningf("Warning: Found Job %s/%s finished in the future. This is likely due to time skew in the cluster. Job cleanup will be deferred.", j.Namespace, j.Name)
}
remaining := expireAt.UTC().Sub(since.UTC())
glog.V(4).Infof("Found Job %s/%s finished at %v, remaining TTL %v since %v, TTL will expire at %v", j.Namespace, j.Name, finishAt.UTC(), remaining, since.UTC(), expireAt.UTC())
klog.V(4).Infof("Found Job %s/%s finished at %v, remaining TTL %v since %v, TTL will expire at %v", j.Namespace, j.Name, finishAt.UTC(), remaining, since.UTC(), expireAt.UTC())
return &remaining, nil
}