Merge pull request #59905 from mtaufen/dkcfg-config-ok-kubelet-config-ok

Automatic merge from submit-queue (batch tested with PRs 59353, 59905, 53833). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Rename ConfigOK to KubeletConfigOk

This is a more accurate name for the condition, as it describes the
status of the Kubelet's configuration.

Also cleans up capitalization of internal names.

```release-note
The ConfigOK node condition has been renamed to KubeletConfigOk.
```
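
For context, a minimal sketch (not taken from this PR) of the shape the renamed condition takes in `Node.Status.Conditions`, assuming the `core/v1` API as of this change; the message and reason strings are illustrative placeholders, not the exact values the Kubelet reports:

```go
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
)

func main() {
	// Sketch only: the renamed condition type as it appears among the node's
	// conditions. Message and Reason values below are illustrative.
	cond := apiv1.NodeCondition{
		Type:    apiv1.NodeKubeletConfigOk, // previously apiv1.NodeConfigOK
		Status:  apiv1.ConditionTrue,
		Message: "example message",
		Reason:  "example reason",
	}
	fmt.Printf("%s=%s: %s (%s)\n", cond.Type, cond.Status, cond.Message, cond.Reason)
}
```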
Authored by Kubernetes Submit Queue, committed via GitHub on 2018-02-15 11:06:36 -08:00.
8 changed files with 79 additions and 79 deletions.


@@ -71,14 +71,14 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v
node, err := latestNode(cc.informer.GetStore(), nodeName)
if err != nil {
-cc.configOK.SetFailSyncCondition(status.FailSyncReasonInformer)
+cc.configOk.SetFailSyncCondition(status.FailSyncReasonInformer)
syncerr = fmt.Errorf("%s, error: %v", status.FailSyncReasonInformer, err)
return
}
// check the Node and download any new config
if updated, cur, reason, err := cc.doSyncConfigSource(client, node.Spec.ConfigSource); err != nil {
-cc.configOK.SetFailSyncCondition(reason)
+cc.configOk.SetFailSyncCondition(reason)
syncerr = fmt.Errorf("%s, error: %v", reason, err)
return
} else if updated {
@@ -100,7 +100,7 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v
// - there is no need to restart to update the current config
// - there was no error trying to sync configuration
// - if, previously, there was an error trying to sync configuration, we need to clear that error from the condition
-cc.configOK.ClearFailSyncCondition()
+cc.configOk.ClearFailSyncCondition()
}
// doSyncConfigSource checkpoints and sets the store's current config to the new config or resets config,


@@ -53,8 +53,8 @@ type Controller struct {
// pendingConfigSource; write to this channel to indicate that the config source needs to be synced from the API server
pendingConfigSource chan bool
-// configOK manages the ConfigOK condition that is reported in Node.Status.Conditions
-configOK status.ConfigOKCondition
+// configOk manages the KubeletConfigOk condition that is reported in Node.Status.Conditions
+configOk status.ConfigOkCondition
// informer is the informer that watches the Node object
informer cache.SharedInformer
@@ -69,7 +69,7 @@ func NewController(defaultConfig *kubeletconfig.KubeletConfiguration, dynamicCon
defaultConfig: defaultConfig,
// channels must have capacity at least 1, since we signal with non-blocking writes
pendingConfigSource: make(chan bool, 1),
-configOK: status.NewConfigOKCondition(),
+configOk: status.NewConfigOkCondition(),
checkpointStore: store.NewFsStore(utilfs.DefaultFs{}, filepath.Join(dynamicConfigDir, checkpointsDir)),
}
}
@@ -95,9 +95,9 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {
if err == nil {
// set the status to indicate we will use the assigned config
if curSource != nil {
-cc.configOK.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curSource.APIPath()), reason, apiv1.ConditionTrue)
+cc.configOk.Set(fmt.Sprintf(status.CurRemoteMessageFmt, curSource.APIPath()), reason, apiv1.ConditionTrue)
} else {
-cc.configOK.Set(status.CurLocalMessage, reason, apiv1.ConditionTrue)
+cc.configOk.Set(status.CurLocalMessage, reason, apiv1.ConditionTrue)
}
// update the last-known-good config if necessary, and start a timer that
@@ -125,9 +125,9 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {
// set the status to indicate that we had to roll back to the lkg for the reason reported when we tried to load the assigned config
if lkgSource != nil {
-cc.configOK.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgSource.APIPath()), reason, apiv1.ConditionFalse)
+cc.configOk.Set(fmt.Sprintf(status.LkgRemoteMessageFmt, lkgSource.APIPath()), reason, apiv1.ConditionFalse)
} else {
-cc.configOK.Set(status.LkgLocalMessage, reason, apiv1.ConditionFalse)
+cc.configOk.Set(status.LkgLocalMessage, reason, apiv1.ConditionFalse)
}
// return the last-known-good config
@@ -146,11 +146,11 @@ func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.E
return
}
-// start the ConfigOK condition sync loop
+// start the ConfigOk condition sync loop
go utilpanic.HandlePanic(func() {
utillog.Infof("starting ConfigOK condition sync loop")
utillog.Infof("starting ConfigOk condition sync loop")
wait.JitterUntil(func() {
-cc.configOK.Sync(client, nodeName)
+cc.configOk.Sync(client, nodeName)
}, 10*time.Second, 0.2, true, wait.NeverStop)
})()
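
Aside: both the controller and the condition rely on the same buffered-channel signaling idiom noted in the comments above ("capacity at least 1, since we signal with non-blocking writes"). A standalone sketch of that pattern, with made-up names (`pending`, `poke`), might look like this:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Capacity 1 so a signal can be parked without blocking the sender.
	pending := make(chan bool, 1)

	// poke records "work is needed"; repeated pokes coalesce into one signal.
	poke := func() {
		select {
		case pending <- true:
		default: // a signal is already pending; drop this one
		}
	}

	// The consumer drains at most one signal per tick, mirroring how the
	// ConfigOk sync loop checks for pending work on each iteration.
	go func() {
		for range time.Tick(50 * time.Millisecond) {
			select {
			case <-pending:
				fmt.Println("signal received, syncing")
			default:
			}
		}
	}()

	poke()
	poke() // coalesces with the first poke
	time.Sleep(200 * time.Millisecond)
}
```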


@@ -97,8 +97,8 @@ const (
EmptyReason = "unknown - reason not provided"
)
-// ConfigOKCondition represents a ConfigOK NodeCondition
-type ConfigOKCondition interface {
+// ConfigOkCondition represents a ConfigOk NodeCondition
+type ConfigOkCondition interface {
// Set sets the Message, Reason, and Status of the condition
Set(message, reason string, status apiv1.ConditionStatus)
// SetFailSyncCondition sets the condition for when syncing Kubelet config fails
@@ -109,30 +109,30 @@ type ConfigOKCondition interface {
Sync(client clientset.Interface, nodeName string)
}
-// configOKCondition implements ConfigOKCondition
-type configOKCondition struct {
+// configOkCondition implements ConfigOkCondition
+type configOkCondition struct {
// conditionMux is a mutex on the condition, alternate between setting and syncing the condition
conditionMux sync.Mutex
-// condition is the current ConfigOK node condition, which will be reported in the Node.status.conditions
+// condition is the current ConfigOk node condition, which will be reported in the Node.status.conditions
condition *apiv1.NodeCondition
// failedSyncReason is sent in place of the usual reason when the Kubelet is failing to sync the remote config
failedSyncReason string
-// pendingCondition; write to this channel to indicate that ConfigOK needs to be synced to the API server
+// pendingCondition; write to this channel to indicate that ConfigOk needs to be synced to the API server
pendingCondition chan bool
}
-// NewConfigOKCondition returns a new ConfigOKCondition
-func NewConfigOKCondition() ConfigOKCondition {
-return &configOKCondition{
+// NewConfigOkCondition returns a new ConfigOkCondition
+func NewConfigOkCondition() ConfigOkCondition {
+return &configOkCondition{
// channels must have capacity at least 1, since we signal with non-blocking writes
pendingCondition: make(chan bool, 1),
}
}
// unsafeSet sets the current state of the condition
-// it does not grab the conditionMux lock, so you should generally use setConfigOK unless you need to grab the lock
+// it does not grab the conditionMux lock, so you should generally use setConfigOk unless you need to grab the lock
// at a higher level to synchronize additional operations
-func (c *configOKCondition) unsafeSet(message, reason string, status apiv1.ConditionStatus) {
+func (c *configOkCondition) unsafeSet(message, reason string, status apiv1.ConditionStatus) {
// We avoid an empty Message, Reason, or Status on the condition. Since we use Patch to update conditions, an empty
// field might cause a value from a previous condition to leak through, which can be very confusing.
if len(message) == 0 {
@@ -149,21 +149,21 @@ func (c *configOKCondition) unsafeSet(message, reason string, status apiv1.Condi
Message: message,
Reason: reason,
Status: status,
-Type: apiv1.NodeConfigOK,
+Type: apiv1.NodeKubeletConfigOk,
}
c.pokeSyncWorker()
}
-func (c *configOKCondition) Set(message, reason string, status apiv1.ConditionStatus) {
+func (c *configOkCondition) Set(message, reason string, status apiv1.ConditionStatus) {
c.conditionMux.Lock()
defer c.conditionMux.Unlock()
c.unsafeSet(message, reason, status)
}
-// SetFailSyncCondition updates the ConfigOK status to reflect that we failed to sync to the latest config,
+// SetFailSyncCondition updates the ConfigOk status to reflect that we failed to sync to the latest config,
// e.g. due to a malformed Node.Spec.ConfigSource, a download failure, etc.
-func (c *configOKCondition) SetFailSyncCondition(reason string) {
+func (c *configOkCondition) SetFailSyncCondition(reason string) {
c.conditionMux.Lock()
defer c.conditionMux.Unlock()
// set the reason overlay and poke the sync worker to send the update
@@ -172,7 +172,7 @@ func (c *configOKCondition) SetFailSyncCondition(reason string) {
}
// ClearFailSyncCondition removes the "failed to sync" reason overlay
-func (c *configOKCondition) ClearFailSyncCondition() {
+func (c *configOkCondition) ClearFailSyncCondition() {
c.conditionMux.Lock()
defer c.conditionMux.Unlock()
// clear the reason overlay and poke the sync worker to send the update
@@ -180,8 +180,8 @@ func (c *configOKCondition) ClearFailSyncCondition() {
c.pokeSyncWorker()
}
-// pokeSyncWorker notes that the ConfigOK condition needs to be synced to the API server
-func (c *configOKCondition) pokeSyncWorker() {
+// pokeSyncWorker notes that the ConfigOk condition needs to be synced to the API server
+func (c *configOkCondition) pokeSyncWorker() {
select {
case c.pendingCondition <- true:
default:
@@ -190,7 +190,7 @@ func (c *configOKCondition) pokeSyncWorker() {
// Sync attempts to sync `c.condition` with the Node object for this Kubelet,
// if syncing fails, an error is logged, and work is queued for retry.
-func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {
+func (c *configOkCondition) Sync(client clientset.Interface, nodeName string) {
select {
case <-c.pendingCondition:
default:
@@ -212,21 +212,21 @@ func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {
}()
if c.condition == nil {
utillog.Infof("ConfigOK condition is nil, skipping ConfigOK sync")
utillog.Infof("ConfigOk condition is nil, skipping ConfigOk sync")
return
}
// get the Node so we can check the current condition
node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("could not get Node %q, will not sync ConfigOK condition, error: %v", nodeName, err)
err = fmt.Errorf("could not get Node %q, will not sync ConfigOk condition, error: %v", nodeName, err)
return
}
// set timestamps
syncTime := metav1.NewTime(time.Now())
c.condition.LastHeartbeatTime = syncTime
-if remote := getConfigOK(node.Status.Conditions); remote == nil || !utilequal.ConfigOKEq(remote, c.condition) {
+if remote := getKubeletConfigOk(node.Status.Conditions); remote == nil || !utilequal.KubeletConfigOkEq(remote, c.condition) {
// update transition time the first time we create the condition,
// or if we are semantically changing the condition
c.condition.LastTransitionTime = syncTime
@@ -269,7 +269,7 @@ func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {
return
}
-patchConfigOK(node, condition)
+patchConfigOk(node, condition)
after, err := kuberuntime.Encode(encoder, node)
if err != nil {
err = fmt.Errorf(`failed to encode "after" node while generating patch, error: %v`, err)
@@ -278,36 +278,36 @@ func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) {
patch, err := strategicpatch.CreateTwoWayMergePatch(before, after, apiv1.Node{})
if err != nil {
err = fmt.Errorf("failed to generate patch for updating ConfigOK condition, error: %v", err)
err = fmt.Errorf("failed to generate patch for updating ConfigOk condition, error: %v", err)
return
}
// patch the remote Node object
_, err = client.CoreV1().Nodes().PatchStatus(nodeName, patch)
if err != nil {
err = fmt.Errorf("could not update ConfigOK condition, error: %v", err)
err = fmt.Errorf("could not update ConfigOk condition, error: %v", err)
return
}
}
-// patchConfigOK replaces or adds the ConfigOK condition to the node
-func patchConfigOK(node *apiv1.Node, configOK *apiv1.NodeCondition) {
+// patchConfigOk replaces or adds the ConfigOk condition to the node
+func patchConfigOk(node *apiv1.Node, configOk *apiv1.NodeCondition) {
for i := range node.Status.Conditions {
-if node.Status.Conditions[i].Type == apiv1.NodeConfigOK {
+if node.Status.Conditions[i].Type == apiv1.NodeKubeletConfigOk {
// edit the condition
-node.Status.Conditions[i] = *configOK
+node.Status.Conditions[i] = *configOk
return
}
}
// append the condition
-node.Status.Conditions = append(node.Status.Conditions, *configOK)
+node.Status.Conditions = append(node.Status.Conditions, *configOk)
}
-// getConfigOK returns the first NodeCondition in `cs` with Type == apiv1.NodeConfigOK,
+// getKubeletConfigOk returns the first NodeCondition in `cs` with Type == apiv1.NodeKubeletConfigOk,
// or if no such condition exists, returns nil.
-func getConfigOK(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
+func getKubeletConfigOk(cs []apiv1.NodeCondition) *apiv1.NodeCondition {
for i := range cs {
-if cs[i].Type == apiv1.NodeConfigOK {
+if cs[i].Type == apiv1.NodeKubeletConfigOk {
return &cs[i]
}
}
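
The Sync path above builds a two-way strategic merge patch between the node as fetched and the node with the updated condition, then sends it via `PatchStatus`. A rough, self-contained sketch of that flow (using plain `json.Marshal` instead of the API machinery encoder, purely to keep the example short):

```go
package main

import (
	"encoding/json"
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	node := &apiv1.Node{}
	before, err := json.Marshal(node) // snapshot before editing the condition
	if err != nil {
		panic(err)
	}

	// Add (or replace) the KubeletConfigOk condition. Leaving Message, Reason,
	// or Status empty is what the code above guards against: an omitted field
	// would not appear in the patch, so a stale value could leak through.
	node.Status.Conditions = append(node.Status.Conditions, apiv1.NodeCondition{
		Type:    apiv1.NodeKubeletConfigOk,
		Status:  apiv1.ConditionTrue,
		Message: "example message",
		Reason:  "example reason",
	})
	after, err := json.Marshal(node)
	if err != nil {
		panic(err)
	}

	// The resulting patch touches only the conditions list; in the real
	// controller, this is what gets sent to Nodes().PatchStatus.
	patch, err := strategicpatch.CreateTwoWayMergePatch(before, after, apiv1.Node{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}
```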


@@ -45,7 +45,7 @@ func ObjectRefEq(a, b *apiv1.ObjectReference) bool {
return a.UID == b.UID && a.Namespace == b.Namespace && a.Name == b.Name
}
-// ConfigOKEq returns true if the two conditions are semantically equivalent in the context of dynamic config
-func ConfigOKEq(a, b *apiv1.NodeCondition) bool {
+// KubeletConfigOkEq returns true if the two conditions are semantically equivalent in the context of dynamic config
+func KubeletConfigOkEq(a, b *apiv1.NodeCondition) bool {
return a.Message == b.Message && a.Reason == b.Reason && a.Status == b.Status
}
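
To make "semantically equivalent" concrete: only Message, Reason, and Status are compared, so two conditions that differ only in timestamps count as equal, and a routine heartbeat alone never forces a transition-time update. A small sketch, re-declaring the helper locally just so the example is self-contained:

```go
package main

import (
	"fmt"
	"time"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// kubeletConfigOkEq mirrors the helper above: timestamps are deliberately ignored.
func kubeletConfigOkEq(a, b *apiv1.NodeCondition) bool {
	return a.Message == b.Message && a.Reason == b.Reason && a.Status == b.Status
}

func main() {
	a := &apiv1.NodeCondition{
		Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
		Message: "example", Reason: "example",
		LastHeartbeatTime: metav1.NewTime(time.Now()),
	}
	b := &apiv1.NodeCondition{
		Type: apiv1.NodeKubeletConfigOk, Status: apiv1.ConditionTrue,
		Message: "example", Reason: "example",
		LastHeartbeatTime: metav1.NewTime(time.Now().Add(time.Minute)),
	}
	fmt.Println(kubeletConfigOkEq(a, b)) // true: a newer heartbeat alone is not a semantic change
}
```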