@@ -385,6 +385,10 @@ type DaemonSetStatus struct {
     // DesiredNumberScheduled is the total number of nodes that should be running the daemon
     // pod (including nodes correctly running the daemon pod).
     DesiredNumberScheduled int32 `json:"desiredNumberScheduled"`
+
+    // NumberReady is the number of nodes that should be running the daemon pod and have one
+    // or more of the daemon pod running and ready.
+    NumberReady int32 `json:"numberReady"`
 }

 // +genclient=true
@@ -459,6 +459,10 @@ type DaemonSetStatus struct {
     // pod (including nodes correctly running the daemon pod).
     // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
     DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
+
+    // NumberReady is the number of nodes that should be running the daemon pod and have one
+    // or more of the daemon pod running and ready.
+    NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
 }

 // +genclient=true
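Taken together, the two hunks above give DaemonSetStatus a fourth counter. A node contributes to NumberReady only if it both should run the daemon pod and has at least one daemon pod in the Ready state, so NumberReady can never exceed DesiredNumberScheduled. The snippet below is a standalone sketch (not code from this PR) of how a client might consume the new field; the struct is a simplified stand-in for extensions.DaemonSetStatus.

package main

import "fmt"

// daemonSetStatus is a simplified stand-in for the API type above, with just
// the counters relevant to readiness reporting.
type daemonSetStatus struct {
    DesiredNumberScheduled int32
    CurrentNumberScheduled int32
    NumberReady            int32
}

// fullyReady is a hypothetical helper: the DaemonSet is fully ready when every
// node that should run the daemon pod reports at least one ready daemon pod.
func fullyReady(s daemonSetStatus) bool {
    return s.DesiredNumberScheduled > 0 && s.NumberReady == s.DesiredNumberScheduled
}

func main() {
    s := daemonSetStatus{DesiredNumberScheduled: 3, CurrentNumberScheduled: 3, NumberReady: 1}
    fmt.Println(fullyReady(s)) // false: only 1 of the 3 scheduled daemon pods is ready
}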
@@ -44,6 +44,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
                 CurrentNumberScheduled: 1,
                 NumberMisscheduled:     2,
                 DesiredNumberScheduled: 3,
+                NumberReady:            1,
             },
         },
         update: extensions.DaemonSet{
@@ -52,6 +53,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
                 CurrentNumberScheduled: 1,
                 NumberMisscheduled:     1,
                 DesiredNumberScheduled: 3,
+                NumberReady:            1,
             },
         },
     },
@@ -76,6 +78,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
                 CurrentNumberScheduled: 1,
                 NumberMisscheduled:     2,
                 DesiredNumberScheduled: 3,
+                NumberReady:            1,
             },
         },
         update: extensions.DaemonSet{
@@ -88,6 +91,7 @@ func TestValidateDaemonSetStatusUpdate(t *testing.T) {
                 CurrentNumberScheduled: -1,
                 NumberMisscheduled:     -1,
                 DesiredNumberScheduled: -3,
+                NumberReady:            -1,
             },
         },
     },
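The validation test hunks extend both the success cases and the error case: the error case flips every counter negative, so status-update validation is expected to reject a negative NumberReady the same way it rejects the other counters. The PR's change to validation.go itself is not part of these hunks; the sketch below only illustrates the invariant the negative-value case exercises, using plain types rather than the repository's validation helpers.

package main

import "fmt"

// checkNonNegativeCounters illustrates the invariant exercised by the error
// case above: every DaemonSet status counter, including the new NumberReady,
// must be non-negative. This is a sketch, not the repository's validation code.
func checkNonNegativeCounters(counters map[string]int32) []error {
    var errs []error
    for name, v := range counters {
        if v < 0 {
            errs = append(errs, fmt.Errorf("%s must be non-negative, got %d", name, v))
        }
    }
    return errs
}

func main() {
    errs := checkNonNegativeCounters(map[string]int32{
        "currentNumberScheduled": -1,
        "numberMisscheduled":     -1,
        "desiredNumberScheduled": -3,
        "numberReady":            -1,
    })
    fmt.Println(len(errs)) // 4: every negative counter is rejected
}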
@@ -531,10 +531,11 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
     return utilerrors.NewAggregate(errors)
 }

-func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error {
+func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int) error {
     if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled &&
         int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled &&
-        int(ds.Status.NumberMisscheduled) == numberMisscheduled {
+        int(ds.Status.NumberMisscheduled) == numberMisscheduled &&
+        int(ds.Status.NumberReady) == numberReady {
         return nil
     }

@@ -543,6 +544,7 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
         ds.Status.DesiredNumberScheduled = int32(desiredNumberScheduled)
         ds.Status.CurrentNumberScheduled = int32(currentNumberScheduled)
         ds.Status.NumberMisscheduled = int32(numberMisscheduled)
+        ds.Status.NumberReady = int32(numberReady)

         if _, updateErr = dsClient.UpdateStatus(ds); updateErr == nil {
             return nil
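storeDaemonSetStatus already short-circuits when the stored status matches the freshly computed counters, so the controller does not issue an UpdateStatus call on every sync. Adding NumberReady to both the comparison and the assignments is what keeps that guard correct: without the extra comparison, a readiness-only change would be treated as a no-op and never written back. The fragment below is an illustrative, self-contained restatement of that guard, not controller code.

package main

import "fmt"

// dsCounters mirrors the four counters the controller computes on each sync.
type dsCounters struct {
    Desired, Current, Misscheduled, Ready int
}

// needsStatusUpdate reports whether a status write is required. Comparing the
// whole struct means a change in any counter (including Ready) triggers a
// write, while identical counters skip the API call entirely.
func needsStatusUpdate(stored, computed dsCounters) bool {
    return stored != computed
}

func main() {
    stored := dsCounters{Desired: 3, Current: 3, Misscheduled: 0, Ready: 1}
    computed := dsCounters{Desired: 3, Current: 3, Misscheduled: 0, Ready: 2}
    fmt.Println(needsStatusUpdate(stored, computed)) // true: readiness changed, so write status
}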
@@ -570,7 +572,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
         return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
     }

-    var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int
+    var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int
     for _, node := range nodeList.Items {
         shouldRun := dsc.nodeShouldRunDaemonPod(&node, ds)

@@ -580,6 +582,12 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
             desiredNumberScheduled++
             if scheduled {
                 currentNumberScheduled++
+                // Sort the daemon pods by creation time, so the oldest is first.
+                daemonPods, _ := nodeToDaemonPods[node.Name]
+                sort.Sort(podByCreationTimestamp(daemonPods))
+                if api.IsPodReady(daemonPods[0]) {
+                    numberReady++
+                }
             }
         } else {
             if scheduled {
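This block only runs when the node both should run the daemon pod (shouldRun) and already has daemon pods on it (scheduled), and daemonPods[0] is indexed without a length check, which relies on scheduled being true only when nodeToDaemonPods[node.Name] is non-empty. The node counts toward NumberReady exactly when its oldest daemon pod is Ready. The helper below restates that per-node decision as standalone code with simplified types; pod and the Ready flag stand in for api.Pod and api.IsPodReady.

package main

import (
    "fmt"
    "sort"
    "time"
)

// pod is a simplified stand-in for api.Pod: just a creation time and a ready flag.
type pod struct {
    CreatedAt time.Time
    Ready     bool
}

// oldestPodReady mirrors the per-node decision above: order the node's daemon
// pods by creation time and report whether the oldest one is ready.
func oldestPodReady(pods []pod) bool {
    if len(pods) == 0 {
        return false // the controller never reaches this case: "scheduled" implies at least one pod
    }
    sort.Slice(pods, func(i, j int) bool { return pods[i].CreatedAt.Before(pods[j].CreatedAt) })
    return pods[0].Ready
}

func main() {
    now := time.Now()
    pods := []pod{
        {CreatedAt: now.Add(time.Minute), Ready: true}, // newer duplicate
        {CreatedAt: now, Ready: false},                 // oldest pod, not ready yet
    }
    fmt.Println(oldestPodReady(pods)) // false: the oldest daemon pod is not ready
}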
@@ -588,7 +596,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
         }
     }

-    err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled)
+    err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady)
     if err != nil {
         return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
     }
@@ -569,3 +569,29 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
     manager.dsStore.Add(daemon)
     syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0)
 }
+
+func TestNumberReadyStatus(t *testing.T) {
+    manager, podControl := newTestController()
+    addNodes(manager.nodeStore.Store, 0, 2, simpleNodeLabel)
+    addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
+    addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1)
+    daemon := newDaemonSet("foo")
+    manager.dsStore.Add(daemon)
+
+    syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
+    if daemon.Status.NumberReady != 0 {
+        t.Errorf("Wrong daemon %s status: %v", daemon.Name, daemon.Status)
+    }
+
+    selector, _ := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector)
+    daemonPods, _ := manager.podStore.Pods(daemon.Namespace).List(selector)
+    for _, pod := range daemonPods {
+        condition := api.PodCondition{Type: api.PodReady, Status: api.ConditionTrue}
+        pod.Status.Conditions = append(pod.Status.Conditions, condition)
+    }
+
+    syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
+    if daemon.Status.NumberReady != 2 {
+        t.Errorf("Wrong daemon %s status: %v", daemon.Name, daemon.Status)
+    }
+}
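The controller test drives the new counter end to end: with two nodes and one daemon pod each, NumberReady starts at 0, and after every pod is given a PodReady condition with status True it becomes 2. "Ready" here is pod readiness as checked by api.IsPodReady, i.e. the pod carries a PodReady condition whose status is True. The function below is a simplified restatement of that check with local stand-in types, not the api package's implementation.

package main

import "fmt"

// podCondition is a local stand-in for api.PodCondition.
type podCondition struct {
    Type   string
    Status string
}

// isPodReady restates the readiness test the controller relies on: a pod is
// ready when it has a "Ready" condition with status "True".
func isPodReady(conditions []podCondition) bool {
    for _, c := range conditions {
        if c.Type == "Ready" && c.Status == "True" {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(isPodReady(nil))                                             // false: no conditions yet
    fmt.Println(isPodReady([]podCondition{{Type: "Ready", Status: "True"}})) // true: matches the test above
}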
@@ -472,7 +472,7 @@ var (
     petSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"}
     endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"}
     nodeColumns = []string{"NAME", "STATUS", "AGE"}
-    daemonSetColumns = []string{"NAME", "DESIRED", "CURRENT", "NODE-SELECTOR", "AGE"}
+    daemonSetColumns = []string{"NAME", "DESIRED", "CURRENT", "READY", "NODE-SELECTOR", "AGE"}
     eventColumns = []string{"LASTSEEN", "FIRSTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "TYPE", "REASON", "SOURCE", "MESSAGE"}
     limitRangeColumns = []string{"NAME", "AGE"}
     resourceQuotaColumns = []string{"NAME", "AGE"}
@@ -1286,15 +1286,17 @@ func printDaemonSet(ds *extensions.DaemonSet, w io.Writer, options PrintOptions)

     desiredScheduled := ds.Status.DesiredNumberScheduled
     currentScheduled := ds.Status.CurrentNumberScheduled
+    numberReady := ds.Status.NumberReady
     selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector)
     if err != nil {
         // this shouldn't happen if LabelSelector passed validation
         return err
     }
-    if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s\t%s",
+    if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%d\t%s\t%s",
         name,
         desiredScheduled,
         currentScheduled,
+        numberReady,
         labels.FormatLabels(ds.Spec.Template.Spec.NodeSelector),
         translateTimestamp(ds.CreationTimestamp),
     ); err != nil {
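printDaemonSet emits one tab-separated row per DaemonSet; kubectl renders those rows through Go's text/tabwriter, so the extra %d slot has to line up with the READY header added to daemonSetColumns above. The standalone sketch below (writer settings are illustrative, not kubectl's exact configuration) shows how such a row becomes aligned columns, using the values from the printer test that follows.

package main

import (
    "fmt"
    "os"
    "text/tabwriter"
)

func main() {
    // Tab-separated cells become aligned columns once flushed through tabwriter.
    w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
    fmt.Fprintln(w, "NAME\tDESIRED\tCURRENT\tREADY\tNODE-SELECTOR\tAGE")
    fmt.Fprintf(w, "%s\t%d\t%d\t%d\t%s\t%s\n", "test1", 3, 2, 1, "<none>", "0s")
    w.Flush()
}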
@@ -1446,9 +1446,10 @@ func TestPrintDaemonSet(t *testing.T) {
             Status: extensions.DaemonSetStatus{
                 CurrentNumberScheduled: 2,
                 DesiredNumberScheduled: 3,
+                NumberReady:            1,
             },
         },
-        "test1\t3\t2\t<none>\t0s\n",
+        "test1\t3\t2\t1\t<none>\t0s\n",
     },
 }

@@ -127,6 +127,8 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
         Expect(err).NotTo(HaveOccurred())
         err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
         Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
+        err = checkDaemonStatus(f, dsName)
+        Expect(err).NotTo(HaveOccurred())

         By("Stop a daemon pod, check that the daemon pod is revived.")
         podClient := c.Pods(ns)
@@ -186,6 +188,8 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
         Expect(len(daemonSetLabels)).To(Equal(1))
         err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
         Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
+        err = checkDaemonStatus(f, dsName)
+        Expect(err).NotTo(HaveOccurred())

         By("remove the node selector and wait for daemons to be unscheduled")
         _, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
@@ -252,6 +256,8 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
         Expect(len(daemonSetLabels)).To(Equal(1))
         err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
         Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
+        err = checkDaemonStatus(f, dsName)
+        Expect(err).NotTo(HaveOccurred())

         By("remove the node selector and wait for daemons to be unscheduled")
         _, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
@@ -374,3 +380,15 @@ func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string)
 func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
     return checkDaemonPodOnNodes(f, selector, make([]string, 0))
 }
+
+func checkDaemonStatus(f *framework.Framework, dsName string) error {
+    ds, err := f.Client.DaemonSets(f.Namespace.Name).Get(dsName)
+    if err != nil {
+        return fmt.Errorf("Could not get daemon set from api.")
+    }
+    desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
+    if desired != scheduled && desired != ready {
+        return fmt.Errorf("Error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
+    }
+    return nil
+}
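One thing to note about checkDaemonStatus: because the condition uses &&, the helper only reports an error when desired differs from both scheduled and ready, so a DaemonSet whose pods are scheduled but not yet ready still passes. If the e2e tests were meant to require all three counters to agree, the check would need ||, as in the sketch below (an alternative, not what this PR ships).

package main

import "fmt"

// checkDaemonStatusStrict is a stricter alternative to the helper above: it
// fails whenever scheduled or ready falls short of desired.
func checkDaemonStatusStrict(desired, scheduled, ready int32) error {
    if desired != scheduled || desired != ready {
        return fmt.Errorf("daemon status mismatch: desired=%d scheduled=%d ready=%d", desired, scheduled, ready)
    }
    return nil
}

func main() {
    fmt.Println(checkDaemonStatusStrict(3, 3, 3)) // <nil>
    fmt.Println(checkDaemonStatusStrict(3, 3, 1)) // error: ready lags behind desired
}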