daemonset: Implement MaxSurge on daemonset update
If MaxSurge is set, the controller doubles up pods on nodes, up to the allowed limit, by placing a new pod alongside the old one, and then triggers deletion of the old pod once the most recent (by hash) pod is ready. If the old pod goes unready before the new pod is ready, the old pod is deleted immediately. If an old pod goes unready before a new pod has been placed on that node, a new pod is added for that node immediately, even past the MaxSurge limit. The backoff clock is used consistently throughout the daemonset controller as an injectable clock for testing.
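For orientation, here is a minimal Go sketch of the per-node decision described above. It is illustrative only, not the controller's actual code: the nodeState type and decide helper are hypothetical reductions, and the real controller reasons over pod template hashes, availability, and expectations rather than plain booleans.

package main

import "fmt"

// nodeState is a hypothetical reduction of what the controller sees for one
// node during a MaxSurge rolling update.
type nodeState struct {
    hasOldPod   bool // pod from the previous template revision
    oldPodReady bool
    hasNewPod   bool // pod from the newest (by hash) revision
    newPodReady bool
}

// decide returns the action for one node. surgeBudgetLeft reports whether
// another surge pod may be created without exceeding MaxSurge.
func decide(s nodeState, surgeBudgetLeft bool) string {
    switch {
    case s.hasOldPod && !s.oldPodReady && !s.hasNewPod:
        // Old pod went unready before a new pod was placed: replace it
        // immediately, even past the MaxSurge limit.
        return "create new pod (bypasses MaxSurge)"
    case s.hasOldPod && s.hasNewPod && s.newPodReady:
        // The newest pod is ready: trigger deletion of the old pod.
        return "delete old pod"
    case s.hasOldPod && s.hasNewPod && !s.oldPodReady:
        // Old pod went unready while the new pod was still starting.
        return "delete old pod immediately"
    case s.hasOldPod && !s.hasNewPod && surgeBudgetLeft:
        // Double up on this node within the surge budget.
        return "create surge pod"
    default:
        return "wait"
    }
}

func main() {
    fmt.Println(decide(nodeState{hasOldPod: true, oldPodReady: true}, true)) // create surge pod
}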
@@ -18,12 +18,20 @@ package daemon
 
 import (
     "testing"
+    "time"
 
     apps "k8s.io/api/apps/v1"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/apimachinery/pkg/util/intstr"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    featuregatetesting "k8s.io/component-base/featuregate/testing"
+    "k8s.io/klog/v2"
+    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+    "k8s.io/kubernetes/pkg/controller/daemon/util"
+    "k8s.io/kubernetes/pkg/features"
 )
 
 func TestDaemonSetUpdatesPods(t *testing.T) {
@@ -67,6 +75,48 @@ func TestDaemonSetUpdatesPods(t *testing.T) {
     clearExpectations(t, manager, ds, podControl)
 }
 
+func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) {
+    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
+    ds := newDaemonSet("foo")
+    manager, podControl, _, err := newTestController(ds)
+    if err != nil {
+        t.Fatalf("error creating DaemonSets controller: %v", err)
+    }
+    addNodes(manager.nodeStore, 0, 5, nil)
+    manager.dsStore.Add(ds)
+    expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+    markPodsReady(podControl.podStore)
+
+    // surge is the controlling amount
+    maxSurge := 2
+    ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
+    ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(maxSurge))
+    manager.dsStore.Update(ds)
+
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, 0, 0)
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+    markPodsReady(podControl.podStore)
+
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0)
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+    markPodsReady(podControl.podStore)
+
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 5%maxSurge, maxSurge, 0)
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+    markPodsReady(podControl.podStore)
+
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 0, 5%maxSurge, 0)
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+}
+
 func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
     ds := newDaemonSet("foo")
     manager, podControl, _, err := newTestController(ds)
@@ -138,6 +188,149 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
     clearExpectations(t, manager, ds, podControl)
 }
 
+func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
+    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
+    ds := newDaemonSet("foo")
+    manager, podControl, _, err := newTestController(ds)
+    if err != nil {
+        t.Fatalf("error creating DaemonSets controller: %v", err)
+    }
+    addNodes(manager.nodeStore, 0, 5, nil)
+    manager.dsStore.Add(ds)
+    expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+
+    maxSurge := 3
+    ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
+    ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(maxSurge))
+    manager.dsStore.Update(ds)
+
+    // all old pods are unavailable so should be surged
+    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(100, 0))
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+
+    // waiting for pods to go ready, old pods are deleted
+    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(200, 0))
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)
+
+    setPodReadiness(t, manager, true, 5, func(_ *v1.Pod) bool { return true })
+    ds.Spec.MinReadySeconds = 15
+    ds.Spec.Template.Spec.Containers[0].Image = "foo3/bar3"
+    manager.dsStore.Update(ds)
+
+    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(300, 0))
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 3, 0, 0)
+
+    hash, err := currentDSHash(manager, ds)
+    if err != nil {
+        t.Fatal(err)
+    }
+    currentPods := podsByNodeMatchingHash(manager, hash)
+    // mark two updated pods as ready at time 300
+    setPodReadiness(t, manager, true, 2, func(pod *v1.Pod) bool {
+        return pod.Labels[apps.ControllerRevisionHashLabelKey] == hash
+    })
+    // mark one of the old pods that is on a node without an updated pod as unready
+    setPodReadiness(t, manager, false, 1, func(pod *v1.Pod) bool {
+        nodeName, err := util.GetTargetNodeName(pod)
+        if err != nil {
+            t.Fatal(err)
+        }
+        return pod.Labels[apps.ControllerRevisionHashLabelKey] != hash && len(currentPods[nodeName]) == 0
+    })
+
+    // the new pods should still be considered waiting to hit min readiness, so one pod should be created to replace
+    // the deleted old pod
+    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(310, 0))
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+
+    // the new pods are now considered available, so delete the old pods
+    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(320, 0))
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 1, 3, 0)
+
+    // mark all updated pods as ready at time 320
+    currentPods = podsByNodeMatchingHash(manager, hash)
+    setPodReadiness(t, manager, true, 3, func(pod *v1.Pod) bool {
+        return pod.Labels[apps.ControllerRevisionHashLabelKey] == hash
+    })
+
+    // the new pods are now considered available, so delete the old pods
+    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(340, 0))
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 0, 2, 0)
+
+    // controller has completed upgrade
+    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(350, 0))
+    clearExpectations(t, manager, ds, podControl)
+    expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+}
+
+func podsByNodeMatchingHash(dsc *daemonSetsController, hash string) map[string][]string {
+    byNode := make(map[string][]string)
+    for _, obj := range dsc.podStore.List() {
+        pod := obj.(*v1.Pod)
+        if pod.Labels[apps.ControllerRevisionHashLabelKey] != hash {
+            continue
+        }
+        nodeName, err := util.GetTargetNodeName(pod)
+        if err != nil {
+            panic(err)
+        }
+        byNode[nodeName] = append(byNode[nodeName], pod.Name)
+    }
+    return byNode
+}
+
+func setPodReadiness(t *testing.T, dsc *daemonSetsController, ready bool, count int, fn func(*v1.Pod) bool) {
+    t.Helper()
+    for _, obj := range dsc.podStore.List() {
+        if count <= 0 {
+            break
+        }
+        pod := obj.(*v1.Pod)
+        if pod.DeletionTimestamp != nil {
+            continue
+        }
+        if podutil.IsPodReady(pod) == ready {
+            continue
+        }
+        if !fn(pod) {
+            continue
+        }
+        condition := v1.PodCondition{Type: v1.PodReady}
+        if ready {
+            condition.Status = v1.ConditionTrue
+        } else {
+            condition.Status = v1.ConditionFalse
+        }
+        if !podutil.UpdatePodCondition(&pod.Status, &condition) {
+            t.Fatal("failed to update pod")
+        }
+        // TODO: workaround UpdatePodCondition calling time.Now() directly
+        setCondition := podutil.GetPodReadyCondition(pod.Status)
+        setCondition.LastTransitionTime.Time = dsc.failedPodsBackoff.Clock.Now()
+        klog.Infof("marked pod %s ready=%t", pod.Name, ready)
+        count--
+    }
+    if count > 0 {
+        t.Fatalf("could not mark %d pods ready=%t", count, ready)
+    }
+}
+
+func currentDSHash(dsc *daemonSetsController, ds *apps.DaemonSet) (string, error) {
+    // Construct histories of the DaemonSet, and get the hash of current history
+    cur, _, err := dsc.constructHistory(ds)
+    if err != nil {
+        return "", err
+    }
+    return cur.Labels[apps.DefaultDaemonSetUniqueLabelKey], nil
+
+}
+
 func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
     ds := newDaemonSet("foo")
     manager, podControl, _, err := newTestController(ds)
@@ -163,12 +356,34 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
     clearExpectations(t, manager, ds, podControl)
 }
 
+func newUpdateSurge(value intstr.IntOrString) apps.DaemonSetUpdateStrategy {
+    zero := intstr.FromInt(0)
+    return apps.DaemonSetUpdateStrategy{
+        Type: apps.RollingUpdateDaemonSetStrategyType,
+        RollingUpdate: &apps.RollingUpdateDaemonSet{
+            MaxUnavailable: &zero,
+            MaxSurge:       &value,
+        },
+    }
+}
+
+func newUpdateUnavailable(value intstr.IntOrString) apps.DaemonSetUpdateStrategy {
+    return apps.DaemonSetUpdateStrategy{
+        Type: apps.RollingUpdateDaemonSetStrategyType,
+        RollingUpdate: &apps.RollingUpdateDaemonSet{
+            MaxUnavailable: &value,
+        },
+    }
+}
+
 func TestGetUnavailableNumbers(t *testing.T) {
     cases := []struct {
         name           string
         Manager        *daemonSetsController
         ds             *apps.DaemonSet
         nodeToPods     map[string][]*v1.Pod
+        enableSurge    bool
+        maxSurge       int
         maxUnavailable int
         numUnavailable int
         Err            error
@@ -184,8 +399,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
             }(),
             ds: func() *apps.DaemonSet {
                 ds := newDaemonSet("x")
-                intStr := intstr.FromInt(0)
-                ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+                ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(0))
                 return ds
             }(),
             nodeToPods: make(map[string][]*v1.Pod),
@@ -204,8 +418,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
             }(),
             ds: func() *apps.DaemonSet {
                 ds := newDaemonSet("x")
-                intStr := intstr.FromInt(1)
-                ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+                ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(1))
                 return ds
             }(),
             nodeToPods: func() map[string][]*v1.Pod {
@@ -233,8 +446,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
             }(),
             ds: func() *apps.DaemonSet {
                 ds := newDaemonSet("x")
-                intStr := intstr.FromInt(0)
-                ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+                ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(0))
                 return ds
             }(),
             nodeToPods: func() map[string][]*v1.Pod {
@@ -244,7 +456,32 @@ func TestGetUnavailableNumbers(t *testing.T) {
                 mapping["node-0"] = []*v1.Pod{pod0}
                 return mapping
             }(),
-            maxUnavailable: 0,
+            maxUnavailable: 1,
             numUnavailable: 1,
         },
+        {
+            name: "Two nodes, one node without pods, surge",
+            Manager: func() *daemonSetsController {
+                manager, _, _, err := newTestController()
+                if err != nil {
+                    t.Fatalf("error creating DaemonSets controller: %v", err)
+                }
+                addNodes(manager.nodeStore, 0, 2, nil)
+                return manager
+            }(),
+            ds: func() *apps.DaemonSet {
+                ds := newDaemonSet("x")
+                ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(0))
+                return ds
+            }(),
+            nodeToPods: func() map[string][]*v1.Pod {
+                mapping := make(map[string][]*v1.Pod)
+                pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
+                markPodReady(pod0)
+                mapping["node-0"] = []*v1.Pod{pod0}
+                return mapping
+            }(),
+            maxUnavailable: 1,
+            numUnavailable: 1,
+        },
         {
@@ -259,8 +496,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
             }(),
             ds: func() *apps.DaemonSet {
                 ds := newDaemonSet("x")
-                intStr := intstr.FromString("50%")
-                ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+                ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromString("50%"))
                 return ds
             }(),
             nodeToPods: func() map[string][]*v1.Pod {
@@ -276,6 +512,66 @@ func TestGetUnavailableNumbers(t *testing.T) {
             maxUnavailable: 1,
             numUnavailable: 0,
         },
+        {
+            name: "Two nodes with pods, MaxUnavailable in percents, surge",
+            Manager: func() *daemonSetsController {
+                manager, _, _, err := newTestController()
+                if err != nil {
+                    t.Fatalf("error creating DaemonSets controller: %v", err)
+                }
+                addNodes(manager.nodeStore, 0, 2, nil)
+                return manager
+            }(),
+            ds: func() *apps.DaemonSet {
+                ds := newDaemonSet("x")
+                ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromString("50%"))
+                return ds
+            }(),
+            nodeToPods: func() map[string][]*v1.Pod {
+                mapping := make(map[string][]*v1.Pod)
+                pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
+                pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
+                markPodReady(pod0)
+                markPodReady(pod1)
+                mapping["node-0"] = []*v1.Pod{pod0}
+                mapping["node-1"] = []*v1.Pod{pod1}
+                return mapping
+            }(),
+            enableSurge:    true,
+            maxSurge:       1,
+            maxUnavailable: 0,
+            numUnavailable: 0,
+        },
+        {
+            name: "Two nodes with pods, MaxUnavailable is 100%, surge",
+            Manager: func() *daemonSetsController {
+                manager, _, _, err := newTestController()
+                if err != nil {
+                    t.Fatalf("error creating DaemonSets controller: %v", err)
+                }
+                addNodes(manager.nodeStore, 0, 2, nil)
+                return manager
+            }(),
+            ds: func() *apps.DaemonSet {
+                ds := newDaemonSet("x")
+                ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromString("100%"))
+                return ds
+            }(),
+            nodeToPods: func() map[string][]*v1.Pod {
+                mapping := make(map[string][]*v1.Pod)
+                pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
+                pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
+                markPodReady(pod0)
+                markPodReady(pod1)
+                mapping["node-0"] = []*v1.Pod{pod0}
+                mapping["node-1"] = []*v1.Pod{pod1}
+                return mapping
+            }(),
+            enableSurge:    true,
+            maxSurge:       2,
+            maxUnavailable: 0,
+            numUnavailable: 0,
+        },
         {
             name: "Two nodes with pods, MaxUnavailable in percents, pod terminating",
             Manager: func() *daemonSetsController {
@@ -288,8 +584,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
             }(),
             ds: func() *apps.DaemonSet {
                 ds := newDaemonSet("x")
-                intStr := intstr.FromString("50%")
-                ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+                ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromString("50%"))
                 return ds
             }(),
             nodeToPods: func() map[string][]*v1.Pod {
@@ -310,20 +605,26 @@ func TestGetUnavailableNumbers(t *testing.T) {
     }
 
     for _, c := range cases {
-        c.Manager.dsStore.Add(c.ds)
-        nodeList, err := c.Manager.nodeLister.List(labels.Everything())
-        if err != nil {
-            t.Fatalf("error listing nodes: %v", err)
-        }
-        maxUnavailable, numUnavailable, err := c.Manager.getUnavailableNumbers(c.ds, nodeList, c.nodeToPods)
-        if err != nil && c.Err != nil {
-            if c.Err != err {
-                t.Errorf("Test case: %s. Expected error: %v but got: %v", c.name, c.Err, err)
+        t.Run(c.name, func(t *testing.T) {
+            defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, c.enableSurge)()
+
+            c.Manager.dsStore.Add(c.ds)
+            nodeList, err := c.Manager.nodeLister.List(labels.Everything())
+            if err != nil {
+                t.Fatalf("error listing nodes: %v", err)
             }
-        } else if err != nil {
-            t.Errorf("Test case: %s. Unexpected error: %v", c.name, err)
-        } else if maxUnavailable != c.maxUnavailable || numUnavailable != c.numUnavailable {
-            t.Errorf("Test case: %s. Wrong values. maxUnavailable: %d, expected: %d, numUnavailable: %d. expected: %d", c.name, maxUnavailable, c.maxUnavailable, numUnavailable, c.numUnavailable)
-        }
+            maxSurge, maxUnavailable, numUnavailable, err := c.Manager.getUnavailableNumbers(c.ds, nodeList, c.nodeToPods)
+            if err != nil && c.Err != nil {
+                if c.Err != err {
+                    t.Fatalf("Expected error: %v but got: %v", c.Err, err)
+                }
+            }
+            if err != nil {
+                t.Fatalf("Unexpected error: %v", err)
+            }
+            if maxSurge != c.maxSurge || maxUnavailable != c.maxUnavailable || numUnavailable != c.numUnavailable {
+                t.Fatalf("Wrong values. maxSurge: %d, expected %d, maxUnavailable: %d, expected: %d, numUnavailable: %d. expected: %d", maxSurge, c.maxSurge, maxUnavailable, c.maxUnavailable, numUnavailable, c.numUnavailable)
+            }
+        })
     }
 }