adding pods to MarkPodsNotReady parameters
@@ -22,7 +22,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -33,7 +32,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	appsv1listers "k8s.io/client-go/listers/apps/v1"
 	utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
-	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	nodepkg "k8s.io/kubernetes/pkg/util/node"
@@ -111,19 +109,13 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
 	return updatedPod, nil
 }
 
-// MarkAllPodsNotReady updates ready status of all pods running on
+// MarkPodsNotReady updates ready status of given pods running on
 // given node from master return true if success
-func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
-	nodeName := node.Name
+func MarkPodsNotReady(kubeClient clientset.Interface, pods []v1.Pod, nodeName string) error {
 	klog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
-	opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
-	pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(opts)
-	if err != nil {
-		return err
-	}
 
 	errMsg := []string{}
-	for _, pod := range pods.Items {
+	for _, pod := range pods {
 		// Defensive check, also needed for tests.
 		if pod.Spec.NodeName != nodeName {
 			continue
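
For context, below is a minimal caller-side sketch (not part of this commit) of how code that previously called MarkAllPodsNotReady(kubeClient, node) might adapt to the new MarkPodsNotReady(kubeClient, pods, nodeName) signature: the caller now lists the pods on the node itself and passes the slice in. The markNodePodsNotReady helper and the nodeutil import alias are illustrative assumptions; the field-selector lookup mirrors the listing code this commit removes from the utility, and the context-free List(opts) call matches the client-go version shown in the diff.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	clientset "k8s.io/client-go/kubernetes"

	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
)

// markNodePodsNotReady is a hypothetical caller-side helper: it performs the
// pod listing that MarkAllPodsNotReady used to do internally, then hands the
// resulting slice to the new MarkPodsNotReady signature.
func markNodePodsNotReady(kubeClient clientset.Interface, nodeName string) error {
	// Same field-selector lookup the utility used to perform
	// ("spec.nodeName" is the field behind api.PodHostField).
	opts := metav1.ListOptions{
		FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(),
	}
	podList, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(opts)
	if err != nil {
		return fmt.Errorf("failed to list pods on node %q: %v", nodeName, err)
	}

	// New call shape: the caller supplies the pods and the node name.
	return nodeutil.MarkPodsNotReady(kubeClient, podList.Items, nodeName)
}

A likely benefit of this shape, though the commit does not state it, is that a caller which already holds the node's pods (for example from an informer cache) can pass them directly and skip the extra API-server list.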