delete volumes before pod deletion

commit 67cb2704c5
parent 74cf0484c3
Author: David Ashpole
Date:   2017-02-07 16:59:39 -08:00
18 changed files with 165 additions and 29 deletions
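
In short: the status manager previously deleted a terminated pod's API object as soon as its DeletionTimestamp was set and no containers were running. This commit routes that decision through a new PodDeletionSafetyProvider interface, so the kubelet can additionally hold deletion until the pod's volumes have been cleaned up. A minimal sketch of a kubelet-side provider (an illustration, not part of the diff below; it assumes the notRunning helper removed from the status manager stays available to the kubelet, and a podVolumesExist check reporting whether any of the pod's volumes are still present on the node):

	// Sketch: one plausible implementation of PodDeletionSafetyProvider.
	// notRunning is the helper moved out of the status manager in this commit;
	// podVolumesExist is assumed to report whether any of the pod's volumes
	// are still present on the node.
	func (kl *Kubelet) OkToDeletePod(pod *v1.Pod) bool {
		if pod.DeletionTimestamp == nil {
			// Only pods already marked for deletion may be removed.
			return false
		}
		if !notRunning(pod.Status.ContainerStatuses) {
			// Some containers are still running; keep the API object.
			return false
		}
		if kl.podVolumesExist(pod.UID) {
			// Volumes have not been cleaned up yet; this is the point of the commit.
			return false
		}
		return true
	}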

pkg/kubelet/status/status_manager.go

@@ -67,6 +67,7 @@ type manager struct {
 	// Map from (mirror) pod UID to latest status version successfully sent to the API server.
 	// apiStatusVersions must only be accessed from the sync thread.
 	apiStatusVersions map[types.UID]uint64
+	podDeletionSafety PodDeletionSafetyProvider
 }
 
 // PodStatusProvider knows how to provide status for a pod. It's intended to be used by other components
@@ -77,6 +78,12 @@ type PodStatusProvider interface {
 	GetPodStatus(uid types.UID) (v1.PodStatus, bool)
 }
 
+// An object which provides guarantees that a pod can be safely deleted.
+type PodDeletionSafetyProvider interface {
+	// A function which returns true if the pod can safely be deleted
+	OkToDeletePod(pod *v1.Pod) bool
+}
+
 // Manager is the Source of truth for kubelet pod status, and should be kept up-to-date with
 // the latest v1.PodStatus. It also syncs updates back to the API server.
 type Manager interface {
@@ -103,13 +110,14 @@ type Manager interface {
 
 const syncPeriod = 10 * time.Second
 
-func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager) Manager {
+func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager, podDeletionSafety PodDeletionSafetyProvider) Manager {
 	return &manager{
 		kubeClient:        kubeClient,
 		podManager:        podManager,
 		podStatuses:       make(map[types.UID]versionedPodStatus),
 		podStatusChannel:  make(chan podStatusSyncRequest, 1000), // Buffer up to 1000 statuses
 		apiStatusVersions: make(map[types.UID]uint64),
+		podDeletionSafety: podDeletionSafety,
 	}
 }
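
Call sites that construct the status manager must now supply the provider. A sketch of the updated wiring (assumed names: the Kubelet itself satisfies PodDeletionSafetyProvider as in the sketch above, and status is the package alias):

	// The kubelet passes itself as the deletion-safety provider.
	klet.statusManager = status.NewManager(kubeClient, klet.podManager, klet)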
@@ -381,7 +389,7 @@ func (m *manager) syncBatch() {
 			}
 			syncedUID = mirrorUID
 		}
-		if m.needsUpdate(syncedUID, status) {
+		if m.needsUpdate(syncedUID, status) || m.couldBeDeleted(uid, status.status) {
 			updatedStatuses = append(updatedStatuses, podStatusSyncRequest{uid, status})
 		} else if m.needsReconcile(uid, status.status) {
 			// Delete the apiStatusVersions here to force an update on the pod status
@@ -434,11 +442,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
 		// We don't handle graceful deletion of mirror pods.
 		return
 	}
-	if pod.DeletionTimestamp == nil {
-		return
-	}
-	if !notRunning(pod.Status.ContainerStatuses) {
-		glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
+	if !m.podDeletionSafety.OkToDeletePod(pod) {
 		return
 	}
 	deleteOptions := metav1.NewDeleteOptions(0)
@@ -463,6 +467,15 @@ func (m *manager) needsUpdate(uid types.UID, status versionedPodStatus) bool {
 	return !ok || latest < status.version
 }
 
+func (m *manager) couldBeDeleted(uid types.UID, status v1.PodStatus) bool {
+	// The pod could be a static pod, so we should translate first.
+	pod, ok := m.podManager.GetPodByUID(uid)
+	if !ok {
+		return false
+	}
+	return !kubepod.IsMirrorPod(pod) && m.podDeletionSafety.OkToDeletePod(pod)
+}
+
 // needsReconcile compares the given status with the status in the pod manager (which
 // in fact comes from apiserver), returns whether the status needs to be reconciled with
 // the apiserver. Now when pod status is inconsistent between apiserver and kubelet,
@@ -563,17 +576,6 @@ func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus {
 	return status
 }
 
-// notRunning returns true if every status is terminated or waiting, or the status list
-// is empty.
-func notRunning(statuses []v1.ContainerStatus) bool {
-	for _, status := range statuses {
-		if status.State.Terminated == nil && status.State.Waiting == nil {
-			return false
-		}
-	}
-	return true
-}
-
 func copyStatus(source *v1.PodStatus) (v1.PodStatus, error) {
 	clone, err := api.Scheme.DeepCopy(source)
 	if err != nil {