Add ImageVolumeSource implementation

This patch adds the kubelet implementation of the image volume source
feature.

Signed-off-by: Sascha Grunert <sgrunert@redhat.com>
This commit is contained in:
Sascha Grunert
2024-06-24 11:31:06 +02:00
parent c4bd05df1c
commit 979863d15c
20 changed files with 563 additions and 76 deletions

View File

@@ -169,22 +169,10 @@ func calcRestartCountByLogDir(path string) (int, error) {
return restartCount, nil
}
// startContainer starts a container and returns a message indicating why it failed on error.
// It starts the container through the following steps:
// * pull the image
// * create the container
// * start the container
// * run the post start lifecycle hooks (if applicable)
func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) {
container := spec.container
// Step 1: pull the image.
func (m *kubeGenericRuntimeManager) getPodRuntimeHandler(pod *v1.Pod) (podRuntimeHandler string, err error) {
// If RuntimeClassInImageCriAPI feature gate is enabled, pass runtimehandler
// information for the runtime class specified. If no runtime class is
// specified, then pass ""
podRuntimeHandler := ""
var err error
if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI) {
if pod.Spec.RuntimeClassName != nil && *pod.Spec.RuntimeClassName != "" {
podRuntimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
@@ -195,7 +183,30 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
}
}
imageRef, msg, err := m.imagePuller.EnsureImageExists(ctx, pod, container, pullSecrets, podSandboxConfig, podRuntimeHandler)
return podRuntimeHandler, nil
}
// startContainer starts a container and returns a message indicating why it failed on error.
// It starts the container through the following steps:
// * pull the image
// * create the container
// * start the container
// * run the post start lifecycle hooks (if applicable)
func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string, imageVolumes kubecontainer.ImageVolumes) (string, error) {
container := spec.container
// Step 1: pull the image.
podRuntimeHandler, err := m.getPodRuntimeHandler(pod)
if err != nil {
return "", err
}
ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil {
klog.ErrorS(err, "Couldn't make a ref to pod", "pod", klog.KObj(pod), "containerName", container.Name)
}
imageRef, msg, err := m.imagePuller.EnsureImageExists(ctx, ref, pod, container.Image, pullSecrets, podSandboxConfig, podRuntimeHandler, container.ImagePullPolicy)
if err != nil {
s, _ := grpcstatus.FromError(err)
m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
@@ -234,7 +245,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
return s.Message(), ErrCreateContainerConfig
}
containerConfig, cleanupAction, err := m.generateContainerConfig(ctx, container, pod, restartCount, podIP, imageRef, podIPs, target)
containerConfig, cleanupAction, err := m.generateContainerConfig(ctx, container, pod, restartCount, podIP, imageRef, podIPs, target, imageVolumes)
if cleanupAction != nil {
defer cleanupAction()
}
@@ -317,8 +328,8 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
}
// generateContainerConfig generates container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateContainerConfig(ctx context.Context, container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID) (*runtimeapi.ContainerConfig, func(), error) {
opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, podIPs)
func (m *kubeGenericRuntimeManager) generateContainerConfig(ctx context.Context, container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID, imageVolumes kubecontainer.ImageVolumes) (*runtimeapi.ContainerConfig, func(), error) {
opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, podIPs, imageVolumes)
if err != nil {
return nil, nil, err
}
@@ -436,6 +447,7 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
SelinuxRelabel: selinuxRelabel,
Propagation: v.Propagation,
RecursiveReadOnly: v.RecursiveReadOnly,
Image: v.Image,
}
volumeMounts = append(volumeMounts, mount)
@@ -651,6 +663,18 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin
cStatus.ExitCode = int(status.ExitCode)
cStatus.FinishedAt = time.Unix(0, status.FinishedAt)
}
for _, mount := range status.Mounts {
cStatus.Mounts = append(cStatus.Mounts, kubecontainer.Mount{
HostPath: mount.HostPath,
ContainerPath: mount.ContainerPath,
ReadOnly: mount.Readonly,
RecursiveReadOnly: mount.RecursiveReadOnly,
SELinuxRelabel: mount.SelinuxRelabel,
Propagation: mount.Propagation,
Image: mount.Image,
})
}
return cStatus
}