From 95d2a3b7c0a9d8e380a31744b7eba2d973e5e319 Mon Sep 17 00:00:00 2001
From: Abel Feng
Date: Tue, 28 Nov 2023 14:55:51 +0800
Subject: [PATCH 1/2] upgrade: add cri upgrade test case

Signed-off-by: Abel Feng
---
 integration/release_upgrade_linux_test.go | 90 ++++++++++++++++++++++-
 1 file changed, 89 insertions(+), 1 deletion(-)

diff --git a/integration/release_upgrade_linux_test.go b/integration/release_upgrade_linux_test.go
index 228c440b6..66c94c276 100644
--- a/integration/release_upgrade_linux_test.go
+++ b/integration/release_upgrade_linux_test.go
@@ -49,6 +49,7 @@ func TestUpgrade(t *testing.T) {
 	t.Run("recover", runUpgradeTestCase(previousReleaseBinDir, shouldRecoverAllThePodsAfterUpgrade))
 	t.Run("exec", runUpgradeTestCase(previousReleaseBinDir, execToExistingContainer))
+	t.Run("manipulate", runUpgradeTestCase(previousReleaseBinDir, shouldManipulateContainersInPodAfterUpgrade))
 	// TODO:
 	// Add stats/stop-existing-running-pods/...
 }
@@ -195,7 +196,6 @@ func execToExistingContainer(t *testing.T, criRuntimeService cri.RuntimeService,
 	t.Logf("Pulling image %q", busyboxImage)
 	_, err := criImageService.PullImage(&criruntime.ImageSpec{Image: busyboxImage}, nil, nil)
 	require.NoError(t, err)
-
 	t.Log("Create sandbox")
 	sbConfig := PodSandboxConfig("sandbox", "running")
 	sbConfig.LogDirectory = t.TempDir()
@@ -258,6 +258,94 @@ func getFileSize(t *testing.T, filePath string) int64 {
 	return st.Size()
 }
 
+func shouldManipulateContainersInPodAfterUpgrade(t *testing.T, criRuntimeService cri.RuntimeService, criImageService cri.ImageManagerService) upgradeVerifyCaseFunc {
+	var busyboxImage = images.Get(images.BusyBox)
+
+	t.Logf("Pulling image %q", busyboxImage)
+	_, err := criImageService.PullImage(&criruntime.ImageSpec{Image: busyboxImage}, nil, nil)
+	require.NoError(t, err)
+
+	t.Log("Create a sandbox")
+	sbConfig := PodSandboxConfig("sandbox", "running-pod")
+	sb, err := criRuntimeService.RunPodSandbox(sbConfig, "")
+	require.NoError(t, err)
+
+	t.Logf("Create a container config and run container in the pod")
+	containerConfig := ContainerConfig("running", busyboxImage, WithCommand("sleep", "1d"))
+	cn1, err := criRuntimeService.CreateContainer(sb, containerConfig, sbConfig)
+	require.NoError(t, err)
+	require.NoError(t, criRuntimeService.StartContainer(cn1))
+
+	t.Logf("Just create a container in the pod")
+	containerConfig = ContainerConfig("created", busyboxImage)
+	cn2, err := criRuntimeService.CreateContainer(sb, containerConfig, sbConfig)
+	require.NoError(t, err)
+
+	t.Logf("Just create stopped container in the pod")
+	containerConfig = ContainerConfig("stopped", busyboxImage, WithCommand("sleep", "1d"))
+	cn3, err := criRuntimeService.CreateContainer(sb, containerConfig, sbConfig)
+	require.NoError(t, err)
+	require.NoError(t, criRuntimeService.StartContainer(cn3))
+	require.NoError(t, criRuntimeService.StopContainer(cn3, 0))
+
+	return func(t *testing.T, criRuntimeService cri.RuntimeService) {
+		t.Log("Manipulating containers in the previous pod")
+		// For the running container, we get status and stats of it,
+		// exec and execsync in it, stop and remove it
+		status, err := criRuntimeService.ContainerStatus(cn1)
+		require.NoError(t, err)
+		assert.Equal(t, status.State, criruntime.ContainerState_CONTAINER_RUNNING)
+		_, err = criRuntimeService.ContainerStats(cn1)
+		require.NoError(t, err)
+		_, err = criRuntimeService.Exec(&criruntime.ExecRequest{
+			ContainerId: cn1,
+			Cmd:         []string{"/bin/sh"},
+			Stderr:      false,
+			Stdout:      true,
+			Stdin:       true,
+			Tty:         true,
+		})
+		require.NoError(t, err)
+
+		require.NoError(t, criRuntimeService.StopContainer(cn1, 0))
+		status, err = criRuntimeService.ContainerStatus(cn1)
+		require.NoError(t, err)
+		assert.Equal(t, status.State, criruntime.ContainerState_CONTAINER_EXITED)
+		require.NoError(t, criRuntimeService.RemoveContainer(cn1))
+
+		// For the created container, we start it, stop it and remove it
+		status, err = criRuntimeService.ContainerStatus(cn2)
+		require.NoError(t, err)
+		assert.Equal(t, status.State, criruntime.ContainerState_CONTAINER_CREATED)
+		require.NoError(t, criRuntimeService.StartContainer(cn2))
+		status, err = criRuntimeService.ContainerStatus(cn2)
+		require.NoError(t, err)
+		assert.Equal(t, status.State, criruntime.ContainerState_CONTAINER_RUNNING)
+		require.NoError(t, criRuntimeService.StopContainer(cn2, 0))
+		status, err = criRuntimeService.ContainerStatus(cn2)
+		require.NoError(t, err)
+		assert.Equal(t, status.State, criruntime.ContainerState_CONTAINER_EXITED)
+		require.NoError(t, criRuntimeService.RemoveContainer(cn2))
+
+		// For the stopped container, we remove it
+		status, err = criRuntimeService.ContainerStatus(cn3)
+		require.NoError(t, err)
+		assert.Equal(t, status.State, criruntime.ContainerState_CONTAINER_EXITED)
+		require.NoError(t, criRuntimeService.RemoveContainer(cn3))
+
+		// Create a new container in the previous pod, start, stop, and remove it
+		t.Logf("Create a container config and run container in the previous pod")
+		containerConfig = ContainerConfig("runinpreviouspod", busyboxImage, WithCommand("sleep", "1d"))
+		cn4, err := criRuntimeService.CreateContainer(sb, containerConfig, sbConfig)
+		require.NoError(t, err)
+		require.NoError(t, criRuntimeService.StartContainer(cn4))
+		status, err = criRuntimeService.ContainerStatus(cn4)
+		require.NoError(t, err)
+		assert.Equal(t, status.State, criruntime.ContainerState_CONTAINER_RUNNING)
+		require.NoError(t, criRuntimeService.StopContainer(cn4, 0))
+		require.NoError(t, criRuntimeService.RemoveContainer(cn4))
+	}
+}
+
 // cleanupPods deletes all the pods based on the cri.RuntimeService connection.
 func cleanupPods(t *testing.T, criRuntimeService cri.RuntimeService) {
 	pods, err := criRuntimeService.ListPodSandbox(nil)

From c0363754fb47fd50a9346f1377b43aee7122fad5 Mon Sep 17 00:00:00 2001
From: Abel Feng
Date: Tue, 28 Nov 2023 20:14:48 +0800
Subject: [PATCH 2/2] sandbox: get runtime info from sandbox or container

For backward compatibility, we should get the runtime info from the
sandbox record in the db, or fall back to the sandbox container record
in the db.

Note that this is a temporary solution: we will remove the Container
field from the Sandbox in the CRI cache and replace it with a
SandboxInstance field of the containerd.Sandbox interface type.
Signed-off-by: Abel Feng
---
 pkg/cri/server/container_create.go | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/pkg/cri/server/container_create.go b/pkg/cri/server/container_create.go
index 95571fc95..ebc71e8c5 100644
--- a/pkg/cri/server/container_create.go
+++ b/pkg/cri/server/container_create.go
@@ -263,14 +263,15 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
 
 	containerLabels := buildLabels(config.Labels, image.ImageSpec.Config.Labels, crilabels.ContainerKindContainer)
 
-	sandboxInfo, err := c.client.SandboxStore().Get(ctx, sandboxID)
+	// TODO the sandbox in the cache should hold this info
+	runtimeName, runtimeOption, err := c.runtimeInfo(ctx, sandboxID)
 	if err != nil {
-		return nil, fmt.Errorf("unable to get sandbox %q metdata: %w", sandboxID, err)
+		return nil, fmt.Errorf("unable to get sandbox %q runtime info: %w", sandboxID, err)
 	}
 
 	opts = append(opts,
 		containerd.WithSpec(spec, specOpts...),
-		containerd.WithRuntime(sandboxInfo.Runtime.Name, sandboxInfo.Runtime.Options),
+		containerd.WithRuntime(runtimeName, runtimeOption),
 		containerd.WithContainerLabels(containerLabels),
 		containerd.WithContainerExtension(crilabels.ContainerMetadataExtension, &meta),
 	)
@@ -1055,3 +1056,16 @@ func (c *criService) linuxContainerMounts(sandboxID string, config *runtime.Cont
 	}
 	return mounts
 }
+
+func (c *criService) runtimeInfo(ctx context.Context, id string) (string, typeurl.Any, error) {
+	sandboxInfo, err := c.client.SandboxStore().Get(ctx, id)
+	if err == nil {
+		return sandboxInfo.Runtime.Name, sandboxInfo.Runtime.Options, nil
+	}
+	sandboxContainer, legacyErr := c.client.ContainerService().Get(ctx, id)
+	if legacyErr == nil {
+		return sandboxContainer.Runtime.Name, sandboxContainer.Runtime.Options, nil
+	}
+
+	return "", nil, err
+}
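
Note for reviewers (not part of either patch): the sketch below is a minimal,
standalone illustration of the fallback pattern that runtimeInfo introduces in
patch 2 -- try the new sandbox store first, fall back to the legacy container
record, and report the primary error when both lookups fail. The map-backed
stores and the lookupRuntime helper are illustrative stand-ins, not containerd
APIs.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// lookupRuntime mirrors the shape of runtimeInfo: consult the sandbox store
// first, fall back to the legacy container store for sandboxes created by an
// older containerd, and if both lookups fail return the error from the
// primary (sandbox store) lookup.
func lookupRuntime(sandboxStore, containerStore map[string]string, id string) (string, error) {
	if name, ok := sandboxStore[id]; ok {
		return name, nil
	}
	err := fmt.Errorf("sandbox %q: %w", id, errNotFound)

	// Legacy path: older releases recorded the sandbox as a container.
	if name, ok := containerStore[id]; ok {
		return name, nil
	}
	return "", err
}

func main() {
	sandboxes := map[string]string{"sb-new": "io.containerd.runc.v2"}
	containers := map[string]string{"sb-old": "io.containerd.runc.v2"}

	for _, id := range []string{"sb-new", "sb-old", "sb-missing"} {
		name, err := lookupRuntime(sandboxes, containers, id)
		fmt.Printf("%s -> runtime=%q err=%v\n", id, name, err)
	}
}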