Fix some assertions for integ tests

Signed-off-by: Henry Wang <henwang@amazon.com>
Henry Wang 2023-10-01 07:35:22 +00:00
parent 3a3d5dee15
commit 0d76fe5c1d
7 changed files with 38 additions and 38 deletions
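
The edits below all follow testify's argument order, in which the expected value comes before the actual one, and swap boolean-wrapped checks for the more specific helpers (ErrorContains, True, NotNil). The practical reason is the failure output: testify prints the first argument as "expected" and the second as "actual", so the old order would label the values the wrong way around. A minimal sketch of the convention follows; the package and test name are hypothetical, and the values are borrowed from the diff:

package integration

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestAssertionConventions(t *testing.T) {
    // Equal takes (t, expected, actual); with the arguments swapped the test
    // still passes, but a failure reports the values under the wrong labels.
    got := "managed"
    require.Equal(t, "managed", got)

    // ErrorContains replaces Equal(t, true, strings.Contains(err.Error(), ...))
    // and prints the full error text when the substring is missing.
    err := errors.New("failpoint: you-shall-not-pass!")
    require.ErrorContains(t, err, "you-shall-not-pass!")

    // True and NotNil replace Equal(t, ok, true) and NotEqual(t, nil, res).
    ok := true
    assert.True(t, ok)
    assert.NotNil(t, &got)
}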

@@ -252,7 +252,7 @@ func TestContainerListStatsWithIdFilter(t *testing.T) {
         t.Logf("Verify container stats for %s", id)
         for _, s := range stats {
-            require.Equal(t, s.GetAttributes().GetId(), id)
+            require.Equal(t, id, s.GetAttributes().GetId())
             testStats(t, s, containerConfigMap[id])
         }
     }
@@ -406,9 +406,9 @@ func testStats(t *testing.T,
     require.NotEmpty(t, s.GetAttributes().GetId())
     require.NotEmpty(t, s.GetAttributes().GetMetadata())
     require.NotEmpty(t, s.GetAttributes().GetAnnotations())
-    require.Equal(t, s.GetAttributes().GetLabels(), config.Labels)
-    require.Equal(t, s.GetAttributes().GetAnnotations(), config.Annotations)
-    require.Equal(t, s.GetAttributes().GetMetadata().Name, config.Metadata.Name)
+    require.Equal(t, config.Labels, s.GetAttributes().GetLabels())
+    require.Equal(t, config.Annotations, s.GetAttributes().GetAnnotations())
+    require.Equal(t, config.Metadata.Name, s.GetAttributes().GetMetadata().Name)
     require.NotEmpty(t, s.GetAttributes().GetLabels())
     require.NotEmpty(t, s.GetCpu().GetTimestamp())
     require.NotEmpty(t, s.GetCpu().GetUsageCoreNanoSeconds().GetValue())

@@ -67,7 +67,7 @@ func TestSharedPidMultiProcessContainerStop(t *testing.T) {
             t.Log("The container state should be exited")
             s, err := runtimeService.ContainerStatus(cn)
             require.NoError(t, err)
-            assert.Equal(t, s.GetState(), runtime.ContainerState_CONTAINER_EXITED)
+            assert.Equal(t, runtime.ContainerState_CONTAINER_EXITED, s.GetState())
         })
     }
 }
@@ -126,5 +126,5 @@ func TestContainerStopCancellation(t *testing.T) {
     t.Log("The container state should be exited")
     s, err := runtimeService.ContainerStatus(cn)
     require.NoError(t, err)
-    assert.Equal(t, s.GetState(), runtime.ContainerState_CONTAINER_EXITED)
+    assert.Equal(t, runtime.ContainerState_CONTAINER_EXITED, s.GetState())
 }

@@ -53,7 +53,7 @@ func TestContainerLifecycleWithoutImageRef(t *testing.T) {
     t.Log("Container status should be running")
     status, err := runtimeService.ContainerStatus(cn)
     require.NoError(t, err)
-    assert.Equal(t, status.GetState(), runtime.ContainerState_CONTAINER_RUNNING)
+    assert.Equal(t, runtime.ContainerState_CONTAINER_RUNNING, status.GetState())
 
     t.Logf("Stop container")
     err = runtimeService.StopContainer(cn, 1)
@@ -62,5 +62,5 @@ func TestContainerLifecycleWithoutImageRef(t *testing.T) {
     t.Log("Container status should be exited")
     status, err = runtimeService.ContainerStatus(cn)
     require.NoError(t, err)
-    assert.Equal(t, status.GetState(), runtime.ContainerState_CONTAINER_EXITED)
+    assert.Equal(t, runtime.ContainerState_CONTAINER_EXITED, status.GetState())
 }

@@ -117,18 +117,18 @@ func TestContainerdImage(t *testing.T) {
     t.Logf("the image should be marked as managed")
     imgByRef, err := containerdClient.GetImage(ctx, testImage)
     assert.NoError(t, err)
-    assert.Equal(t, imgByRef.Labels()["io.cri-containerd.image"], "managed")
+    assert.Equal(t, "managed", imgByRef.Labels()["io.cri-containerd.image"])
 
     t.Logf("the image id should be created and managed")
     imgByID, err := containerdClient.GetImage(ctx, id)
     assert.NoError(t, err)
-    assert.Equal(t, imgByID.Labels()["io.cri-containerd.image"], "managed")
+    assert.Equal(t, "managed", imgByID.Labels()["io.cri-containerd.image"])
 
     t.Logf("the image should be labeled")
     img, err := containerdClient.GetImage(ctx, testImage)
     assert.NoError(t, err)
-    assert.Equal(t, img.Labels()["foo"], "bar")
-    assert.Equal(t, img.Labels()[labels.ImageLabelKey], labels.ImageLabelValue)
+    assert.Equal(t, "bar", img.Labels()["foo"])
+    assert.Equal(t, labels.ImageLabelValue, img.Labels()[labels.ImageLabelKey])
 
     t.Logf("the image should be pinned")
     i, err = imageService.ImageStatus(&runtime.ImageSpec{Image: testImage})
@@ -225,7 +225,7 @@ func TestContainerdSandboxImage(t *testing.T) {
     pauseImg, err := containerdClient.GetImage(ctx, pauseImage)
     require.NoError(t, err)
     t.Log("ensure correct labels are set on pause image")
-    assert.Equal(t, pauseImg.Labels()["io.cri-containerd.pinned"], "pinned")
+    assert.Equal(t, "pinned", pauseImg.Labels()["io.cri-containerd.pinned"])
 
     t.Log("pause image should be seen by cri plugin")
     pimg, err := imageService.ImageStatus(&runtime.ImageSpec{Image: pauseImage})

@@ -271,12 +271,12 @@ func testCRIImagePullTimeoutByNoDataTransferred(t *testing.T) {
             },
         })
-        assert.Equal(t, errors.Unwrap(err), context.Canceled, "[%v] expected canceled error, but got (%v)", idx, err)
-        assert.Equal(t, mirrorSrv.limiter.clearHitCircuitBreaker(), true, "[%v] expected to hit circuit breaker", idx)
+        assert.Equal(t, context.Canceled, errors.Unwrap(err), "[%v] expected canceled error, but got (%v)", idx, err)
+        assert.True(t, mirrorSrv.limiter.clearHitCircuitBreaker(), "[%v] expected to hit circuit breaker", idx)
 
         // cleanup the temp data by sync delete
         lid, ok := leases.FromContext(dctx)
-        assert.Equal(t, ok, true)
+        assert.True(t, ok)
         err = cli.LeasesService().Delete(ctx, leases.Lease{ID: lid}, leases.SynchronousDelete)
         assert.NoError(t, err)

@@ -67,7 +67,7 @@ func TestRunPodSandboxWithSetupCNIFailure(t *testing.T) {
     t.Logf("Create a sandbox")
     _, err := runtimeService.RunPodSandbox(sbConfig, failpointRuntimeHandler)
     require.Error(t, err)
-    require.Equal(t, true, strings.Contains(err.Error(), "you-shall-not-pass!"))
+    require.ErrorContains(t, err, "you-shall-not-pass!")
 
     t.Logf("Retry to create sandbox with same config")
     sb, err := runtimeService.RunPodSandbox(sbConfig, failpointRuntimeHandler)
@@ -95,7 +95,7 @@ func TestRunPodSandboxWithShimStartFailure(t *testing.T) {
     t.Logf("Create a sandbox")
     _, err := runtimeService.RunPodSandbox(sbConfig, failpointRuntimeHandler)
     require.Error(t, err)
-    require.Equal(t, true, strings.Contains(err.Error(), "no hard feelings"))
+    require.ErrorContains(t, err, "no hard feelings")
 }
 
 // TestRunPodSandboxWithShimDeleteFailure should keep the sandbox record if
@@ -130,16 +130,16 @@ func TestRunPodSandboxWithShimDeleteFailure(t *testing.T) {
     require.Len(t, l, 1)
     sb := l[0]
-    require.Equal(t, sb.State, criapiv1.PodSandboxState_SANDBOX_NOTREADY)
-    require.Equal(t, sb.Metadata.Name, sbConfig.Metadata.Name)
-    require.Equal(t, sb.Metadata.Namespace, sbConfig.Metadata.Namespace)
-    require.Equal(t, sb.Metadata.Uid, sbConfig.Metadata.Uid)
-    require.Equal(t, sb.Metadata.Attempt, sbConfig.Metadata.Attempt)
+    require.Equal(t, criapiv1.PodSandboxState_SANDBOX_NOTREADY, sb.State)
+    require.Equal(t, sbConfig.Metadata.Name, sb.Metadata.Name)
+    require.Equal(t, sbConfig.Metadata.Namespace, sb.Metadata.Namespace)
+    require.Equal(t, sbConfig.Metadata.Uid, sb.Metadata.Uid)
+    require.Equal(t, sbConfig.Metadata.Attempt, sb.Metadata.Attempt)
 
     t.Log("Check PodSandboxStatus")
     sbStatus, err := runtimeService.PodSandboxStatus(sb.Id)
     require.NoError(t, err)
-    require.Equal(t, sbStatus.State, criapiv1.PodSandboxState_SANDBOX_NOTREADY)
+    require.Equal(t, criapiv1.PodSandboxState_SANDBOX_NOTREADY, sbStatus.State)
     require.Greater(t, len(sbStatus.Network.Ip), 0)
 
     if restart {
@@ -150,13 +150,13 @@ func TestRunPodSandboxWithShimDeleteFailure(t *testing.T) {
         l, err = runtimeService.ListPodSandbox(&criapiv1.PodSandboxFilter{Id: sb.Id})
         require.NoError(t, err)
         require.Len(t, l, 1)
-        require.Equal(t, l[0].State, criapiv1.PodSandboxState_SANDBOX_NOTREADY)
+        require.Equal(t, criapiv1.PodSandboxState_SANDBOX_NOTREADY, l[0].State)
 
         t.Log("Check PodSandboxStatus")
         sbStatus, err := runtimeService.PodSandboxStatus(sb.Id)
         require.NoError(t, err)
         t.Log(sbStatus.Network)
-        require.Equal(t, sbStatus.State, criapiv1.PodSandboxState_SANDBOX_NOTREADY)
+        require.Equal(t, criapiv1.PodSandboxState_SANDBOX_NOTREADY, sbStatus.State)
     }
 
     t.Log("Cleanup leaky sandbox")
@@ -206,11 +206,11 @@ func TestRunPodSandboxWithShimStartAndTeardownCNIFailure(t *testing.T) {
     require.Len(t, l, 1)
     sb := l[0]
-    require.Equal(t, sb.State, criapiv1.PodSandboxState_SANDBOX_NOTREADY)
-    require.Equal(t, sb.Metadata.Name, sbConfig.Metadata.Name)
-    require.Equal(t, sb.Metadata.Namespace, sbConfig.Metadata.Namespace)
-    require.Equal(t, sb.Metadata.Uid, sbConfig.Metadata.Uid)
-    require.Equal(t, sb.Metadata.Attempt, sbConfig.Metadata.Attempt)
+    require.Equal(t, criapiv1.PodSandboxState_SANDBOX_NOTREADY, sb.State)
+    require.Equal(t, sbConfig.Metadata.Name, sb.Metadata.Name)
+    require.Equal(t, sbConfig.Metadata.Namespace, sb.Metadata.Namespace)
+    require.Equal(t, sbConfig.Metadata.Uid, sb.Metadata.Uid)
+    require.Equal(t, sbConfig.Metadata.Attempt, sb.Metadata.Attempt)
 
     if restart {
         t.Log("Restart containerd")
@@ -220,7 +220,7 @@ func TestRunPodSandboxWithShimStartAndTeardownCNIFailure(t *testing.T) {
         l, err = runtimeService.ListPodSandbox(&criapiv1.PodSandboxFilter{Id: sb.Id})
         require.NoError(t, err)
         require.Len(t, l, 1)
-        require.Equal(t, l[0].State, criapiv1.PodSandboxState_SANDBOX_NOTREADY)
+        require.Equal(t, criapiv1.PodSandboxState_SANDBOX_NOTREADY, l[0].State)
     }
 
     t.Log("Cleanup leaky sandbox")
@@ -287,11 +287,11 @@ func TestRunPodSandboxAndTeardownCNISlow(t *testing.T) {
         require.NoError(t, err)
     }()
 
-    assert.Equal(t, sb.State, criapiv1.PodSandboxState_SANDBOX_NOTREADY)
-    assert.Equal(t, sb.Metadata.Name, sbConfig.Metadata.Name)
-    assert.Equal(t, sb.Metadata.Namespace, sbConfig.Metadata.Namespace)
-    assert.Equal(t, sb.Metadata.Uid, sbConfig.Metadata.Uid)
-    assert.Equal(t, sb.Metadata.Attempt, sbConfig.Metadata.Attempt)
+    assert.Equal(t, criapiv1.PodSandboxState_SANDBOX_NOTREADY, sb.State)
+    assert.Equal(t, sbConfig.Metadata.Name, sb.Metadata.Name)
+    assert.Equal(t, sbConfig.Metadata.Namespace, sb.Metadata.Namespace)
+    assert.Equal(t, sbConfig.Metadata.Uid, sb.Metadata.Uid)
+    assert.Equal(t, sbConfig.Metadata.Attempt, sb.Metadata.Attempt)
 
     if os.Getenv("DISABLE_CRI_SANDBOXES") != "" {
         // non-sbserver

@@ -42,7 +42,7 @@ func TestTruncIndex(t *testing.T) {
     t.Logf("Get image status by truncindex, truncID: %s", imgTruncID)
     res, err := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imgTruncID})
     require.NoError(t, err)
-    require.NotEqual(t, nil, res)
+    require.NotNil(t, res)
     assert.Equal(t, imgID, res.Id)
 
     // TODO(yanxuean): for failure test case where there are two images with the same truncindex.