Create image reference cache.
Signed-off-by: Lantao Liu <lantaol@google.com>
This commit is contained in: parent cfdf872493 · commit 953d67d250
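The change replaces the old per-pull bookkeeping (getImageInfo plus explicit imageStore.Add calls) with an image store that caches image references and reconciles itself against containerd: PullImage, LoadImage, RemoveImage and the restart recovery path now call imageStore.Update(ctx, ref), and the event monitor subscribes to "/images/" events so the cache follows out-of-band changes. The sketch below only illustrates the reference-cache idea; the real store lives in pkg/store/image (not part of this excerpt), so the type and field names here are assumptions, not the actual implementation.

```go
// Illustrative sketch only: a reference cache in the spirit of this commit.
// The real pkg/store/image implementation differs; names here are invented.
package main

import (
	"fmt"
	"sync"
)

// refCache maps an image reference (tag, digest, or ID) to the image ID it
// currently points at, so CRI calls can resolve references locally instead
// of querying containerd every time.
type refCache struct {
	mu   sync.RWMutex
	refs map[string]string // reference -> image ID
}

func newRefCache() *refCache {
	return &refCache{refs: make(map[string]string)}
}

// update records or removes a reference. In the real plugin this is driven by
// containerd ImageCreate/ImageUpdate/ImageDelete events and by image pulls.
func (c *refCache) update(ref, imageID string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if imageID == "" {
		delete(c.refs, ref) // reference no longer exists in containerd
		return
	}
	c.refs[ref] = imageID
}

// resolve returns the image ID for a reference, or false if it is not cached.
func (c *refCache) resolve(ref string) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	id, ok := c.refs[ref]
	return id, ok
}

func main() {
	cache := newRefCache()
	cache.update("docker.io/library/busybox:latest", "sha256:c75beb...")
	if id, ok := cache.resolve("docker.io/library/busybox:latest"); ok {
		fmt.Println("resolved to", id)
	}
}
```

Treating containerd as the source of truth and the cache as a pure index is what lets the rewritten RemoveImage and ImageStatus tolerate references disappearing between calls.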
@@ -115,13 +115,9 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta

 // Prepare container image snapshot. For container, the image should have
 // been pulled before creating the container, so do not ensure the image.
-imageRef := config.GetImage().GetImage()
-image, err := c.localResolve(ctx, imageRef)
+image, err := c.localResolve(config.GetImage().GetImage())
 if err != nil {
-return nil, errors.Wrapf(err, "failed to resolve image %q", imageRef)
-}
-if image == nil {
-return nil, errors.Errorf("image %q not found", imageRef)
+return nil, errors.Wrapf(err, "failed to resolve image %q", config.GetImage().GetImage())
 }

 // Run container using the same runtime with sandbox.
@@ -46,14 +46,15 @@ func (c *criService) ContainerStatus(ctx context.Context, r *runtime.ContainerSt
 if err != nil {
 return nil, errors.Wrapf(err, "failed to get image %q", imageRef)
 }
-if len(image.RepoTags) > 0 {
+repoTags, repoDigests := parseImageReferences(image.References)
+if len(repoTags) > 0 {
 // Based on current behavior of dockershim, this field should be
 // image tag.
-spec = &runtime.ImageSpec{Image: image.RepoTags[0]}
+spec = &runtime.ImageSpec{Image: repoTags[0]}
 }
-if len(image.RepoDigests) > 0 {
+if len(repoDigests) > 0 {
 // Based on the CRI definition, this field will be consumed by user.
-imageRef = image.RepoDigests[0]
+imageRef = repoDigests[0]
 }
 status := toCRIContainerStatus(container, spec, imageRef)
 info, err := toCRIContainerInfo(ctx, container, r.GetVerbose())
@@ -63,9 +63,11 @@ func getContainerStatusTestData() (*containerstore.Metadata, *containerstore.Sta
 StartedAt: startedAt,
 }
 image := &imagestore.Image{
 ID: imageID,
-RepoTags: []string{"test-image-repo-tag"},
-RepoDigests: []string{"test-image-repo-digest"},
+References: []string{
+"gcr.io/library/busybox:latest",
+"gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+},
 }
 expected := &runtime.ContainerStatus{
 Id: testID,
@@ -73,8 +75,8 @@ func getContainerStatusTestData() (*containerstore.Metadata, *containerstore.Sta
 State: runtime.ContainerState_CONTAINER_RUNNING,
 CreatedAt: createdAt,
 StartedAt: startedAt,
-Image: &runtime.ImageSpec{Image: "test-image-repo-tag"},
-ImageRef: "test-image-repo-digest",
+Image: &runtime.ImageSpec{Image: "gcr.io/library/busybox:latest"},
+ImageRef: "gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
 Reason: completeExitReason,
 Labels: config.GetLabels(),
 Annotations: config.GetAnnotations(),
@@ -120,7 +122,7 @@ func TestToCRIContainerStatus(t *testing.T) {
 expectedReason: errorExitReason,
 },
 } {
-metadata, status, image, expected := getContainerStatusTestData()
+metadata, status, _, expected := getContainerStatusTestData()
 // Update status with test case.
 status.FinishedAt = test.finishedAt
 status.ExitCode = test.exitCode
@@ -138,8 +140,8 @@ func TestToCRIContainerStatus(t *testing.T) {
 expected.ExitCode = test.exitCode
 expected.Message = test.message
 containerStatus := toCRIContainerStatus(container,
-&runtime.ImageSpec{Image: image.RepoTags[0]},
-image.RepoDigests[0])
+expected.Image,
+expected.ImageRef)
 assert.Equal(t, expected, containerStatus, desc)
 }
 }
@@ -207,7 +209,8 @@ func TestContainerStatus(t *testing.T) {
 assert.NoError(t, c.containerStore.Add(container))
 }
 if test.imageExist {
-c.imageStore.Add(*image)
+c.imageStore, err = imagestore.NewFakeStore([]imagestore.Image{*image})
+assert.NoError(t, err)
 }
 resp, err := c.ContainerStatus(context.Background(), &runtime.ContainerStatusRequest{ContainerId: container.ID})
 if test.expectErr {
@@ -34,6 +34,7 @@ import (
 ctrdutil "github.com/containerd/cri/pkg/containerd/util"
 "github.com/containerd/cri/pkg/store"
 containerstore "github.com/containerd/cri/pkg/store/container"
+imagestore "github.com/containerd/cri/pkg/store/image"
 sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
 )

@@ -49,6 +50,7 @@ const (
 type eventMonitor struct {
 containerStore *containerstore.Store
 sandboxStore *sandboxstore.Store
+imageStore *imagestore.Store
 ch <-chan *events.Envelope
 errCh <-chan error
 ctx context.Context
@@ -76,12 +78,13 @@ type backOffQueue struct {

 // Create new event monitor. New event monitor will start subscribing containerd event. All events
 // happen after it should be monitored.
-func newEventMonitor(c *containerstore.Store, s *sandboxstore.Store) *eventMonitor {
+func newEventMonitor(c *containerstore.Store, s *sandboxstore.Store, i *imagestore.Store) *eventMonitor {
 // event subscribe doesn't need namespace.
 ctx, cancel := context.WithCancel(context.Background())
 return &eventMonitor{
 containerStore: c,
 sandboxStore: s,
+imageStore: i,
 ctx: ctx,
 cancel: cancel,
 backOff: newBackOff(),
@@ -93,12 +96,13 @@ func (em *eventMonitor) subscribe(subscriber events.Subscriber) {
 filters := []string{
 `topic=="/tasks/exit"`,
 `topic=="/tasks/oom"`,
+`topic~="/images/"`,
 }
 em.ch, em.errCh = subscriber.Subscribe(em.ctx, filters...)
 }

 func convertEvent(e *gogotypes.Any) (string, interface{}, error) {
-containerID := ""
+id := ""
 evt, err := typeurl.UnmarshalAny(e)
 if err != nil {
 return "", nil, errors.Wrap(err, "failed to unmarshalany")
@@ -106,16 +110,22 @@ func convertEvent(e *gogotypes.Any) (string, interface{}, error) {

 switch evt.(type) {
 case *eventtypes.TaskExit:
-containerID = evt.(*eventtypes.TaskExit).ContainerID
+id = evt.(*eventtypes.TaskExit).ContainerID
 case *eventtypes.TaskOOM:
-containerID = evt.(*eventtypes.TaskOOM).ContainerID
+id = evt.(*eventtypes.TaskOOM).ContainerID
+case *eventtypes.ImageCreate:
+id = evt.(*eventtypes.ImageCreate).Name
+case *eventtypes.ImageUpdate:
+id = evt.(*eventtypes.ImageUpdate).Name
+case *eventtypes.ImageDelete:
+id = evt.(*eventtypes.ImageDelete).Name
 default:
 return "", nil, errors.New("unsupported event")
 }
-return containerID, evt, nil
+return id, evt, nil
 }

-// start starts the event monitor which monitors and handles all container events. It returns
+// start starts the event monitor which monitors and handles all subscribed events. It returns
 // an error channel for the caller to wait for stop errors from the event monitor.
 // start must be called after subscribe.
 func (em *eventMonitor) start() <-chan error {
@@ -130,19 +140,19 @@ func (em *eventMonitor) start() <-chan error {
 select {
 case e := <-em.ch:
 logrus.Debugf("Received containerd event timestamp - %v, namespace - %q, topic - %q", e.Timestamp, e.Namespace, e.Topic)
-cID, evt, err := convertEvent(e.Event)
+id, evt, err := convertEvent(e.Event)
 if err != nil {
 logrus.WithError(err).Errorf("Failed to convert event %+v", e)
 break
 }
-if em.backOff.isInBackOff(cID) {
-logrus.Infof("Events for container %q is in backoff, enqueue event %+v", cID, evt)
-em.backOff.enBackOff(cID, evt)
+if em.backOff.isInBackOff(id) {
+logrus.Infof("Events for %q is in backoff, enqueue event %+v", id, evt)
+em.backOff.enBackOff(id, evt)
 break
 }
 if err := em.handleEvent(evt); err != nil {
-logrus.WithError(err).Errorf("Failed to handle event %+v for container %s", evt, cID)
-em.backOff.enBackOff(cID, evt)
+logrus.WithError(err).Errorf("Failed to handle event %+v for %s", evt, id)
+em.backOff.enBackOff(id, evt)
 }
 case err := <-em.errCh:
 // Close errCh in defer directly if there is no error.
@@ -152,13 +162,13 @@ func (em *eventMonitor) start() <-chan error {
 }
 return
 case <-backOffCheckCh:
-cIDs := em.backOff.getExpiredContainers()
-for _, cID := range cIDs {
-queue := em.backOff.deBackOff(cID)
+ids := em.backOff.getExpiredIDs()
+for _, id := range ids {
+queue := em.backOff.deBackOff(id)
 for i, any := range queue.events {
 if err := em.handleEvent(any); err != nil {
-logrus.WithError(err).Errorf("Failed to handle backOff event %+v for container %s", any, cID)
-em.backOff.reBackOff(cID, queue.events[i:], queue.duration)
+logrus.WithError(err).Errorf("Failed to handle backOff event %+v for %s", any, id)
+em.backOff.reBackOff(id, queue.events[i:], queue.duration)
 break
 }
 }
@@ -230,6 +240,18 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
 if err != nil {
 return errors.Wrap(err, "failed to update container status for TaskOOM event")
 }
+case *eventtypes.ImageCreate:
+e := any.(*eventtypes.ImageCreate)
+logrus.Infof("ImageCreate event %+v", e)
+return em.imageStore.Update(ctx, e.Name)
+case *eventtypes.ImageUpdate:
+e := any.(*eventtypes.ImageUpdate)
+logrus.Infof("ImageUpdate event %+v", e)
+return em.imageStore.Update(ctx, e.Name)
+case *eventtypes.ImageDelete:
+e := any.(*eventtypes.ImageDelete)
+logrus.Infof("ImageDelete event %+v", e)
+return em.imageStore.Update(ctx, e.Name)
 }

 return nil
@@ -331,14 +353,14 @@ func newBackOff() *backOff {
 }
 }

-func (b *backOff) getExpiredContainers() []string {
-var containers []string
-for c, q := range b.queuePool {
+func (b *backOff) getExpiredIDs() []string {
+var ids []string
+for id, q := range b.queuePool {
 if q.isExpire() {
-containers = append(containers, c)
+ids = append(ids, id)
 }
 }
-return containers
+return ids
 }

 func (b *backOff) isInBackOff(key string) bool {
@@ -94,11 +94,11 @@ func TestBackOff(t *testing.T) {
 assert.Equal(t, actual.isInBackOff(notExistKey), false)

 t.Logf("No containers should be expired")
-assert.Empty(t, actual.getExpiredContainers())
+assert.Empty(t, actual.getExpiredIDs())

 t.Logf("Should be able to get all keys which are expired for backOff")
 testClock.Sleep(backOffInitDuration)
-actKeyList := actual.getExpiredContainers()
+actKeyList := actual.getExpiredIDs()
 assert.Equal(t, len(inputQueues), len(actKeyList))
 for k := range inputQueues {
 assert.Contains(t, actKeyList, k)
@@ -17,7 +17,6 @@ limitations under the License.
 package server

 import (
-"encoding/json"
 "fmt"
 "os"
 "path"
@@ -26,15 +25,11 @@ import (
 "strconv"
 "strings"

-"github.com/containerd/containerd"
 "github.com/containerd/containerd/containers"
-"github.com/containerd/containerd/content"
 "github.com/containerd/containerd/runtime/linux/runctypes"
 "github.com/containerd/typeurl"
 "github.com/docker/distribution/reference"
 imagedigest "github.com/opencontainers/go-digest"
-"github.com/opencontainers/image-spec/identity"
-imagespec "github.com/opencontainers/image-spec/specs-go/v1"
 runtimespec "github.com/opencontainers/runtime-spec/specs-go"
 "github.com/opencontainers/runtime-tools/generate"
 "github.com/opencontainers/selinux/go-selinux"
@@ -236,28 +231,25 @@ func getRepoDigestAndTag(namedRef reference.Named, digest imagedigest.Digest, sc
 return repoDigest, repoTag
 }

-// localResolve resolves image reference locally and returns corresponding image metadata. It returns
-// nil without error if the reference doesn't exist.
-func (c *criService) localResolve(ctx context.Context, refOrID string) (*imagestore.Image, error) {
+// localResolve resolves image reference locally and returns corresponding image metadata. It
+// returns store.ErrNotExist if the reference doesn't exist.
+func (c *criService) localResolve(refOrID string) (imagestore.Image, error) {
 getImageID := func(refOrId string) string {
 if _, err := imagedigest.Parse(refOrID); err == nil {
 return refOrID
 }
 return func(ref string) string {
 // ref is not image id, try to resolve it locally.
+// TODO(random-liu): Handle this error better for debugging.
 normalized, err := util.NormalizeImageRef(ref)
 if err != nil {
 return ""
 }
-image, err := c.client.GetImage(ctx, normalized.String())
+id, err := c.imageStore.Resolve(normalized.String())
 if err != nil {
 return ""
 }
-desc, err := image.Config(ctx)
-if err != nil {
-return ""
-}
-return desc.Digest.String()
+return id
 }(refOrID)
 }

@@ -266,14 +258,7 @@ func (c *criService) localResolve(ctx context.Context, refOrID string) (*imagest
 // Try to treat ref as imageID
 imageID = refOrID
 }
-image, err := c.imageStore.Get(imageID)
-if err != nil {
-if err == store.ErrNotExist {
-return nil, nil
-}
-return nil, errors.Wrapf(err, "failed to get image %q", imageID)
-}
-return &image, nil
+return c.imageStore.Get(imageID)
 }

 // getUserFromImage gets uid or user name of the image user.
@@ -298,12 +283,12 @@ func getUserFromImage(user string) (*int64, string) {
 // ensureImageExists returns corresponding metadata of the image reference, if image is not
 // pulled yet, the function will pull the image.
 func (c *criService) ensureImageExists(ctx context.Context, ref string) (*imagestore.Image, error) {
-image, err := c.localResolve(ctx, ref)
-if err != nil {
-return nil, errors.Wrapf(err, "failed to resolve image %q", ref)
+image, err := c.localResolve(ref)
+if err != nil && err != store.ErrNotExist {
+return nil, errors.Wrapf(err, "failed to get image %q", ref)
 }
-if image != nil {
-return image, nil
+if err == nil {
+return &image, nil
 }
 // Pull image to ensure the image exists
 resp, err := c.PullImage(ctx, &runtime.PullImageRequest{Image: &runtime.ImageSpec{Image: ref}})
@@ -314,56 +299,11 @@ func (c *criService) ensureImageExists(ctx context.Context, ref string) (*images
 newImage, err := c.imageStore.Get(imageID)
 if err != nil {
 // It's still possible that someone removed the image right after it is pulled.
-return nil, errors.Wrapf(err, "failed to get image %q metadata after pulling", imageID)
+return nil, errors.Wrapf(err, "failed to get image %q after pulling", imageID)
 }
 return &newImage, nil
 }

-// imageInfo is the information about the image got from containerd.
-type imageInfo struct {
-id string
-chainID imagedigest.Digest
-size int64
-imagespec imagespec.Image
-}
-
-// getImageInfo gets image info from containerd.
-func getImageInfo(ctx context.Context, image containerd.Image) (*imageInfo, error) {
-// Get image information.
-diffIDs, err := image.RootFS(ctx)
-if err != nil {
-return nil, errors.Wrap(err, "failed to get image diffIDs")
-}
-chainID := identity.ChainID(diffIDs)
-
-size, err := image.Size(ctx)
-if err != nil {
-return nil, errors.Wrap(err, "failed to get image compressed resource size")
-}
-
-desc, err := image.Config(ctx)
-if err != nil {
-return nil, errors.Wrap(err, "failed to get image config descriptor")
-}
-id := desc.Digest.String()
-
-rb, err := content.ReadBlob(ctx, image.ContentStore(), desc)
-if err != nil {
-return nil, errors.Wrap(err, "failed to read image config from content store")
-}
-var ociimage imagespec.Image
-if err := json.Unmarshal(rb, &ociimage); err != nil {
-return nil, errors.Wrapf(err, "failed to unmarshal image config %s", rb)
-}
-
-return &imageInfo{
-id: id,
-chainID: chainID,
-size: size,
-imagespec: ociimage,
-}, nil
-}
-
 func initSelinuxOpts(selinuxOpt *runtime.SELinuxOption) (string, string, error) {
 if selinuxOpt == nil {
 return "", "", nil
@@ -500,3 +440,21 @@ func (m orderedMounts) Swap(i, j int) {
 func (m orderedMounts) parts(i int) int {
 return strings.Count(filepath.Clean(m[i].ContainerPath), string(os.PathSeparator))
 }
+
+// parseImageReferences parses a list of arbitrary image references and returns
+// the repotags and repodigests
+func parseImageReferences(refs []string) ([]string, []string) {
+var tags, digests []string
+for _, ref := range refs {
+parsed, err := reference.ParseAnyReference(ref)
+if err != nil {
+continue
+}
+if _, ok := parsed.(reference.Canonical); ok {
+digests = append(digests, parsed.String())
+} else if _, ok := parsed.(reference.Tagged); ok {
+tags = append(tags, parsed.String())
+}
+}
+return tags, digests
+}
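The new parseImageReferences helper above splits the cached references back into CRI RepoTags and RepoDigests by parsing each one. A minimal standalone sketch of the same classification rule, using the docker/distribution reference package that this file already imports (illustrative only, not the plugin's code):

```go
// Sketch of the rule used by parseImageReferences: a reference that parses as
// reference.Canonical carries a digest (repo digest), one that parses as
// reference.Tagged carries a tag (repo tag), anything else (e.g. a bare image
// ID) is ignored.
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func classify(ref string) string {
	parsed, err := reference.ParseAnyReference(ref)
	if err != nil {
		return "invalid"
	}
	if _, ok := parsed.(reference.Canonical); ok {
		return "repo digest"
	}
	if _, ok := parsed.(reference.Tagged); ok {
		return "repo tag"
	}
	return "other (e.g. image ID)"
}

func main() {
	for _, ref := range []string{
		"gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
		"gcr.io/library/busybox:1.2",
		"sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
	} {
		fmt.Printf("%s -> %s\n", ref, classify(ref))
	}
}
```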
@@ -29,6 +29,8 @@ import (
 runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"

 criconfig "github.com/containerd/cri/pkg/config"
+"github.com/containerd/cri/pkg/store"
+imagestore "github.com/containerd/cri/pkg/store/image"
 "github.com/containerd/cri/pkg/util"
 )

@@ -213,3 +215,58 @@ func TestOrderedMounts(t *testing.T) {
 sort.Stable(orderedMounts(mounts))
 assert.Equal(t, expected, mounts)
 }
+
+func TestParseImageReferences(t *testing.T) {
+refs := []string{
+"gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+"gcr.io/library/busybox:1.2",
+"sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+"arbitrary-ref",
+}
+expectedTags := []string{
+"gcr.io/library/busybox:1.2",
+}
+expectedDigests := []string{"gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582"}
+tags, digests := parseImageReferences(refs)
+assert.Equal(t, expectedTags, tags)
+assert.Equal(t, expectedDigests, digests)
+}
+
+func TestLocalResolve(t *testing.T) {
+image := imagestore.Image{
+ID: "sha256:c75bebcdd211f41b3a460c7bf82970ed6c75acaab9cd4c9a4e125b03ca113799",
+ChainID: "test-chain-id-1",
+References: []string{
+"docker.io/library/busybox:latest",
+"docker.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+},
+Size: 10,
+}
+c := newTestCRIService()
+var err error
+c.imageStore, err = imagestore.NewFakeStore([]imagestore.Image{image})
+assert.NoError(t, err)
+
+for _, ref := range []string{
+"sha256:c75bebcdd211f41b3a460c7bf82970ed6c75acaab9cd4c9a4e125b03ca113799",
+"busybox",
+"busybox:latest",
+"busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+"library/busybox",
+"library/busybox:latest",
+"library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+"docker.io/busybox",
+"docker.io/busybox:latest",
+"docker.io/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+"docker.io/library/busybox",
+"docker.io/library/busybox:latest",
+"docker.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+} {
+img, err := c.localResolve(ref)
+assert.NoError(t, err)
+assert.Equal(t, image, img)
+}
+img, err := c.localResolve("randomid")
+assert.Equal(t, store.ErrNotExist, err)
+assert.Equal(t, imagestore.Image{}, img)
+}
@@ -19,8 +19,6 @@ package server
 import (
 "golang.org/x/net/context"
 runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
-
-imagestore "github.com/containerd/cri/pkg/store/image"
 )

 // ListImages lists existing images.
@@ -38,19 +36,3 @@ func (c *criService) ListImages(ctx context.Context, r *runtime.ListImagesReques

 return &runtime.ListImagesResponse{Images: images}, nil
 }
-
-// toCRIImage converts image to CRI image type.
-func toCRIImage(image imagestore.Image) *runtime.Image {
-runtimeImage := &runtime.Image{
-Id: image.ID,
-RepoTags: image.RepoTags,
-RepoDigests: image.RepoDigests,
-Size_: uint64(image.Size),
-}
-uid, username := getUserFromImage(image.ImageSpec.Config.User)
-if uid != nil {
-runtimeImage.Uid = &runtime.Int64Value{Value: *uid}
-}
-runtimeImage.Username = username
-return runtimeImage
-}
@@ -32,11 +32,13 @@ func TestListImages(t *testing.T) {
 c := newTestCRIService()
 imagesInStore := []imagestore.Image{
 {
 ID: "sha256:1123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
 ChainID: "test-chainid-1",
-RepoTags: []string{"tag-a-1", "tag-b-1"},
-RepoDigests: []string{"digest-a-1", "digest-b-1"},
-Size: 1000,
+References: []string{
+"gcr.io/library/busybox:latest",
+"gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+},
+Size: 1000,
 ImageSpec: imagespec.Image{
 Config: imagespec.ImageConfig{
 User: "root",
@@ -44,11 +46,13 @@ func TestListImages(t *testing.T) {
 },
 },
 {
 ID: "sha256:2123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
 ChainID: "test-chainid-2",
-RepoTags: []string{"tag-a-2", "tag-b-2"},
-RepoDigests: []string{"digest-a-2", "digest-b-2"},
-Size: 2000,
+References: []string{
+"gcr.io/library/alpine:latest",
+"gcr.io/library/alpine@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+},
+Size: 2000,
 ImageSpec: imagespec.Image{
 Config: imagespec.ImageConfig{
 User: "1234:1234",
@@ -56,11 +60,13 @@ func TestListImages(t *testing.T) {
 },
 },
 {
 ID: "sha256:3123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
 ChainID: "test-chainid-3",
-RepoTags: []string{"tag-a-3", "tag-b-3"},
-RepoDigests: []string{"digest-a-3", "digest-b-3"},
-Size: 3000,
+References: []string{
+"gcr.io/library/ubuntu:latest",
+"gcr.io/library/ubuntu@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+},
+Size: 3000,
 ImageSpec: imagespec.Image{
 Config: imagespec.ImageConfig{
 User: "nobody",
@@ -71,30 +77,30 @@ func TestListImages(t *testing.T) {
 expect := []*runtime.Image{
 {
 Id: "sha256:1123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
-RepoTags: []string{"tag-a-1", "tag-b-1"},
-RepoDigests: []string{"digest-a-1", "digest-b-1"},
+RepoTags: []string{"gcr.io/library/busybox:latest"},
+RepoDigests: []string{"gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582"},
 Size_: uint64(1000),
 Username: "root",
 },
 {
 Id: "sha256:2123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
-RepoTags: []string{"tag-a-2", "tag-b-2"},
-RepoDigests: []string{"digest-a-2", "digest-b-2"},
+RepoTags: []string{"gcr.io/library/alpine:latest"},
+RepoDigests: []string{"gcr.io/library/alpine@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582"},
 Size_: uint64(2000),
 Uid: &runtime.Int64Value{Value: 1234},
 },
 {
 Id: "sha256:3123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
-RepoTags: []string{"tag-a-3", "tag-b-3"},
-RepoDigests: []string{"digest-a-3", "digest-b-3"},
+RepoTags: []string{"gcr.io/library/ubuntu:latest"},
+RepoDigests: []string{"gcr.io/library/ubuntu@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582"},
 Size_: uint64(3000),
 Username: "nobody",
 },
 }

-for _, i := range imagesInStore {
-c.imageStore.Add(i)
-}
+var err error
+c.imageStore, err = imagestore.NewFakeStore(imagesInStore)
+assert.NoError(t, err)

 resp, err := c.ListImages(context.Background(), &runtime.ListImagesRequest{})
 assert.NoError(t, err)
@@ -26,7 +26,6 @@ import (

 api "github.com/containerd/cri/pkg/api/v1"
 "github.com/containerd/cri/pkg/containerd/importer"
-imagestore "github.com/containerd/cri/pkg/store/image"
 )

 // LoadImage loads a image into containerd.
@@ -44,33 +43,11 @@ func (c *criService) LoadImage(ctx context.Context, r *api.LoadImageRequest) (*a
 return nil, errors.Wrap(err, "failed to import image")
 }
 for _, repoTag := range repoTags {
-image, err := c.client.GetImage(ctx, repoTag)
-if err != nil {
-return nil, errors.Wrapf(err, "failed to get image %q", repoTag)
+// Update image store to reflect the newest state in containerd.
+if err := c.imageStore.Update(ctx, repoTag); err != nil {
+return nil, errors.Wrapf(err, "failed to update image store %q", repoTag)
 }
-info, err := getImageInfo(ctx, image)
-if err != nil {
-return nil, errors.Wrapf(err, "failed to get image %q info", repoTag)
-}
-id := info.id
-
-if err := c.createImageReference(ctx, id, image.Target()); err != nil {
-return nil, errors.Wrapf(err, "failed to create image reference %q", id)
-}
-
-img := imagestore.Image{
-ID: id,
-RepoTags: []string{repoTag},
-ChainID: info.chainID.String(),
-Size: info.size,
-ImageSpec: info.imagespec,
-Image: image,
-}
-
-if err := c.imageStore.Add(img); err != nil {
-return nil, errors.Wrapf(err, "failed to add image %q into store", id)
-}
-logrus.Debugf("Imported image with id %q, repo tag %q", id, repoTag)
+logrus.Debugf("Imported image %q", repoTag)
 }
 return &api.LoadImageResponse{Images: repoTags}, nil
 }
@@ -34,7 +34,6 @@ import (
 "golang.org/x/net/context"
 runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"

-imagestore "github.com/containerd/cri/pkg/store/image"
 "github.com/containerd/cri/pkg/util"
 )

@@ -108,49 +107,34 @@ func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest)
 return nil, errors.Wrapf(err, "failed to pull and unpack image %q", ref)
 }

-// Get image information.
-info, err := getImageInfo(ctx, image)
+configDesc, err := image.Config(ctx)
 if err != nil {
-return nil, errors.Wrap(err, "failed to get image information")
+return nil, errors.Wrap(err, "get image config descriptor")
 }
-imageID := info.id
+imageID := configDesc.Digest.String()

 repoDigest, repoTag := getRepoDigestAndTag(namedRef, image.Target().Digest, isSchema1)
-for _, r := range []string{repoTag, repoDigest, imageID} {
+for _, r := range []string{repoTag, repoDigest} {
 if r == "" {
 continue
 }
 if err := c.createImageReference(ctx, r, image.Target()); err != nil {
 return nil, errors.Wrapf(err, "failed to update image reference %q", r)
 }
+// Update image store to reflect the newest state in containerd.
+if err := c.imageStore.Update(ctx, r); err != nil {
+return nil, errors.Wrapf(err, "failed to update image store %q", r)
+}
 }

 logrus.Debugf("Pulled image %q with image id %q, repo tag %q, repo digest %q", imageRef, imageID,
 repoTag, repoDigest)
-img := imagestore.Image{
-ID: imageID,
-ChainID: info.chainID.String(),
-Size: info.size,
-ImageSpec: info.imagespec,
-Image: image,
-}
-if repoDigest != "" {
-img.RepoDigests = []string{repoDigest}
-}
-if repoTag != "" {
-img.RepoTags = []string{repoTag}
-}
-
-if err := c.imageStore.Add(img); err != nil {
-return nil, errors.Wrapf(err, "failed to add image %q into store", img.ID)
-}
-
 // NOTE(random-liu): the actual state in containerd is the source of truth, even we maintain
 // in-memory image store, it's only for in-memory indexing. The image could be removed
 // by someone else anytime, before/during/after we create the metadata. We should always
 // check the actual state in containerd before using the image or returning status of the
 // image.
-return &runtime.PullImageResponse{ImageRef: img.ID}, nil
+return &runtime.PullImageResponse{ImageRef: imageID}, nil
 }

 // ParseAuth parses AuthConfig and returns username and password/secret required by containerd.
|
@ -20,9 +20,10 @@ import (
|
|||||||
"github.com/containerd/containerd/errdefs"
|
"github.com/containerd/containerd/errdefs"
|
||||||
"github.com/containerd/containerd/images"
|
"github.com/containerd/containerd/images"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||||
|
|
||||||
|
"github.com/containerd/cri/pkg/store"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RemoveImage removes the image.
|
// RemoveImage removes the image.
|
||||||
@@ -32,62 +33,33 @@ import (
 // Remove the whole image no matter the it's image id or reference. This is the
 // semantic defined in CRI now.
 func (c *criService) RemoveImage(ctx context.Context, r *runtime.RemoveImageRequest) (*runtime.RemoveImageResponse, error) {
-image, err := c.localResolve(ctx, r.GetImage().GetImage())
+image, err := c.localResolve(r.GetImage().GetImage())
 if err != nil {
+if err == store.ErrNotExist {
+// return empty without error when image not found.
+return &runtime.RemoveImageResponse{}, nil
+}
 return nil, errors.Wrapf(err, "can not resolve %q locally", r.GetImage().GetImage())
 }
-if image == nil {
-// return empty without error when image not found.
-return &runtime.RemoveImageResponse{}, nil
-}

-// Exclude outdated image tag.
-for i, tag := range image.RepoTags {
-cImage, err := c.client.GetImage(ctx, tag)
-if err != nil {
-if errdefs.IsNotFound(err) {
-continue
-}
-return nil, errors.Wrapf(err, "failed to get image %q", tag)
+// Remove all image references.
+for i, ref := range image.References {
+var opts []images.DeleteOpt
+if i == len(image.References)-1 {
+// Delete the last image reference synchronously to trigger garbage collection.
+// This is best effort. It is possible that the image reference is deleted by
+// someone else before this point.
+opts = []images.DeleteOpt{images.SynchronousDelete()}
 }
-desc, err := cImage.Config(ctx)
-if err != nil {
-// We can only get image id by reading Config from content.
-// If the config is missing, we will fail to get image id,
-// So we won't be able to remove the image forever,
-// and the cri plugin always reports the image is ok.
-// But we also don't check it by manifest,
-// It's possible that two manifest digests have the same image ID in theory.
-// In theory it's possible that an image is compressed with different algorithms,
-// then they'll have the same uncompressed id - image id,
-// but different ids generated from compressed contents - manifest digest.
-// So we decide to leave it.
-// After all, the user can override the repoTag by pulling image again.
-logrus.WithError(err).Errorf("Can't remove image,failed to get config for Image tag %q,id %q", tag, image.ID)
-image.RepoTags = append(image.RepoTags[:i], image.RepoTags[i+1:]...)
-continue
-}
-cID := desc.Digest.String()
-if cID != image.ID {
-logrus.Debugf("Image tag %q for %q is outdated, it's currently used by %q", tag, image.ID, cID)
-image.RepoTags = append(image.RepoTags[:i], image.RepoTags[i+1:]...)
-continue
-}
-}
-
-// Include all image references, including RepoTag, RepoDigest and id.
-for _, ref := range append(image.RepoTags, image.RepoDigests...) {
-err = c.client.ImageService().Delete(ctx, ref)
+err = c.client.ImageService().Delete(ctx, ref, opts...)
 if err == nil || errdefs.IsNotFound(err) {
+// Update image store to reflect the newest state in containerd.
+if err := c.imageStore.Update(ctx, ref); err != nil {
+return nil, errors.Wrapf(err, "failed to update image reference %q for %q", ref, image.ID)
+}
 continue
 }
-return nil, errors.Wrapf(err, "failed to delete image reference %q for image %q", ref, image.ID)
+return nil, errors.Wrapf(err, "failed to delete image reference %q for %q", ref, image.ID)
 }
-// Delete image id synchronously to trigger garbage collection.
-err = c.client.ImageService().Delete(ctx, image.ID, images.SynchronousDelete())
-if err != nil && !errdefs.IsNotFound(err) {
-return nil, errors.Wrapf(err, "failed to delete image id %q", image.ID)
-}
-c.imageStore.Delete(image.ID)
 return &runtime.RemoveImageResponse{}, nil
 }
@@ -24,6 +24,7 @@ import (
 "golang.org/x/net/context"
 runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"

+"github.com/containerd/cri/pkg/store"
 imagestore "github.com/containerd/cri/pkg/store/image"
 imagespec "github.com/opencontainers/image-spec/specs-go/v1"
 )
@@ -32,19 +33,19 @@ import (
 // TODO(random-liu): We should change CRI to distinguish image id and image spec. (See
 // kubernetes/kubernetes#46255)
 func (c *criService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequest) (*runtime.ImageStatusResponse, error) {
-image, err := c.localResolve(ctx, r.GetImage().GetImage())
+image, err := c.localResolve(r.GetImage().GetImage())
 if err != nil {
+if err == store.ErrNotExist {
+// return empty without error when image not found.
+return &runtime.ImageStatusResponse{}, nil
+}
 return nil, errors.Wrapf(err, "can not resolve %q locally", r.GetImage().GetImage())
 }
-if image == nil {
-// return empty without error when image not found.
-return &runtime.ImageStatusResponse{}, nil
-}
 // TODO(random-liu): [P0] Make sure corresponding snapshot exists. What if snapshot
 // doesn't exist?

-runtimeImage := toCRIRuntimeImage(image)
-info, err := c.toCRIImageInfo(ctx, image, r.GetVerbose())
+runtimeImage := toCRIImage(image)
+info, err := c.toCRIImageInfo(ctx, &image, r.GetVerbose())
 if err != nil {
 return nil, errors.Wrap(err, "failed to generate image info")
 }
@@ -55,12 +56,13 @@ func (c *criService) ImageStatus(ctx context.Context, r *runtime.ImageStatusRequ
 }, nil
 }

-// toCRIRuntimeImage converts internal image object to CRI runtime.Image.
-func toCRIRuntimeImage(image *imagestore.Image) *runtime.Image {
+// toCRIImage converts internal image object to CRI runtime.Image.
+func toCRIImage(image imagestore.Image) *runtime.Image {
+repoTags, repoDigests := parseImageReferences(image.References)
 runtimeImage := &runtime.Image{
 Id: image.ID,
-RepoTags: image.RepoTags,
-RepoDigests: image.RepoDigests,
+RepoTags: repoTags,
+RepoDigests: repoDigests,
 Size_: uint64(image.Size),
 }
 uid, username := getUserFromImage(image.ImageSpec.Config.User)
@@ -31,11 +31,13 @@ import (
 func TestImageStatus(t *testing.T) {
 testID := "sha256:d848ce12891bf78792cda4a23c58984033b0c397a55e93a1556202222ecc5ed4"
 image := imagestore.Image{
 ID: testID,
 ChainID: "test-chain-id",
-RepoTags: []string{"a", "b"},
-RepoDigests: []string{"c", "d"},
-Size: 1234,
+References: []string{
+"gcr.io/library/busybox:latest",
+"gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
+},
+Size: 1234,
 ImageSpec: imagespec.Image{
 Config: imagespec.ImageConfig{
 User: "user:group",
@@ -44,8 +46,8 @@ func TestImageStatus(t *testing.T) {
 }
 expected := &runtime.Image{
 Id: testID,
-RepoTags: []string{"a", "b"},
-RepoDigests: []string{"c", "d"},
+RepoTags: []string{"gcr.io/library/busybox:latest"},
+RepoDigests: []string{"gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582"},
 Size_: uint64(1234),
 Username: "user",
 }
@@ -59,7 +61,8 @@ func TestImageStatus(t *testing.T) {
 require.NotNil(t, resp)
 assert.Nil(t, resp.GetImage())

-c.imageStore.Add(image)
+c.imageStore, err = imagestore.NewFakeStore([]imagestore.Image{image})
+assert.NoError(t, err)

 t.Logf("should return correct image status for exist image")
 resp, err = c.ImageStatus(context.Background(), &runtime.ImageStatusRequest{
@@ -28,7 +28,6 @@ import (
 containerdimages "github.com/containerd/containerd/images"
 "github.com/containerd/containerd/platforms"
 "github.com/containerd/typeurl"
-"github.com/docker/distribution/reference"
 "github.com/docker/docker/pkg/system"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
@@ -97,16 +96,7 @@ func (c *criService) recover(ctx context.Context) error {
 if err != nil {
 return errors.Wrap(err, "failed to list images")
 }
-images, err := loadImages(ctx, cImages, c.config.ContainerdConfig.Snapshotter)
-if err != nil {
-return errors.Wrap(err, "failed to load images")
-}
-for _, image := range images {
-logrus.Debugf("Loaded image %+v", image)
-if err := c.imageStore.Add(image); err != nil {
-return errors.Wrapf(err, "failed to add image %q to store", image.ID)
-}
-}
+loadImages(ctx, c.imageStore, cImages, c.config.ContainerdConfig.Snapshotter)

 // It's possible that containerd containers are deleted unexpectedly. In that case,
 // we can't even get metadata, we should cleanup orphaned sandbox/container directories
@@ -404,26 +394,9 @@ func loadSandbox(ctx context.Context, cntr containerd.Container) (sandboxstore.S
 }

 // loadImages loads images from containerd.
-// TODO(random-liu): Check whether image is unpacked, because containerd put image reference
-// into store before image is unpacked.
-func loadImages(ctx context.Context, cImages []containerd.Image,
-snapshotter string) ([]imagestore.Image, error) {
-// Group images by image id.
-imageMap := make(map[string][]containerd.Image)
+func loadImages(ctx context.Context, store *imagestore.Store, cImages []containerd.Image,
+snapshotter string) {
 for _, i := range cImages {
-desc, err := i.Config(ctx)
-if err != nil {
-logrus.WithError(err).Warnf("Failed to get image config for %q", i.Name())
-continue
-}
-id := desc.Digest.String()
-imageMap[id] = append(imageMap[id], i)
-}
-var images []imagestore.Image
-for id, imgs := range imageMap {
-// imgs len must be > 0, or else the entry will not be created in
-// previous loop.
-i := imgs[0]
 ok, _, _, _, err := containerdimages.Check(ctx, i.ContentStore(), i.Target(), platforms.Default())
 if err != nil {
 logrus.WithError(err).Errorf("Failed to check image content readiness for %q", i.Name())
@ -436,48 +409,19 @@ func loadImages(ctx context.Context, cImages []containerd.Image,
|
|||||||
// Checking existence of top-level snapshot for each image being recovered.
|
// Checking existence of top-level snapshot for each image being recovered.
|
||||||
unpacked, err := i.IsUnpacked(ctx, snapshotter)
|
unpacked, err := i.IsUnpacked(ctx, snapshotter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.WithError(err).Warnf("Failed to Check whether image is unpacked for image %s", i.Name())
|
logrus.WithError(err).Warnf("Failed to check whether image is unpacked for image %s", i.Name())
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !unpacked {
|
if !unpacked {
|
||||||
logrus.Warnf("The image %s is not unpacked.", i.Name())
|
logrus.Warnf("The image %s is not unpacked.", i.Name())
|
||||||
// TODO(random-liu): Consider whether we should try unpack here.
|
// TODO(random-liu): Consider whether we should try unpack here.
|
||||||
}
|
}
|
||||||
|
if err := store.Update(ctx, i.Name()); err != nil {
|
||||||
info, err := getImageInfo(ctx, i)
|
logrus.WithError(err).Warnf("Failed to update reference for image %q", i.Name())
|
||||||
if err != nil {
|
|
||||||
logrus.WithError(err).Warnf("Failed to get image info for %q", i.Name())
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
image := imagestore.Image{
|
logrus.Debugf("Loaded image %q", i.Name())
|
||||||
ID: id,
|
|
||||||
ChainID: info.chainID.String(),
|
|
||||||
Size: info.size,
|
|
||||||
ImageSpec: info.imagespec,
|
|
||||||
Image: i,
|
|
||||||
}
|
|
||||||
// Recover repo digests and repo tags.
|
|
||||||
for _, i := range imgs {
|
|
||||||
name := i.Name()
|
|
||||||
r, err := reference.ParseAnyReference(name)
|
|
||||||
if err != nil {
|
|
||||||
logrus.WithError(err).Warnf("Failed to parse image reference %q", name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, ok := r.(reference.Canonical); ok {
|
|
||||||
image.RepoDigests = append(image.RepoDigests, name)
|
|
||||||
} else if _, ok := r.(reference.Tagged); ok {
|
|
||||||
image.RepoTags = append(image.RepoTags, name)
|
|
||||||
} else if _, ok := r.(reference.Digested); ok {
|
|
||||||
// This is an image id.
|
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
logrus.Warnf("Invalid image reference %q", name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
images = append(images, image)
|
|
||||||
}
|
}
|
||||||
return images, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func cleanupOrphanedIDDirs(cntrs []containerd.Container, base string) error {
|
func cleanupOrphanedIDDirs(cntrs []containerd.Container, base string) error {
|
||||||
|
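A rough sketch of how a restart path could drive the new loadImages signature, assuming it sits in the same package as loadImages; recoverImages and its parameters are illustrative names, not part of this commit. Each image reference is refreshed through the store's Update, which is what loadImages above now relies on.

package server

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/pkg/errors"

	imagestore "github.com/containerd/cri/pkg/store/image"
)

// recoverImages lists images known to containerd and reloads them into the
// image store, refreshing every reference through store.Update.
func recoverImages(ctx context.Context, client *containerd.Client, store *imagestore.Store, snapshotter string) error {
	cImages, err := client.ListImages(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to list images from containerd")
	}
	loadImages(ctx, store, cImages, snapshotter)
	return nil
}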
@ -113,7 +113,7 @@ func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIServi
 		os:                 osinterface.RealOS{},
 		sandboxStore:       sandboxstore.NewStore(),
 		containerStore:     containerstore.NewStore(),
-		imageStore:         imagestore.NewStore(),
+		imageStore:         imagestore.NewStore(client),
 		snapshotStore:      snapshotstore.NewStore(),
 		sandboxNameIndex:   registrar.NewRegistrar(),
 		containerNameIndex: registrar.NewRegistrar(),
@ -157,7 +157,7 @@ func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIServi
 		return nil, errors.Wrap(err, "failed to create stream server")
 	}
 
-	c.eventMonitor = newEventMonitor(c.containerStore, c.sandboxStore)
+	c.eventMonitor = newEventMonitor(c.containerStore, c.sandboxStore, c.imageStore)
 
 	return c, nil
 }
@ -50,7 +50,7 @@ func newTestCRIService() *criService {
 		imageFSPath:        testImageFSPath,
 		os:                 ostesting.NewFakeOS(),
 		sandboxStore:       sandboxstore.NewStore(),
-		imageStore:         imagestore.NewStore(),
+		imageStore:         imagestore.NewStore(nil),
 		snapshotStore:      snapshotstore.NewStore(),
 		sandboxNameIndex:   registrar.NewRegistrar(),
 		containerStore:     containerstore.NewStore(),
34 pkg/store/image/fake_image.go Normal file
@ -0,0 +1,34 @@
+/*
+Copyright 2018 The Containerd Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package image
+
+import "github.com/pkg/errors"
+
+// NewFakeStore returns an image store with predefined images.
+// Update is not allowed for this fake store.
+func NewFakeStore(images []Image) (*Store, error) {
+	s := NewStore(nil)
+	for _, i := range images {
+		for _, ref := range i.References {
+			s.refCache[ref] = i.ID
+		}
+		if err := s.store.add(i); err != nil {
+			return nil, errors.Wrapf(err, "add image %q", i)
+		}
+	}
+	return s, nil
+}
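A short sketch of how a test could seed this fake store and resolve one of the predefined references; the image ID and reference below are made-up values, not taken from this commit.

package image

import "testing"

func TestFakeStoreSketch(t *testing.T) {
	img := Image{
		ID:         "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
		References: []string{"docker.io/library/alpine:3.7"},
	}
	s, err := NewFakeStore([]Image{img})
	if err != nil {
		t.Fatalf("failed to create fake store: %v", err)
	}
	// The seeded reference should resolve back to the image id.
	id, err := s.Resolve("docker.io/library/alpine:3.7")
	if err != nil || id != img.ID {
		t.Fatalf("unexpected resolve result: %q, %v", id, err)
	}
}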
@ -17,14 +17,21 @@ limitations under the License.
 package image
 
 import (
+	"context"
+	"encoding/json"
 	"sync"
 
 	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/docker/distribution/digestset"
-	godigest "github.com/opencontainers/go-digest"
+	imagedigest "github.com/opencontainers/go-digest"
+	imageidentity "github.com/opencontainers/image-spec/identity"
 	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
 
-	"github.com/containerd/cri/pkg/store"
+	storeutil "github.com/containerd/cri/pkg/store"
+	"github.com/containerd/cri/pkg/util"
 )
 
 // Image contains all resources associated with the image. All fields
@ -32,10 +39,8 @@ import (
 type Image struct {
 	// Id of the image. Normally the digest of image config.
 	ID string
-	// Other names by which this image is known.
-	RepoTags []string
-	// Digests by which this image is known.
-	RepoDigests []string
+	// References are references to the image, e.g. RepoTag and RepoDigest.
+	References []string
 	// ChainID is the chainID of the image.
 	ChainID string
 	// Size is the compressed size of the image.
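Since References now mixes tags and digests, a consumer that still needs them separated can parse each entry, much like the reference parsing the old loadImages above performed. The helper below is only an illustration under that assumption, not part of this commit; the reference strings are made up.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

// splitReferences separates a mixed reference list into repo tags and repo digests.
func splitReferences(refs []string) (tags, digests []string) {
	for _, ref := range refs {
		r, err := reference.ParseAnyReference(ref)
		if err != nil {
			continue
		}
		if _, ok := r.(reference.Canonical); ok {
			digests = append(digests, ref)
		} else if _, ok := r.(reference.Tagged); ok {
			tags = append(tags, ref)
		}
	}
	return tags, digests
}

func main() {
	tags, digests := splitReferences([]string{
		"docker.io/library/alpine:3.7",
		"docker.io/library/alpine@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
	})
	fmt.Println(tags, digests)
}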
@ -48,28 +53,156 @@ type Image struct {
 
 // Store stores all images.
 type Store struct {
+	lock sync.RWMutex
+	// refCache is a containerd image reference to image id cache.
+	refCache map[string]string
+	// client is the containerd client.
+	client *containerd.Client
+	// store is the internal image store indexed by image id.
+	store *store
+}
+
+// NewStore creates an image store.
+func NewStore(client *containerd.Client) *Store {
+	return &Store{
+		refCache: make(map[string]string),
+		client:   client,
+		store: &store{
+			images:    make(map[string]Image),
+			digestSet: digestset.NewSet(),
+		},
+	}
+}
+
+// Update updates cache for a reference.
+func (s *Store) Update(ctx context.Context, ref string) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	i, err := s.client.GetImage(ctx, ref)
+	if err != nil && !errdefs.IsNotFound(err) {
+		return errors.Wrap(err, "get image from containerd")
+	}
+	var img *Image
+	if err == nil {
+		img, err = getImage(ctx, i)
+		if err != nil {
+			return errors.Wrap(err, "get image info from containerd")
+		}
+	}
+	return s.update(ref, img)
+}
+
+// update updates the internal cache. img == nil means that
+// the image does not exist in containerd.
+func (s *Store) update(ref string, img *Image) error {
+	oldID, oldExist := s.refCache[ref]
+	if img == nil {
+		// The image reference doesn't exist in containerd.
+		if oldExist {
+			// Remove the reference from the store.
+			s.store.delete(oldID, ref)
+			delete(s.refCache, ref)
+		}
+		return nil
+	}
+	if oldExist {
+		if oldID == img.ID {
+			return nil
+		}
+		// Updated. Remove tag from old image.
+		s.store.delete(oldID, ref)
+	}
+	// New image. Add new image.
+	s.refCache[ref] = img.ID
+	return s.store.add(*img)
+}
+
+// getImage gets image information from containerd.
+func getImage(ctx context.Context, i containerd.Image) (*Image, error) {
+	// Get image information.
+	diffIDs, err := i.RootFS(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "get image diffIDs")
+	}
+	chainID := imageidentity.ChainID(diffIDs)
+
+	size, err := i.Size(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "get image compressed resource size")
+	}
+
+	desc, err := i.Config(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "get image config descriptor")
+	}
+	id := desc.Digest.String()
+
+	rb, err := content.ReadBlob(ctx, i.ContentStore(), desc)
+	if err != nil {
+		return nil, errors.Wrap(err, "read image config from content store")
+	}
+	var ociimage imagespec.Image
+	if err := json.Unmarshal(rb, &ociimage); err != nil {
+		return nil, errors.Wrapf(err, "unmarshal image config %s", rb)
+	}
+
+	return &Image{
+		ID:         id,
+		References: []string{i.Name()},
+		ChainID:    chainID.String(),
+		Size:       size,
+		ImageSpec:  ociimage,
+		Image:      i,
+	}, nil
+}
+
+// Resolve resolves a image reference to image id.
+func (s *Store) Resolve(ref string) (string, error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+	id, ok := s.refCache[ref]
+	if !ok {
+		return "", storeutil.ErrNotExist
+	}
+	return id, nil
+}
+
+// Get gets image metadata by image id. The id can be truncated.
+// Returns various validation errors if the image id is invalid.
+// Returns storeutil.ErrNotExist if the image doesn't exist.
+func (s *Store) Get(id string) (Image, error) {
+	return s.store.get(id)
+}
+
+// List lists all images.
+func (s *Store) List() []Image {
+	return s.store.list()
+}
+
+type store struct {
 	lock      sync.RWMutex
 	images    map[string]Image
 	digestSet *digestset.Set
 }
 
-// NewStore creates an image store.
-func NewStore() *Store {
-	return &Store{
-		images:    make(map[string]Image),
-		digestSet: digestset.NewSet(),
+func (s *store) list() []Image {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+	var images []Image
+	for _, i := range s.images {
+		images = append(images, i)
 	}
+	return images
 }
 
-// Add an image into the store.
-func (s *Store) Add(img Image) error {
+func (s *store) add(img Image) error {
 	s.lock.Lock()
 	defer s.lock.Unlock()
 	if _, err := s.digestSet.Lookup(img.ID); err != nil {
 		if err != digestset.ErrDigestNotFound {
 			return err
 		}
-		if err := s.digestSet.Add(godigest.Digest(img.ID)); err != nil {
+		if err := s.digestSet.Add(imagedigest.Digest(img.ID)); err != nil {
 			return err
 		}
 	}
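A minimal sketch of how a caller could use the reference cache: refresh a reference with Update, then resolve it to an image id and fetch the cached metadata. The helper name and its placement in the server package are assumptions for illustration, not part of this commit.

package server

import (
	"context"

	"github.com/pkg/errors"

	imagestore "github.com/containerd/cri/pkg/store/image"
)

// resolveByRef refreshes the cache entry for ref and returns the stored image.
func resolveByRef(ctx context.Context, s *imagestore.Store, ref string) (imagestore.Image, error) {
	if err := s.Update(ctx, ref); err != nil {
		return imagestore.Image{}, errors.Wrap(err, "failed to update image reference cache")
	}
	id, err := s.Resolve(ref)
	if err != nil {
		return imagestore.Image{}, errors.Wrap(err, "failed to resolve image reference")
	}
	return s.Get(id)
}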
@ -80,44 +213,29 @@ func (s *Store) Add(img Image) error {
 		s.images[img.ID] = img
 		return nil
 	}
-	// Or else, merge the repo tags/digests.
-	i.RepoTags = mergeStringSlices(i.RepoTags, img.RepoTags)
-	i.RepoDigests = mergeStringSlices(i.RepoDigests, img.RepoDigests)
+	// Or else, merge the references.
+	i.References = util.MergeStringSlices(i.References, img.References)
 	s.images[img.ID] = i
 	return nil
 }
 
-// Get returns the image with specified id. Returns store.ErrNotExist if the
-// image doesn't exist.
-func (s *Store) Get(id string) (Image, error) {
+func (s *store) get(id string) (Image, error) {
 	s.lock.RLock()
 	defer s.lock.RUnlock()
 	digest, err := s.digestSet.Lookup(id)
 	if err != nil {
 		if err == digestset.ErrDigestNotFound {
-			err = store.ErrNotExist
+			err = storeutil.ErrNotExist
 		}
 		return Image{}, err
 	}
 	if i, ok := s.images[digest.String()]; ok {
 		return i, nil
 	}
-	return Image{}, store.ErrNotExist
+	return Image{}, storeutil.ErrNotExist
 }
 
-// List lists all images.
-func (s *Store) List() []Image {
-	s.lock.RLock()
-	defer s.lock.RUnlock()
-	var images []Image
-	for _, i := range s.images {
-		images = append(images, i)
-	}
-	return images
-}
-
-// Delete deletes the image with specified id.
-func (s *Store) Delete(id string) {
+func (s *store) delete(id, ref string) {
 	s.lock.Lock()
 	defer s.lock.Unlock()
 	digest, err := s.digestSet.Lookup(id)
@ -126,22 +244,16 @@ func (s *Store) Delete(id string) {
 		// So we need to return if there are error.
 		return
 	}
+	i, ok := s.images[digest.String()]
+	if !ok {
+		return
+	}
+	i.References = util.SubtractStringSlice(i.References, ref)
+	if len(i.References) != 0 {
+		s.images[digest.String()] = i
+		return
+	}
+	// Remove the image if it is not referenced any more.
 	s.digestSet.Remove(digest) // nolint: errcheck
 	delete(s.images, digest.String())
 }
 
-// mergeStringSlices merges 2 string slices into one and remove duplicated elements.
-func mergeStringSlices(a []string, b []string) []string {
-	set := map[string]struct{}{}
-	for _, s := range a {
-		set[s] = struct{}{}
-	}
-	for _, s := range b {
-		set[s] = struct{}{}
-	}
-	var ss []string
-	for s := range set {
-		ss = append(ss, s)
-	}
-	return ss
-}
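delete removes a single reference and only drops the image once no reference is left; the subtraction it relies on behaves as in this small standalone example (the reference strings are made up for illustration).

package main

import (
	"fmt"

	"github.com/containerd/cri/pkg/util"
)

func main() {
	refs := []string{"docker.io/library/alpine:3.7", "docker.io/library/alpine:latest"}
	// Removing one reference keeps the remaining ones intact.
	remaining := util.SubtractStringSlice(refs, "docker.io/library/alpine:latest")
	fmt.Println(remaining)
}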
@ -17,65 +17,61 @@ limitations under the License.
 package image
 
 import (
+	"sort"
 	"strings"
 	"testing"
 
-	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/docker/distribution/digestset"
 	assertlib "github.com/stretchr/testify/assert"
 
-	"github.com/containerd/cri/pkg/store"
+	storeutil "github.com/containerd/cri/pkg/store"
 )
 
-func TestImageStore(t *testing.T) {
+func TestInternalStore(t *testing.T) {
 	images := []Image{
 		{
 			ID:      "sha256:1123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
 			ChainID: "test-chain-id-1",
-			RepoTags:    []string{"tag-1"},
-			RepoDigests: []string{"digest-1"},
-			Size:        10,
-			ImageSpec:   imagespec.Image{},
+			References: []string{"ref-1"},
+			Size:       10,
 		},
 		{
 			ID:      "sha256:2123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
 			ChainID: "test-chain-id-2abcd",
-			RepoTags:    []string{"tag-2abcd"},
-			RepoDigests: []string{"digest-2abcd"},
-			Size:        20,
-			ImageSpec:   imagespec.Image{},
+			References: []string{"ref-2abcd"},
+			Size:       20,
 		},
 		{
 			ID: "sha256:3123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
-			RepoTags:    []string{"tag-4a333"},
-			RepoDigests: []string{"digest-4a333"},
-			ChainID:     "test-chain-id-4a333",
-			Size:        30,
-			ImageSpec:   imagespec.Image{},
+			References: []string{"ref-4a333"},
+			ChainID:    "test-chain-id-4a333",
+			Size:       30,
 		},
 		{
 			ID: "sha256:4123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
-			RepoTags:    []string{"tag-4abcd"},
-			RepoDigests: []string{"digest-4abcd"},
-			ChainID:     "test-chain-id-4abcd",
-			Size:        40,
-			ImageSpec:   imagespec.Image{},
+			References: []string{"ref-4abcd"},
+			ChainID:    "test-chain-id-4abcd",
+			Size:       40,
 		},
 	}
 	assert := assertlib.New(t)
 	genTruncIndex := func(normalName string) string { return normalName[:(len(normalName)+1)/2] }
 
-	s := NewStore()
+	s := &store{
+		images:    make(map[string]Image),
+		digestSet: digestset.NewSet(),
+	}
 
 	t.Logf("should be able to add image")
 	for _, img := range images {
-		err := s.Add(img)
+		err := s.add(img)
 		assert.NoError(err)
 	}
 
 	t.Logf("should be able to get image")
 	for _, v := range images {
 		truncID := genTruncIndex(v.ID)
-		got, err := s.Get(truncID)
+		got, err := s.get(truncID)
 		assert.NoError(err, "truncID:%s, fullID:%s", truncID, v.ID)
 		assert.Equal(v, got)
 	}
@ -83,7 +79,7 @@ func TestImageStore(t *testing.T) {
 	t.Logf("should be able to get image by truncated imageId without algorithm")
 	for _, v := range images {
 		truncID := genTruncIndex(v.ID[strings.Index(v.ID, ":")+1:])
-		got, err := s.Get(truncID)
+		got, err := s.get(truncID)
 		assert.NoError(err, "truncID:%s, fullID:%s", truncID, v.ID)
 		assert.Equal(v, got)
 	}
@ -91,54 +87,162 @@ func TestImageStore(t *testing.T) {
 	t.Logf("should not be able to get image by ambiguous prefix")
 	ambiguousPrefixs := []string{"sha256", "sha256:"}
 	for _, v := range ambiguousPrefixs {
-		_, err := s.Get(v)
+		_, err := s.get(v)
 		assert.NotEqual(nil, err)
 	}
 
 	t.Logf("should be able to list images")
-	imgs := s.List()
+	imgs := s.list()
 	assert.Len(imgs, len(images))
 
 	imageNum := len(images)
 	for _, v := range images {
 		truncID := genTruncIndex(v.ID)
-		oldRepoTag := v.RepoTags[0]
-		oldRepoDigest := v.RepoDigests[0]
-		newRepoTag := oldRepoTag + "new"
-		newRepoDigest := oldRepoDigest + "new"
+		oldRef := v.References[0]
+		newRef := oldRef + "new"
 
-		t.Logf("should be able to add new repo tags/digests")
+		t.Logf("should be able to add new references")
 		newImg := v
-		newImg.RepoTags = []string{newRepoTag}
-		newImg.RepoDigests = []string{newRepoDigest}
-		err := s.Add(newImg)
+		newImg.References = []string{newRef}
+		err := s.add(newImg)
 		assert.NoError(err)
-		got, err := s.Get(truncID)
+		got, err := s.get(truncID)
 		assert.NoError(err)
-		assert.Len(got.RepoTags, 2)
-		assert.Contains(got.RepoTags, oldRepoTag, newRepoTag)
-		assert.Len(got.RepoDigests, 2)
-		assert.Contains(got.RepoDigests, oldRepoDigest, newRepoDigest)
+		assert.Len(got.References, 2)
+		assert.Contains(got.References, oldRef, newRef)
 
-		t.Logf("should not be able to add duplicated repo tags/digests")
-		err = s.Add(newImg)
+		t.Logf("should not be able to add duplicated references")
+		err = s.add(newImg)
 		assert.NoError(err)
-		got, err = s.Get(truncID)
+		got, err = s.get(truncID)
 		assert.NoError(err)
-		assert.Len(got.RepoTags, 2)
-		assert.Contains(got.RepoTags, oldRepoTag, newRepoTag)
-		assert.Len(got.RepoDigests, 2)
-		assert.Contains(got.RepoDigests, oldRepoDigest, newRepoDigest)
+		assert.Len(got.References, 2)
+		assert.Contains(got.References, oldRef, newRef)
+
+		t.Logf("should be able to delete image references")
+		s.delete(truncID, oldRef)
+		got, err = s.get(truncID)
+		assert.NoError(err)
+		assert.Equal([]string{newRef}, got.References)
 
 		t.Logf("should be able to delete image")
-		s.Delete(truncID)
-		imageNum--
-		imgs = s.List()
-		assert.Len(imgs, imageNum)
+		s.delete(truncID, newRef)
+		got, err = s.get(truncID)
+		assert.Equal(storeutil.ErrNotExist, err)
+		assert.Equal(Image{}, got)
 
-		t.Logf("get should return empty struct and ErrNotExist after deletion")
-		img, err := s.Get(truncID)
-		assert.Equal(Image{}, img)
-		assert.Equal(store.ErrNotExist, err)
+		imageNum--
+		imgs = s.list()
+		assert.Len(imgs, imageNum)
+	}
+}
+
+func TestImageStore(t *testing.T) {
+	id := "sha256:1123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+	newID := "sha256:9923456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+	image := Image{
+		ID:         id,
+		ChainID:    "test-chain-id-1",
+		References: []string{"ref-1"},
+		Size:       10,
+	}
+	assert := assertlib.New(t)
+
+	equal := func(i1, i2 Image) {
+		sort.Strings(i1.References)
+		sort.Strings(i2.References)
+		assert.Equal(i1, i2)
+	}
+	for desc, test := range map[string]struct {
+		ref      string
+		image    *Image
+		expected []Image
+	}{
+		"nothing should happen if a non-exist ref disappear": {
+			ref:      "ref-2",
+			image:    nil,
+			expected: []Image{image},
+		},
+		"new ref for an existing image": {
+			ref: "ref-2",
+			image: &Image{
+				ID:         id,
+				ChainID:    "test-chain-id-1",
+				References: []string{"ref-2"},
+				Size:       10,
+			},
+			expected: []Image{
+				{
+					ID:         id,
+					ChainID:    "test-chain-id-1",
+					References: []string{"ref-1", "ref-2"},
+					Size:       10,
+				},
+			},
+		},
+		"new ref for a new image": {
+			ref: "ref-2",
+			image: &Image{
+				ID:         newID,
+				ChainID:    "test-chain-id-2",
+				References: []string{"ref-2"},
+				Size:       20,
+			},
+			expected: []Image{
+				image,
+				{
+					ID:         newID,
+					ChainID:    "test-chain-id-2",
+					References: []string{"ref-2"},
+					Size:       20,
+				},
+			},
+		},
+		"existing ref point to a new image": {
+			ref: "ref-1",
+			image: &Image{
+				ID:         newID,
+				ChainID:    "test-chain-id-2",
+				References: []string{"ref-1"},
+				Size:       20,
+			},
+			expected: []Image{
+				{
+					ID:         newID,
+					ChainID:    "test-chain-id-2",
+					References: []string{"ref-1"},
+					Size:       20,
+				},
+			},
+		},
+		"existing ref disappear": {
+			ref:      "ref-1",
+			image:    nil,
+			expected: []Image{},
+		},
+	} {
+		t.Logf("TestCase %q", desc)
+		s, err := NewFakeStore([]Image{image})
+		assert.NoError(err)
+		assert.NoError(s.update(test.ref, test.image))
+
+		assert.Len(s.List(), len(test.expected))
+		for _, expect := range test.expected {
+			got, err := s.Get(expect.ID)
+			assert.NoError(err)
+			equal(got, expect)
+			for _, ref := range expect.References {
+				id, err := s.Resolve(ref)
+				assert.NoError(err)
+				assert.Equal(expect.ID, id)
+			}
+		}
+
+		if test.image == nil {
+			// Shouldn't be able to index by removed ref.
+			id, err := s.Resolve(test.ref)
+			assert.Equal(storeutil.ErrNotExist, err)
+			assert.Empty(id)
+		}
 	}
 }
@ -41,3 +41,19 @@ func SubtractStringSlice(ss []string, str string) []string {
 	}
 	return res
 }
+
+// MergeStringSlices merges 2 string slices into one and remove duplicated elements.
+func MergeStringSlices(a []string, b []string) []string {
+	set := map[string]struct{}{}
+	for _, s := range a {
+		set[s] = struct{}{}
+	}
+	for _, s := range b {
+		set[s] = struct{}{}
+	}
+	var ss []string
+	for s := range set {
+		ss = append(ss, s)
+	}
+	return ss
+}
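A quick usage sketch of the new helper: merging drops duplicates, which is why adding an image under an already known reference is a no-op for the store. The reference strings below are illustrative.

package main

import (
	"fmt"

	"github.com/containerd/cri/pkg/util"
)

func main() {
	merged := util.MergeStringSlices(
		[]string{"docker.io/library/alpine:3.7"},
		[]string{"docker.io/library/alpine:3.7", "docker.io/library/alpine:latest"},
	)
	// Two unique references; ordering is not guaranteed.
	fmt.Println(len(merged), merged)
}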
@ -46,3 +46,14 @@ func TestSubtractStringSlice(t *testing.T) {
 	assert.Empty(t, SubtractStringSlice(nil, "hij"))
 	assert.Empty(t, SubtractStringSlice([]string{}, "hij"))
 }
+
+func TestMergeStringSlices(t *testing.T) {
+	s1 := []string{"abc", "def", "ghi"}
+	s2 := []string{"def", "jkl", "mno"}
+	expect := []string{"abc", "def", "ghi", "jkl", "mno"}
+	result := MergeStringSlices(s1, s2)
+	assert.Len(t, result, len(expect))
+	for _, s := range expect {
+		assert.Contains(t, result, s)
+	}
+}