Create image reference cache.
Signed-off-by: Lantao Liu <lantaol@google.com>
This commit is contained in:
34
pkg/store/image/fake_image.go
Normal file
34
pkg/store/image/fake_image.go
Normal file
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
Copyright 2018 The Containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package image
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
// NewFakeStore returns an image store with predefined images.
|
||||
// Update is not allowed for this fake store.
|
||||
func NewFakeStore(images []Image) (*Store, error) {
|
||||
s := NewStore(nil)
|
||||
for _, i := range images {
|
||||
for _, ref := range i.References {
|
||||
s.refCache[ref] = i.ID
|
||||
}
|
||||
if err := s.store.add(i); err != nil {
|
||||
return nil, errors.Wrapf(err, "add image %q", i)
|
||||
}
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
@@ -17,14 +17,21 @@ limitations under the License.
|
||||
package image
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/docker/distribution/digestset"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
imagedigest "github.com/opencontainers/go-digest"
|
||||
imageidentity "github.com/opencontainers/image-spec/identity"
|
||||
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/containerd/cri/pkg/store"
|
||||
storeutil "github.com/containerd/cri/pkg/store"
|
||||
"github.com/containerd/cri/pkg/util"
|
||||
)
|
||||
|
||||
// Image contains all resources associated with the image. All fields
|
||||
@@ -32,10 +39,8 @@ import (
|
||||
type Image struct {
|
||||
// Id of the image. Normally the digest of image config.
|
||||
ID string
|
||||
// Other names by which this image is known.
|
||||
RepoTags []string
|
||||
// Digests by which this image is known.
|
||||
RepoDigests []string
|
||||
// References are references to the image, e.g. RepoTag and RepoDigest.
|
||||
References []string
|
||||
// ChainID is the chainID of the image.
|
||||
ChainID string
|
||||
// Size is the compressed size of the image.
|
||||
@@ -48,28 +53,156 @@ type Image struct {
|
||||
|
||||
// Store stores all images.
//
// It keeps a containerd reference -> image-id cache (refCache) in sync
// with the image metadata held by the internal store.
type Store struct {
	// lock protects refCache.
	// NOTE(review): the internal store carries its own lock, so the two
	// are guarded independently — confirm callers rely on that split.
	lock sync.RWMutex
	// refCache is a containerd image reference to image id cache.
	refCache map[string]string
	// client is the containerd client.
	client *containerd.Client
	// store is the internal image store indexed by image id.
	store *store
}
|
||||
|
||||
// NewStore creates an image store.
|
||||
func NewStore(client *containerd.Client) *Store {
|
||||
return &Store{
|
||||
refCache: make(map[string]string),
|
||||
client: client,
|
||||
store: &store{
|
||||
images: make(map[string]Image),
|
||||
digestSet: digestset.NewSet(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Update updates cache for a reference.
|
||||
func (s *Store) Update(ctx context.Context, ref string) error {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
i, err := s.client.GetImage(ctx, ref)
|
||||
if err != nil && !errdefs.IsNotFound(err) {
|
||||
return errors.Wrap(err, "get image from containerd")
|
||||
}
|
||||
var img *Image
|
||||
if err == nil {
|
||||
img, err = getImage(ctx, i)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get image info from containerd")
|
||||
}
|
||||
}
|
||||
return s.update(ref, img)
|
||||
}
|
||||
|
||||
// update updates the internal cache. img == nil means that
|
||||
// the image does not exist in containerd.
|
||||
func (s *Store) update(ref string, img *Image) error {
|
||||
oldID, oldExist := s.refCache[ref]
|
||||
if img == nil {
|
||||
// The image reference doesn't exist in containerd.
|
||||
if oldExist {
|
||||
// Remove the reference from the store.
|
||||
s.store.delete(oldID, ref)
|
||||
delete(s.refCache, ref)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if oldExist {
|
||||
if oldID == img.ID {
|
||||
return nil
|
||||
}
|
||||
// Updated. Remove tag from old image.
|
||||
s.store.delete(oldID, ref)
|
||||
}
|
||||
// New image. Add new image.
|
||||
s.refCache[ref] = img.ID
|
||||
return s.store.add(*img)
|
||||
}
|
||||
|
||||
// getImage gets image information from containerd.
//
// It assembles an Image from several containerd queries: the rootfs
// diff IDs (for the chain ID), the compressed size, and the image
// config blob (whose digest becomes the image ID and whose content
// becomes the OCI image spec). The single reference recorded is the
// containerd image name.
func getImage(ctx context.Context, i containerd.Image) (*Image, error) {
	// Get image information.
	diffIDs, err := i.RootFS(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "get image diffIDs")
	}
	chainID := imageidentity.ChainID(diffIDs)

	size, err := i.Size(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "get image compressed resource size")
	}

	desc, err := i.Config(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "get image config descriptor")
	}
	// The image ID is the digest of the image config.
	id := desc.Digest.String()

	rb, err := content.ReadBlob(ctx, i.ContentStore(), desc)
	if err != nil {
		return nil, errors.Wrap(err, "read image config from content store")
	}
	var ociimage imagespec.Image
	if err := json.Unmarshal(rb, &ociimage); err != nil {
		return nil, errors.Wrapf(err, "unmarshal image config %s", rb)
	}

	return &Image{
		ID:         id,
		References: []string{i.Name()},
		ChainID:    chainID.String(),
		Size:       size,
		ImageSpec:  ociimage,
		Image:      i,
	}, nil
}
|
||||
|
||||
// Resolve resolves a image reference to image id.
|
||||
func (s *Store) Resolve(ref string) (string, error) {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
id, ok := s.refCache[ref]
|
||||
if !ok {
|
||||
return "", storeutil.ErrNotExist
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// Get gets image metadata by image id. The id can be truncated.
// Returns various validation errors if the image id is invalid.
// Returns storeutil.ErrNotExist if the image doesn't exist.
func (s *Store) Get(id string) (Image, error) {
	// Delegates to the internal store, which resolves truncated ids via
	// its digest set and does its own locking.
	return s.store.get(id)
}
|
||||
|
||||
// List lists all images.
func (s *Store) List() []Image {
	// Snapshot is taken by the internal store under its own read lock.
	return s.store.list()
}
|
||||
|
||||
// store is the internal image store indexed by image id. It guards its
// state with its own lock, independent of the outer Store's refCache
// lock.
type store struct {
	lock sync.RWMutex
	// images maps full image id -> image metadata.
	images map[string]Image
	// digestSet lets lookups resolve truncated image ids to full
	// digests.
	digestSet *digestset.Set
}
|
||||
|
||||
// NewStore creates an image store.
|
||||
func NewStore() *Store {
|
||||
return &Store{
|
||||
images: make(map[string]Image),
|
||||
digestSet: digestset.NewSet(),
|
||||
func (s *store) list() []Image {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
var images []Image
|
||||
for _, i := range s.images {
|
||||
images = append(images, i)
|
||||
}
|
||||
return images
|
||||
}
|
||||
|
||||
// Add an image into the store.
|
||||
func (s *Store) Add(img Image) error {
|
||||
func (s *store) add(img Image) error {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
if _, err := s.digestSet.Lookup(img.ID); err != nil {
|
||||
if err != digestset.ErrDigestNotFound {
|
||||
return err
|
||||
}
|
||||
if err := s.digestSet.Add(godigest.Digest(img.ID)); err != nil {
|
||||
if err := s.digestSet.Add(imagedigest.Digest(img.ID)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -80,44 +213,29 @@ func (s *Store) Add(img Image) error {
|
||||
s.images[img.ID] = img
|
||||
return nil
|
||||
}
|
||||
// Or else, merge the repo tags/digests.
|
||||
i.RepoTags = mergeStringSlices(i.RepoTags, img.RepoTags)
|
||||
i.RepoDigests = mergeStringSlices(i.RepoDigests, img.RepoDigests)
|
||||
// Or else, merge the references.
|
||||
i.References = util.MergeStringSlices(i.References, img.References)
|
||||
s.images[img.ID] = i
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the image with specified id. Returns store.ErrNotExist if the
|
||||
// image doesn't exist.
|
||||
func (s *Store) Get(id string) (Image, error) {
|
||||
func (s *store) get(id string) (Image, error) {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
digest, err := s.digestSet.Lookup(id)
|
||||
if err != nil {
|
||||
if err == digestset.ErrDigestNotFound {
|
||||
err = store.ErrNotExist
|
||||
err = storeutil.ErrNotExist
|
||||
}
|
||||
return Image{}, err
|
||||
}
|
||||
if i, ok := s.images[digest.String()]; ok {
|
||||
return i, nil
|
||||
}
|
||||
return Image{}, store.ErrNotExist
|
||||
return Image{}, storeutil.ErrNotExist
|
||||
}
|
||||
|
||||
// List lists all images.
|
||||
func (s *Store) List() []Image {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
var images []Image
|
||||
for _, i := range s.images {
|
||||
images = append(images, i)
|
||||
}
|
||||
return images
|
||||
}
|
||||
|
||||
// Delete deletes the image with specified id.
|
||||
func (s *Store) Delete(id string) {
|
||||
func (s *store) delete(id, ref string) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
digest, err := s.digestSet.Lookup(id)
|
||||
@@ -126,22 +244,16 @@ func (s *Store) Delete(id string) {
|
||||
// So we need to return if there are error.
|
||||
return
|
||||
}
|
||||
i, ok := s.images[digest.String()]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
i.References = util.SubtractStringSlice(i.References, ref)
|
||||
if len(i.References) != 0 {
|
||||
s.images[digest.String()] = i
|
||||
return
|
||||
}
|
||||
// Remove the image if it is not referenced any more.
|
||||
s.digestSet.Remove(digest) // nolint: errcheck
|
||||
delete(s.images, digest.String())
|
||||
}
|
||||
|
||||
// mergeStringSlices merges two string slices into one, removing
// duplicated elements. The order of the result is unspecified (it
// follows map iteration order). Returns nil when both inputs are empty.
func mergeStringSlices(a []string, b []string) []string {
	seen := make(map[string]struct{}, len(a)+len(b))
	for _, slice := range [][]string{a, b} {
		for _, elem := range slice {
			seen[elem] = struct{}{}
		}
	}
	var merged []string
	for elem := range seen {
		merged = append(merged, elem)
	}
	return merged
}
|
||||
|
||||
@@ -17,65 +17,61 @@ limitations under the License.
|
||||
package image
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/docker/distribution/digestset"
|
||||
assertlib "github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/containerd/cri/pkg/store"
|
||||
storeutil "github.com/containerd/cri/pkg/store"
|
||||
)
|
||||
|
||||
func TestImageStore(t *testing.T) {
|
||||
func TestInternalStore(t *testing.T) {
|
||||
images := []Image{
|
||||
{
|
||||
ID: "sha256:1123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
ChainID: "test-chain-id-1",
|
||||
RepoTags: []string{"tag-1"},
|
||||
RepoDigests: []string{"digest-1"},
|
||||
Size: 10,
|
||||
ImageSpec: imagespec.Image{},
|
||||
ID: "sha256:1123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
ChainID: "test-chain-id-1",
|
||||
References: []string{"ref-1"},
|
||||
Size: 10,
|
||||
},
|
||||
{
|
||||
ID: "sha256:2123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
ChainID: "test-chain-id-2abcd",
|
||||
RepoTags: []string{"tag-2abcd"},
|
||||
RepoDigests: []string{"digest-2abcd"},
|
||||
Size: 20,
|
||||
ImageSpec: imagespec.Image{},
|
||||
ID: "sha256:2123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
ChainID: "test-chain-id-2abcd",
|
||||
References: []string{"ref-2abcd"},
|
||||
Size: 20,
|
||||
},
|
||||
{
|
||||
ID: "sha256:3123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
RepoTags: []string{"tag-4a333"},
|
||||
RepoDigests: []string{"digest-4a333"},
|
||||
ChainID: "test-chain-id-4a333",
|
||||
Size: 30,
|
||||
ImageSpec: imagespec.Image{},
|
||||
ID: "sha256:3123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
References: []string{"ref-4a333"},
|
||||
ChainID: "test-chain-id-4a333",
|
||||
Size: 30,
|
||||
},
|
||||
{
|
||||
ID: "sha256:4123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
RepoTags: []string{"tag-4abcd"},
|
||||
RepoDigests: []string{"digest-4abcd"},
|
||||
ChainID: "test-chain-id-4abcd",
|
||||
Size: 40,
|
||||
ImageSpec: imagespec.Image{},
|
||||
ID: "sha256:4123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
References: []string{"ref-4abcd"},
|
||||
ChainID: "test-chain-id-4abcd",
|
||||
Size: 40,
|
||||
},
|
||||
}
|
||||
assert := assertlib.New(t)
|
||||
genTruncIndex := func(normalName string) string { return normalName[:(len(normalName)+1)/2] }
|
||||
|
||||
s := NewStore()
|
||||
s := &store{
|
||||
images: make(map[string]Image),
|
||||
digestSet: digestset.NewSet(),
|
||||
}
|
||||
|
||||
t.Logf("should be able to add image")
|
||||
for _, img := range images {
|
||||
err := s.Add(img)
|
||||
err := s.add(img)
|
||||
assert.NoError(err)
|
||||
}
|
||||
|
||||
t.Logf("should be able to get image")
|
||||
for _, v := range images {
|
||||
truncID := genTruncIndex(v.ID)
|
||||
got, err := s.Get(truncID)
|
||||
got, err := s.get(truncID)
|
||||
assert.NoError(err, "truncID:%s, fullID:%s", truncID, v.ID)
|
||||
assert.Equal(v, got)
|
||||
}
|
||||
@@ -83,7 +79,7 @@ func TestImageStore(t *testing.T) {
|
||||
t.Logf("should be able to get image by truncated imageId without algorithm")
|
||||
for _, v := range images {
|
||||
truncID := genTruncIndex(v.ID[strings.Index(v.ID, ":")+1:])
|
||||
got, err := s.Get(truncID)
|
||||
got, err := s.get(truncID)
|
||||
assert.NoError(err, "truncID:%s, fullID:%s", truncID, v.ID)
|
||||
assert.Equal(v, got)
|
||||
}
|
||||
@@ -91,54 +87,162 @@ func TestImageStore(t *testing.T) {
|
||||
t.Logf("should not be able to get image by ambiguous prefix")
|
||||
ambiguousPrefixs := []string{"sha256", "sha256:"}
|
||||
for _, v := range ambiguousPrefixs {
|
||||
_, err := s.Get(v)
|
||||
_, err := s.get(v)
|
||||
assert.NotEqual(nil, err)
|
||||
}
|
||||
|
||||
t.Logf("should be able to list images")
|
||||
imgs := s.List()
|
||||
imgs := s.list()
|
||||
assert.Len(imgs, len(images))
|
||||
|
||||
imageNum := len(images)
|
||||
for _, v := range images {
|
||||
truncID := genTruncIndex(v.ID)
|
||||
oldRepoTag := v.RepoTags[0]
|
||||
oldRepoDigest := v.RepoDigests[0]
|
||||
newRepoTag := oldRepoTag + "new"
|
||||
newRepoDigest := oldRepoDigest + "new"
|
||||
oldRef := v.References[0]
|
||||
newRef := oldRef + "new"
|
||||
|
||||
t.Logf("should be able to add new repo tags/digests")
|
||||
t.Logf("should be able to add new references")
|
||||
newImg := v
|
||||
newImg.RepoTags = []string{newRepoTag}
|
||||
newImg.RepoDigests = []string{newRepoDigest}
|
||||
err := s.Add(newImg)
|
||||
newImg.References = []string{newRef}
|
||||
err := s.add(newImg)
|
||||
assert.NoError(err)
|
||||
got, err := s.Get(truncID)
|
||||
got, err := s.get(truncID)
|
||||
assert.NoError(err)
|
||||
assert.Len(got.RepoTags, 2)
|
||||
assert.Contains(got.RepoTags, oldRepoTag, newRepoTag)
|
||||
assert.Len(got.RepoDigests, 2)
|
||||
assert.Contains(got.RepoDigests, oldRepoDigest, newRepoDigest)
|
||||
assert.Len(got.References, 2)
|
||||
assert.Contains(got.References, oldRef, newRef)
|
||||
|
||||
t.Logf("should not be able to add duplicated repo tags/digests")
|
||||
err = s.Add(newImg)
|
||||
t.Logf("should not be able to add duplicated references")
|
||||
err = s.add(newImg)
|
||||
assert.NoError(err)
|
||||
got, err = s.Get(truncID)
|
||||
got, err = s.get(truncID)
|
||||
assert.NoError(err)
|
||||
assert.Len(got.RepoTags, 2)
|
||||
assert.Contains(got.RepoTags, oldRepoTag, newRepoTag)
|
||||
assert.Len(got.RepoDigests, 2)
|
||||
assert.Contains(got.RepoDigests, oldRepoDigest, newRepoDigest)
|
||||
assert.Len(got.References, 2)
|
||||
assert.Contains(got.References, oldRef, newRef)
|
||||
|
||||
t.Logf("should be able to delete image references")
|
||||
s.delete(truncID, oldRef)
|
||||
got, err = s.get(truncID)
|
||||
assert.NoError(err)
|
||||
assert.Equal([]string{newRef}, got.References)
|
||||
|
||||
t.Logf("should be able to delete image")
|
||||
s.Delete(truncID)
|
||||
imageNum--
|
||||
imgs = s.List()
|
||||
assert.Len(imgs, imageNum)
|
||||
s.delete(truncID, newRef)
|
||||
got, err = s.get(truncID)
|
||||
assert.Equal(storeutil.ErrNotExist, err)
|
||||
assert.Equal(Image{}, got)
|
||||
|
||||
t.Logf("get should return empty struct and ErrNotExist after deletion")
|
||||
img, err := s.Get(truncID)
|
||||
assert.Equal(Image{}, img)
|
||||
assert.Equal(store.ErrNotExist, err)
|
||||
imageNum--
|
||||
imgs = s.list()
|
||||
assert.Len(imgs, imageNum)
|
||||
}
|
||||
}
|
||||
|
||||
// TestImageStore exercises Store.update via a fake store seeded with a
// single image, covering the four ref-sync transitions plus a no-op.
func TestImageStore(t *testing.T) {
	id := "sha256:1123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	newID := "sha256:9923456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	// Seed image present in every test case's starting state.
	image := Image{
		ID:         id,
		ChainID:    "test-chain-id-1",
		References: []string{"ref-1"},
		Size:       10,
	}
	assert := assertlib.New(t)

	// equal compares two images ignoring the order of References.
	equal := func(i1, i2 Image) {
		sort.Strings(i1.References)
		sort.Strings(i2.References)
		assert.Equal(i1, i2)
	}
	for desc, test := range map[string]struct {
		ref      string
		image    *Image // nil means the ref no longer exists in containerd
		expected []Image
	}{
		"nothing should happen if a non-exist ref disappear": {
			ref:      "ref-2",
			image:    nil,
			expected: []Image{image},
		},
		"new ref for an existing image": {
			ref: "ref-2",
			image: &Image{
				ID:         id,
				ChainID:    "test-chain-id-1",
				References: []string{"ref-2"},
				Size:       10,
			},
			expected: []Image{
				{
					ID:         id,
					ChainID:    "test-chain-id-1",
					References: []string{"ref-1", "ref-2"},
					Size:       10,
				},
			},
		},
		"new ref for a new image": {
			ref: "ref-2",
			image: &Image{
				ID:         newID,
				ChainID:    "test-chain-id-2",
				References: []string{"ref-2"},
				Size:       20,
			},
			expected: []Image{
				image,
				{
					ID:         newID,
					ChainID:    "test-chain-id-2",
					References: []string{"ref-2"},
					Size:       20,
				},
			},
		},
		"existing ref point to a new image": {
			ref: "ref-1",
			image: &Image{
				ID:         newID,
				ChainID:    "test-chain-id-2",
				References: []string{"ref-1"},
				Size:       20,
			},
			expected: []Image{
				{
					ID:         newID,
					ChainID:    "test-chain-id-2",
					References: []string{"ref-1"},
					Size:       20,
				},
			},
		},
		"existing ref disappear": {
			ref:      "ref-1",
			image:    nil,
			expected: []Image{},
		},
	} {
		t.Logf("TestCase %q", desc)
		// Fresh fake store per case so cases stay independent.
		s, err := NewFakeStore([]Image{image})
		assert.NoError(err)
		assert.NoError(s.update(test.ref, test.image))

		// Every expected image must be retrievable by id and by each of
		// its references.
		assert.Len(s.List(), len(test.expected))
		for _, expect := range test.expected {
			got, err := s.Get(expect.ID)
			assert.NoError(err)
			equal(got, expect)
			for _, ref := range expect.References {
				id, err := s.Resolve(ref)
				assert.NoError(err)
				assert.Equal(expect.ID, id)
			}
		}

		if test.image == nil {
			// Shouldn't be able to index by removed ref.
			id, err := s.Resolve(test.ref)
			assert.Equal(storeutil.ErrNotExist, err)
			assert.Empty(id)
		}
	}
}
|
||||
|
||||
Reference in New Issue
Block a user