Add content test suite

Add a content test suite with a test for the writer.
Update the fs and metadata implementations to use the test suite.

Signed-off-by: Derek McGowan <derek@mcgstyle.net>
Derek McGowan 2017-07-10 11:37:39 -07:00
parent 442365248b
commit 938f3185bd
5 changed files with 337 additions and 9 deletions
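The new suite is driven by a factory callback: a backend registers itself by passing testsuite.ContentSuite a function that builds a content.Store rooted at a temporary directory and returns a cleanup function, while the suite handles namespacing, temp-dir setup, and teardown. The fs and metadata tests in this diff are the real registrations; as a rough sketch, a hypothetical additional backend (the package name mystore and its NewStore constructor are made up for illustration) would hook in the same way:

package mystore

import (
	"context"
	"testing"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/testsuite"
)

// TestContent runs the shared content suite against this hypothetical store.
func TestContent(t *testing.T) {
	testsuite.ContentSuite(t, "mystore", func(ctx context.Context, root string) (content.Store, func(), error) {
		// NewStore is assumed to build a store rooted at the suite's temp dir.
		cs, err := NewStore(root)
		if err != nil {
			return nil, nil, err
		}
		// Nothing extra to release here, so the cleanup callback is a no-op.
		return cs, func() {}, nil
	})
}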


@@ -1,4 +1,4 @@
-package content
+package fs
import (
"bufio"
@@ -17,10 +17,22 @@ import (
"testing"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/testsuite"
"github.com/containerd/containerd/testutil"
"github.com/opencontainers/go-digest"
)
func TestContent(t *testing.T) {
testsuite.ContentSuite(t, "fs", func(ctx context.Context, root string) (content.Store, func(), error) {
cs, err := NewStore(root)
if err != nil {
return nil, nil, err
}
return cs, func() {}, nil
})
}
func TestContentWriter(t *testing.T) {
ctx, tmpdir, cs, cleanup := contentStoreEnv(t)
defer cleanup()
@@ -63,7 +75,7 @@ func TestContentWriter(t *testing.T) {
ingestions[i].StartedAt = time.Time{}
}
-if !reflect.DeepEqual(ingestions, []Status{
+if !reflect.DeepEqual(ingestions, []content.Status{
{
Ref: "myref",
Offset: 0,
@@ -132,7 +144,7 @@ func TestWalkBlobs(t *testing.T) {
expected[dgst] = struct{}{}
}
-if err := cs.Walk(ctx, func(bi Info) error {
+if err := cs.Walk(ctx, func(bi content.Info) error {
found[bi.Digest] = struct{}{}
checkBlobPath(t, cs, bi.Digest)
return nil
@@ -201,7 +213,7 @@ func generateBlobs(t checker, nblobs, maxsize int64) map[digest.Digest][]byte {
return blobs
}
-func populateBlobStore(t checker, ctx context.Context, cs Store, nblobs, maxsize int64) map[digest.Digest][]byte {
+func populateBlobStore(t checker, ctx context.Context, cs content.Store, nblobs, maxsize int64) map[digest.Digest][]byte {
blobs := generateBlobs(t, nblobs, maxsize)
for dgst, p := range blobs {
@@ -211,7 +223,7 @@ func populateBlobStore(t checker, ctx context.Context, cs Store, nblobs, maxsize
return blobs
}
-func contentStoreEnv(t checker) (context.Context, string, Store, func()) {
+func contentStoreEnv(t checker) (context.Context, string, content.Store, func()) {
pc, _, _, ok := runtime.Caller(1)
if !ok {
t.Fatal("failed to resolve caller")
@@ -247,7 +259,7 @@ func checkCopy(t checker, size int64, dst io.Writer, src io.Reader) {
}
}
-func checkBlobPath(t *testing.T, cs Store, dgst digest.Digest) string {
+func checkBlobPath(t *testing.T, cs content.Store, dgst digest.Digest) string {
path := cs.(*store).blobPath(dgst)
if path != filepath.Join(cs.(*store).root, "blobs", dgst.Algorithm().String(), dgst.Hex()) {
@@ -268,8 +280,8 @@ func checkBlobPath(t *testing.T, cs Store, dgst digest.Digest) string {
return path
}
-func checkWrite(t checker, ctx context.Context, cs Store, dgst digest.Digest, p []byte) digest.Digest {
-if err := WriteBlob(ctx, cs, dgst.String(), bytes.NewReader(p), int64(len(p)), dgst); err != nil {
+func checkWrite(t checker, ctx context.Context, cs content.Store, dgst digest.Digest, p []byte) digest.Digest {
+if err := content.WriteBlob(ctx, cs, dgst.String(), bytes.NewReader(p), int64(len(p)), dgst); err != nil {
t.Fatal(err)
}


@@ -110,6 +110,10 @@ func (w *writer) Commit(size int64, expected digest.Digest) error {
}
return err
}
commitTime := time.Now()
if err := os.Chtimes(target, commitTime, commitTime); err != nil {
return err
}
unlock(w.ref)
w.fp = nil
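The added Chtimes call stamps the committed blob with the commit time. That matters for the new suite: checkInfo (in the testsuite file below) asserts that Info.CreatedAt and Info.UpdatedAt fall between timestamps sampled immediately before and after Commit, and a file-backed store will typically derive both from the blob file's modification time. A minimal sketch of that assumption (statInfo is a hypothetical helper, not code from this diff):

package fs

import (
	"os"

	"github.com/containerd/containerd/content"
	digest "github.com/opencontainers/go-digest"
)

// statInfo sketches how a file-backed store might build a content.Info from
// the committed blob's file metadata.
func statInfo(dgst digest.Digest, fi os.FileInfo) content.Info {
	return content.Info{
		Digest: dgst,
		Size:   fi.Size(),
		// Commit stamps the target via os.Chtimes, so ModTime lands inside
		// the pre/post-commit window asserted by the suite's checkInfo.
		CreatedAt: fi.ModTime(),
		UpdatedAt: fi.ModTime(),
	}
}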


@@ -0,0 +1,281 @@
package testsuite
import (
"bytes"
"context"
"io"
"io/ioutil"
"math/rand"
"os"
"testing"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/testutil"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// ContentSuite runs a test suite on the content store given a factory function.
func ContentSuite(t *testing.T, name string, storeFn func(ctx context.Context, root string) (content.Store, func(), error)) {
t.Run("Writer", makeTest(t, name, storeFn, checkContentStoreWriter))
}
func makeTest(t *testing.T, name string, storeFn func(ctx context.Context, root string) (content.Store, func(), error), fn func(ctx context.Context, t *testing.T, cs content.Store)) func(t *testing.T) {
return func(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), name)
tmpDir, err := ioutil.TempDir("", "content-suite-"+name+"-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
cs, cleanup, err := storeFn(ctx, tmpDir)
if err != nil {
t.Fatal(err)
}
defer cleanup()
defer testutil.DumpDir(t, tmpDir)
fn(ctx, t, cs)
}
}
func checkContentStoreWriter(ctx context.Context, t *testing.T, cs content.Store) {
c1, d1 := createContent(256, 1)
w1, err := cs.Writer(ctx, "c1", 0, "")
if err != nil {
t.Fatal(err)
}
c2, d2 := createContent(256, 2)
w2, err := cs.Writer(ctx, "c2", int64(len(c2)), "")
if err != nil {
t.Fatal(err)
}
c3, d3 := createContent(256, 3)
w3, err := cs.Writer(ctx, "c3", 0, d3)
if err != nil {
t.Fatal(err)
}
c4, d4 := createContent(256, 4)
w4, err := cs.Writer(ctx, "c4", int64(len(c4)), d4)
if err != nil {
t.Fatal(err)
}
smallbuf := make([]byte, 32)
for _, s := range []struct {
content []byte
digest digest.Digest
writer content.Writer
}{
{
content: c1,
digest: d1,
writer: w1,
},
{
content: c2,
digest: d2,
writer: w2,
},
{
content: c3,
digest: d3,
writer: w3,
},
{
content: c4,
digest: d4,
writer: w4,
},
} {
n, err := io.CopyBuffer(s.writer, bytes.NewReader(s.content), smallbuf)
if err != nil {
t.Fatal(err)
}
if n != int64(len(s.content)) {
t.Fatalf("Unexpected copy length %d, expected %d", n, len(s.content))
}
preCommit := time.Now()
if err := s.writer.Commit(0, ""); err != nil {
t.Fatal(err)
}
postCommit := time.Now()
if s.writer.Digest() != s.digest {
t.Fatalf("Unexpected commit digest %s, expected %s", s.writer.Digest(), s.digest)
}
info := content.Info{
Digest: s.digest,
Size: int64(len(s.content)),
}
if err := checkInfo(ctx, cs, s.digest, info, preCommit, postCommit, preCommit, postCommit); err != nil {
t.Fatalf("Check info failed: %+v", err)
}
}
}
func checkUploadStatus(ctx context.Context, t *testing.T, cs content.Store) {
c1, d1 := createContent(256, 1)
preStart := time.Now()
w1, err := cs.Writer(ctx, "c1", 256, d1)
if err != nil {
t.Fatal(err)
}
postStart := time.Now()
d := digest.FromBytes([]byte{})
expected := content.Status{
Ref: "c1",
Total: 256,
Expected: d1,
}
preUpdate := preStart
postUpdate := postStart
if err := checkStatus(w1, expected, d, preStart, postStart, preUpdate, postUpdate); err != nil {
t.Fatalf("Status check failed: %+v", err)
}
// Write first 64 bytes
preUpdate = time.Now()
if _, err := w1.Write(c1[:64]); err != nil {
t.Fatalf("Failed to write: %+v", err)
}
postUpdate = time.Now()
expected.Offset = 64
d = digest.FromBytes(c1[:64])
if err := checkStatus(w1, expected, d, preStart, postStart, preUpdate, postUpdate); err != nil {
t.Fatalf("Status check failed: %+v", err)
}
// Write next 128 bytes
preUpdate = time.Now()
if _, err := w1.Write(c1[64:192]); err != nil {
t.Fatalf("Failed to write: %+v", err)
}
postUpdate = time.Now()
expected.Offset = 192
d = digest.FromBytes(c1[:192])
if err := checkStatus(w1, expected, d, preStart, postStart, preUpdate, postUpdate); err != nil {
t.Fatalf("Status check failed: %+v", err)
}
// Write last 64 bytes
preUpdate = time.Now()
if _, err := w1.Write(c1[192:]); err != nil {
t.Fatalf("Failed to write: %+v", err)
}
postUpdate = time.Now()
expected.Offset = 256
d = digest.FromBytes(c1)
if err := checkStatus(w1, expected, d, preStart, postStart, preUpdate, postUpdate); err != nil {
t.Fatalf("Status check failed: %+v", err)
}
preCommit := time.Now()
if err := w1.Commit(0, ""); err != nil {
t.Fatalf("Commit failed: %+v", err)
}
postCommit := time.Now()
info := content.Info{
Digest: d1,
Size: 256,
}
if err := checkInfo(ctx, cs, d1, info, preCommit, postCommit, preCommit, postCommit); err != nil {
t.Fatalf("Check info failed: %+v", err)
}
}
func checkStatus(w content.Writer, expected content.Status, d digest.Digest, preStart, postStart, preUpdate, postUpdate time.Time) error {
st, err := w.Status()
if err != nil {
return errors.Wrap(err, "failed to get status")
}
wd := w.Digest()
if wd != d {
return errors.Errorf("unexpected digest %v, expected %v", wd, d)
}
if st.Ref != expected.Ref {
return errors.Errorf("unexpected ref %v, expected %v", st.Ref, expected.Ref)
}
if st.Offset != expected.Offset {
return errors.Errorf("unexpected offset %d, expected %d", st.Offset, expected.Offset)
}
if st.Total != expected.Total {
return errors.Errorf("unexpected total %d, expected %d", st.Total, expected.Total)
}
if st.Expected != expected.Expected {
return errors.Errorf("unexpected \"expected digest\" %v, expected %v", st.Expected, expected.Expected)
}
if st.StartedAt.After(postStart) || st.StartedAt.Before(preStart) {
return errors.Errorf("unexpected started at time %s, expected between %s and %s", st.StartedAt, preStart, postStart)
}
if st.UpdatedAt.After(postUpdate) || st.UpdatedAt.Before(preUpdate) {
return errors.Errorf("unexpected updated at time %s, expected between %s and %s", st.UpdatedAt, preUpdate, postUpdate)
}
return nil
}
func checkInfo(ctx context.Context, cs content.Store, d digest.Digest, expected content.Info, c1, c2, u1, u2 time.Time) error {
info, err := cs.Info(ctx, d)
if err != nil {
return errors.Wrap(err, "failed to get info")
}
if info.Digest != d {
return errors.Errorf("unexpected info digest %s, expected %s", info.Digest, d)
}
if info.Size != expected.Size {
return errors.Errorf("unexpected info size %d, expected %d", info.Size, expected.Size)
}
if info.CreatedAt.After(c2) || info.CreatedAt.Before(c1) {
return errors.Errorf("unexpected created at time %s, expected between %s and %s", info.CreatedAt, c1, c2)
}
if info.UpdatedAt.After(u2) || info.UpdatedAt.Before(u1) {
return errors.Errorf("unexpected updated at time %s, expected between %s and %s", info.UpdatedAt, u1, u2)
}
if len(info.Labels) != len(expected.Labels) {
return errors.Errorf("mismatched number of labels\ngot:\n%#v\nexpected:\n%#v", info.Labels, expected.Labels)
}
for k, v := range expected.Labels {
actual := info.Labels[k]
if v != actual {
return errors.Errorf("unexpected value for label %q: %q, expected %q", k, actual, v)
}
}
return nil
}
func createContent(size, seed int64) ([]byte, digest.Digest) {
b, err := ioutil.ReadAll(io.LimitReader(rand.New(rand.NewSource(seed)), size))
if err != nil {
panic(err)
}
return b, digest.FromBytes(b)
}
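ContentSuite registers only the Writer test so far; checkUploadStatus is defined by the suite but not yet wired in. Presumably a backend could get the incremental status checks as well through one more registration inside the testsuite package, along these lines (a sketch, not part of this commit):

// Hypothetical extension of ContentSuite, adding the upload status checks
// alongside the writer test:
//
//	t.Run("Writer", makeTest(t, name, storeFn, checkContentStoreWriter))
//	t.Run("UploadStatus", makeTest(t, name, storeFn, checkUploadStatus))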


@@ -385,13 +385,14 @@ func (nw *namespacedWriter) commit(tx *bolt.Tx, size int64, expected digest.Dige
return err
}
-timeEncoded, err := status.UpdatedAt.MarshalBinary()
+timeEncoded, err := time.Now().UTC().MarshalBinary()
if err != nil {
return err
}
for _, v := range [][2][]byte{
{bucketKeyCreatedAt, timeEncoded},
{bucketKeyUpdatedAt, timeEncoded},
{bucketKeySize, sizeEncoded},
} {
if err := bkt.Put(v[0], v[1]); err != nil {

metadata/content_test.go (new file, 30 additions)

@@ -0,0 +1,30 @@
package metadata
import (
"context"
"path/filepath"
"testing"
"github.com/boltdb/bolt"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/fs"
"github.com/containerd/containerd/content/testsuite"
)
func TestContent(t *testing.T) {
testsuite.ContentSuite(t, "metadata", func(ctx context.Context, root string) (content.Store, func(), error) {
// TODO: Use mocked or in-memory store
cs, err := fs.NewStore(root)
if err != nil {
return nil, nil, err
}
db, err := bolt.Open(filepath.Join(root, "metadata.db"), 0660, nil)
if err != nil {
return nil, nil, err
}
cs = NewContentStore(db, cs)
return cs, func() {}, nil
})
}