images, containers: converge metadata API conventions
The primary feature we get with this PR is support for filters and labels on the image metadata store. In the process of doing this, the conventions for the API have been converged between containers and images, providing a model for other services. With images, `Put` (renamed to `Update` briefly) has been split into a `Create` and `Update`, allowing one to control the behavior around these operations. `Update` now includes support for masking fields at the datastore-level across both the containers and image service. Filters are now just string values to be interpreted directly within the data store. This should allow for some interesting future use cases in which the datastore might use the syntax for more efficient query paths. The containers service has been updated to follow these conventions as closely as possible. Signed-off-by: Stephen J Day <stephen.day@docker.com>
This commit is contained in:
79
metadata/adaptors.go
Normal file
79
metadata/adaptors.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package metadata
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/filters"
|
||||
"github.com/containerd/containerd/images"
|
||||
)
|
||||
|
||||
func adaptImage(o interface{}) filters.Adaptor {
|
||||
obj := o.(images.Image)
|
||||
return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
|
||||
if len(fieldpath) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
switch fieldpath[0] {
|
||||
case "name":
|
||||
return obj.Name, len(obj.Name) > 0
|
||||
case "target":
|
||||
if len(fieldpath) < 2 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
switch fieldpath[1] {
|
||||
case "digest":
|
||||
return obj.Target.Digest.String(), len(obj.Target.Digest) > 0
|
||||
case "mediatype":
|
||||
return obj.Target.MediaType, len(obj.Target.MediaType) > 0
|
||||
}
|
||||
case "labels":
|
||||
return checkMap(fieldpath[1:], obj.Labels)
|
||||
// TODO(stevvooe): Greater/Less than filters would be awesome for
|
||||
// size. Let's do it!
|
||||
}
|
||||
|
||||
return "", false
|
||||
})
|
||||
}
|
||||
func adaptContainer(o interface{}) filters.Adaptor {
|
||||
obj := o.(containers.Container)
|
||||
return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
|
||||
if len(fieldpath) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
switch fieldpath[0] {
|
||||
case "id":
|
||||
return obj.ID, len(obj.ID) > 0
|
||||
case "runtime":
|
||||
if len(fieldpath) <= 1 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
switch fieldpath[1] {
|
||||
case "name":
|
||||
return obj.Runtime.Name, len(obj.Runtime.Name) > 0
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
case "image":
|
||||
return obj.Image, len(obj.Image) > 0
|
||||
case "labels":
|
||||
return checkMap(fieldpath[1:], obj.Labels)
|
||||
}
|
||||
|
||||
return "", false
|
||||
})
|
||||
}
|
||||
|
||||
// checkMap resolves the remaining fieldpath components against a flat string
// map by joining them with "." and looking up the resulting key. It reports
// false when the map is empty or the key is absent.
func checkMap(fieldpath []string, m map[string]string) (string, bool) {
	if len(m) == 0 {
		return "", false
	}

	key := strings.Join(fieldpath, ".")
	value, present := m[key]
	return value, present
}
|
||||
@@ -44,6 +44,7 @@ var (
|
||||
bucketKeyOptions = []byte("options")
|
||||
bucketKeySpec = []byte("spec")
|
||||
bucketKeyRootFS = []byte("rootfs")
|
||||
bucketKeyTarget = []byte("target")
|
||||
bucketKeyCreatedAt = []byte("createdat")
|
||||
bucketKeyUpdatedAt = []byte("updatedat")
|
||||
)
|
||||
|
||||
@@ -45,25 +45,23 @@ func (s *containerStore) Get(ctx context.Context, id string) (containers.Contain
|
||||
return container, nil
|
||||
}
|
||||
|
||||
func (s *containerStore) List(ctx context.Context, fs ...filters.Filter) ([]containers.Container, error) {
|
||||
func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.Container, error) {
|
||||
namespace, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
m []containers.Container
|
||||
filter = filters.Filter(filters.Any(fs))
|
||||
bkt = getContainersBucket(s.tx, namespace)
|
||||
)
|
||||
|
||||
if len(fs) == 0 {
|
||||
filter = filters.Always
|
||||
filter, err := filters.ParseAll(fs...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
bkt := getContainersBucket(s.tx, namespace)
|
||||
if bkt == nil {
|
||||
return m, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var m []containers.Container
|
||||
if err := bkt.ForEach(func(k, v []byte) error {
|
||||
cbkt := bkt.Bucket(k)
|
||||
if cbkt == nil {
|
||||
@@ -86,46 +84,6 @@ func (s *containerStore) List(ctx context.Context, fs ...filters.Filter) ([]cont
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func adaptContainer(o interface{}) filters.Adaptor {
|
||||
obj := o.(containers.Container)
|
||||
return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
|
||||
if len(fieldpath) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
switch fieldpath[0] {
|
||||
case "id":
|
||||
return obj.ID, len(obj.ID) > 0
|
||||
case "runtime":
|
||||
if len(fieldpath) <= 1 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
switch fieldpath[1] {
|
||||
case "name":
|
||||
return obj.Runtime.Name, len(obj.Runtime.Name) > 0
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
case "image":
|
||||
return obj.Image, len(obj.Image) > 0
|
||||
case "labels":
|
||||
return checkMap(fieldpath[1:], obj.Labels)
|
||||
}
|
||||
|
||||
return "", false
|
||||
})
|
||||
}
|
||||
|
||||
func checkMap(fieldpath []string, m map[string]string) (string, bool) {
|
||||
if len(m) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
value, ok := m[strings.Join(fieldpath, ".")]
|
||||
return value, ok
|
||||
}
|
||||
|
||||
func (s *containerStore) Create(ctx context.Context, container containers.Container) (containers.Container, error) {
|
||||
namespace, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
@@ -149,16 +107,16 @@ func (s *containerStore) Create(ctx context.Context, container containers.Contai
|
||||
return containers.Container{}, err
|
||||
}
|
||||
|
||||
container.CreatedAt = time.Now()
|
||||
container.CreatedAt = time.Now().UTC()
|
||||
container.UpdatedAt = container.CreatedAt
|
||||
if err := writeContainer(&container, cbkt); err != nil {
|
||||
if err := writeContainer(cbkt, &container); err != nil {
|
||||
return containers.Container{}, errors.Wrap(err, "failed to write container")
|
||||
}
|
||||
|
||||
return container, nil
|
||||
}
|
||||
|
||||
func (s *containerStore) Update(ctx context.Context, container containers.Container) (containers.Container, error) {
|
||||
func (s *containerStore) Update(ctx context.Context, container containers.Container, fieldpaths ...string) (containers.Container, error) {
|
||||
namespace, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return containers.Container{}, err
|
||||
@@ -178,8 +136,50 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai
|
||||
return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID)
|
||||
}
|
||||
|
||||
container.UpdatedAt = time.Now()
|
||||
if err := writeContainer(&container, cbkt); err != nil {
|
||||
var updated containers.Container
|
||||
if err := readContainer(&updated, cbkt); err != nil {
|
||||
return updated, errors.Wrapf(err, "failed to read container from bucket")
|
||||
}
|
||||
updated.ID = container.ID
|
||||
|
||||
// apply the field mask. If you update this code, you better follow the
|
||||
// field mask rules in field_mask.proto. If you don't know what this
|
||||
// is, do not update this code.
|
||||
if len(fieldpaths) > 0 {
|
||||
// TODO(stevvooe): Move this logic into the store itself.
|
||||
for _, path := range fieldpaths {
|
||||
if strings.HasPrefix(path, "labels.") {
|
||||
if updated.Labels == nil {
|
||||
updated.Labels = map[string]string{}
|
||||
}
|
||||
key := strings.TrimPrefix(path, "labels.")
|
||||
updated.Labels[key] = container.Labels[key]
|
||||
continue
|
||||
}
|
||||
|
||||
switch path {
|
||||
case "labels":
|
||||
updated.Labels = container.Labels
|
||||
case "image":
|
||||
updated.Image = container.Image
|
||||
case "runtime":
|
||||
// TODO(stevvooe): Should this actually be allowed?
|
||||
updated.Runtime = container.Runtime
|
||||
case "spec":
|
||||
updated.Spec = container.Spec
|
||||
case "rootfs":
|
||||
updated.RootFS = container.RootFS
|
||||
default:
|
||||
return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// no field mask present, just replace everything
|
||||
updated = container
|
||||
}
|
||||
|
||||
updated.UpdatedAt = time.Now().UTC()
|
||||
if err := writeContainer(cbkt, &updated); err != nil {
|
||||
return containers.Container{}, errors.Wrap(err, "failed to write container")
|
||||
}
|
||||
|
||||
@@ -251,10 +251,8 @@ func readContainer(container *containers.Container, bkt *bolt.Bucket) error {
|
||||
return nil
|
||||
}
|
||||
container.Labels = map[string]string{}
|
||||
if err := lbkt.ForEach(func(k, v []byte) error {
|
||||
container.Labels[string(k)] = string(v)
|
||||
return nil
|
||||
}); err != nil {
|
||||
|
||||
if err := readLabels(container.Labels, lbkt); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -263,15 +261,11 @@ func readContainer(container *containers.Container, bkt *bolt.Bucket) error {
|
||||
})
|
||||
}
|
||||
|
||||
func writeContainer(container *containers.Container, bkt *bolt.Bucket) error {
|
||||
createdAt, err := container.CreatedAt.MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updatedAt, err := container.UpdatedAt.MarshalBinary()
|
||||
if err != nil {
|
||||
func writeContainer(bkt *bolt.Bucket, container *containers.Container) error {
|
||||
if err := writeTimestamps(bkt, container.CreatedAt, container.UpdatedAt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
spec, err := container.Spec.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -281,8 +275,6 @@ func writeContainer(container *containers.Container, bkt *bolt.Bucket) error {
|
||||
{bucketKeyImage, []byte(container.Image)},
|
||||
{bucketKeySpec, spec},
|
||||
{bucketKeyRootFS, []byte(container.RootFS)},
|
||||
{bucketKeyCreatedAt, createdAt},
|
||||
{bucketKeyUpdatedAt, updatedAt},
|
||||
} {
|
||||
if err := bkt.Put(v[0], v[1]); err != nil {
|
||||
return err
|
||||
@@ -320,28 +312,5 @@ func writeContainer(container *containers.Container, bkt *bolt.Bucket) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Remove existing labels to keep from merging
|
||||
if lbkt := bkt.Bucket(bucketKeyLabels); lbkt != nil {
|
||||
if err := bkt.DeleteBucket(bucketKeyLabels); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
lbkt, err := bkt.CreateBucket(bucketKeyLabels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for k, v := range container.Labels {
|
||||
if v == "" {
|
||||
delete(container.Labels, k) // remove since we don't actually set it
|
||||
continue
|
||||
}
|
||||
|
||||
if err := lbkt.Put([]byte(k), []byte(v)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return writeLabels(bkt, container.Labels)
|
||||
}
|
||||
|
||||
86
metadata/helpers.go
Normal file
86
metadata/helpers.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package metadata
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func readLabels(m map[string]string, bkt *bolt.Bucket) error {
|
||||
return bkt.ForEach(func(k, v []byte) error {
|
||||
m[string(k)] = string(v)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// writeLabels will write a new labels bucket to the provided bucket at key
|
||||
// bucketKeyLabels, replacing the contents of the bucket with the provided map.
|
||||
//
|
||||
// The provide map labels will be modified to have the final contents of the
|
||||
// bucket. Typically, this removes zero-value entries.
|
||||
func writeLabels(bkt *bolt.Bucket, labels map[string]string) error {
|
||||
// Remove existing labels to keep from merging
|
||||
if lbkt := bkt.Bucket(bucketKeyLabels); lbkt != nil {
|
||||
if err := bkt.DeleteBucket(bucketKeyLabels); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
lbkt, err := bkt.CreateBucket(bucketKeyLabels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for k, v := range labels {
|
||||
if v == "" {
|
||||
delete(labels, k) // remove since we don't actually set it
|
||||
continue
|
||||
}
|
||||
|
||||
if err := lbkt.Put([]byte(k), []byte(v)); err != nil {
|
||||
return errors.Wrapf(err, "failed to set label %q=%q", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func readTimestamps(created, updated *time.Time, bkt *bolt.Bucket) error {
|
||||
for _, f := range []struct {
|
||||
b []byte
|
||||
t *time.Time
|
||||
}{
|
||||
{bucketKeyCreatedAt, created},
|
||||
{bucketKeyUpdatedAt, updated},
|
||||
} {
|
||||
v := bkt.Get(f.b)
|
||||
if v != nil {
|
||||
if err := f.t.UnmarshalBinary(v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeTimestamps(bkt *bolt.Bucket, created, updated time.Time) error {
|
||||
createdAt, err := created.MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updatedAt, err := updated.MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, v := range [][2][]byte{
|
||||
{bucketKeyCreatedAt, createdAt},
|
||||
{bucketKeyUpdatedAt, updatedAt},
|
||||
} {
|
||||
if err := bkt.Put(v[0], v[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -4,13 +4,15 @@ import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/filters"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -42,60 +44,29 @@ func (s *imageStore) Get(ctx context.Context, name string) (images.Image, error)
|
||||
|
||||
image.Name = name
|
||||
if err := readImage(&image, ibkt); err != nil {
|
||||
return images.Image{}, err
|
||||
return images.Image{}, errors.Wrapf(err, "image %q", name)
|
||||
}
|
||||
|
||||
return image, nil
|
||||
}
|
||||
|
||||
func (s *imageStore) Update(ctx context.Context, name string, desc ocispec.Descriptor) error {
|
||||
namespace, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return withImagesBucket(s.tx, namespace, func(bkt *bolt.Bucket) error {
|
||||
ibkt, err := bkt.CreateBucketIfNotExists([]byte(name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
buf [binary.MaxVarintLen64]byte
|
||||
sizeEncoded []byte = buf[:]
|
||||
)
|
||||
sizeEncoded = sizeEncoded[:binary.PutVarint(sizeEncoded, desc.Size)]
|
||||
|
||||
if len(sizeEncoded) == 0 {
|
||||
return fmt.Errorf("failed encoding size = %v", desc.Size)
|
||||
}
|
||||
|
||||
for _, v := range [][2][]byte{
|
||||
{bucketKeyDigest, []byte(desc.Digest)},
|
||||
{bucketKeyMediaType, []byte(desc.MediaType)},
|
||||
{bucketKeySize, sizeEncoded},
|
||||
} {
|
||||
if err := ibkt.Put(v[0], v[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *imageStore) List(ctx context.Context) ([]images.Image, error) {
|
||||
var m []images.Image
|
||||
func (s *imageStore) List(ctx context.Context, fs ...string) ([]images.Image, error) {
|
||||
namespace, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filter, err := filters.ParseAll(fs...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
bkt := getImagesBucket(s.tx, namespace)
|
||||
if bkt == nil {
|
||||
return nil, nil // empty store
|
||||
}
|
||||
|
||||
var m []images.Image
|
||||
if err := bkt.ForEach(func(k, v []byte) error {
|
||||
var (
|
||||
image = images.Image{
|
||||
@@ -108,7 +79,9 @@ func (s *imageStore) List(ctx context.Context) ([]images.Image, error) {
|
||||
return err
|
||||
}
|
||||
|
||||
m = append(m, image)
|
||||
if filter.Match(adaptImage(image)) {
|
||||
m = append(m, image)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
@@ -117,6 +90,90 @@ func (s *imageStore) List(ctx context.Context) ([]images.Image, error) {
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (s *imageStore) Create(ctx context.Context, image images.Image) (images.Image, error) {
|
||||
namespace, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return images.Image{}, err
|
||||
}
|
||||
|
||||
if image.Name == "" {
|
||||
return images.Image{}, errors.Wrapf(errdefs.ErrInvalidArgument, "image name is required for create")
|
||||
}
|
||||
|
||||
return image, withImagesBucket(s.tx, namespace, func(bkt *bolt.Bucket) error {
|
||||
ibkt, err := bkt.CreateBucket([]byte(image.Name))
|
||||
if err != nil {
|
||||
if err != bolt.ErrBucketExists {
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.Wrapf(errdefs.ErrAlreadyExists, "image %q", image.Name)
|
||||
}
|
||||
|
||||
image.CreatedAt = time.Now().UTC()
|
||||
image.UpdatedAt = image.CreatedAt
|
||||
return writeImage(ibkt, &image)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) {
|
||||
namespace, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return images.Image{}, err
|
||||
}
|
||||
|
||||
if image.Name == "" {
|
||||
return images.Image{}, errors.Wrapf(errdefs.ErrInvalidArgument, "image name is required for update")
|
||||
}
|
||||
|
||||
var updated images.Image
|
||||
return updated, withImagesBucket(s.tx, namespace, func(bkt *bolt.Bucket) error {
|
||||
ibkt := bkt.Bucket([]byte(image.Name))
|
||||
if ibkt == nil {
|
||||
return errors.Wrapf(errdefs.ErrNotFound, "image %q", image.Name)
|
||||
}
|
||||
|
||||
if err := readImage(&updated, ibkt); err != nil {
|
||||
return errors.Wrapf(err, "image %q", image.Name)
|
||||
}
|
||||
updated.Name = image.Name
|
||||
|
||||
if len(fieldpaths) > 0 {
|
||||
for _, path := range fieldpaths {
|
||||
if strings.HasPrefix(path, "labels.") {
|
||||
if updated.Labels == nil {
|
||||
updated.Labels = map[string]string{}
|
||||
}
|
||||
|
||||
key := strings.TrimPrefix(path, "labels.")
|
||||
updated.Labels[key] = image.Labels[key]
|
||||
continue
|
||||
}
|
||||
|
||||
switch path {
|
||||
case "labels":
|
||||
updated.Labels = image.Labels
|
||||
case "target":
|
||||
// NOTE(stevvooe): While we allow setting individual labels, we
|
||||
// only support replacing the target as a unit, since that is
|
||||
// commonly pulled as a unit from other sources. It often doesn't
|
||||
// make sense to modify the size or digest without touching the
|
||||
// mediatype, as well, for example.
|
||||
updated.Target = image.Target
|
||||
default:
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on image %q", path, image.Name)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
updated = image
|
||||
}
|
||||
|
||||
// TODO(stevvooe): Should only mark as updated if we have actual changes.
|
||||
updated.UpdatedAt = time.Now().UTC()
|
||||
return writeImage(ibkt, &updated)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *imageStore) Delete(ctx context.Context, name string) error {
|
||||
namespace, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
@@ -133,7 +190,23 @@ func (s *imageStore) Delete(ctx context.Context, name string) error {
|
||||
}
|
||||
|
||||
func readImage(image *images.Image, bkt *bolt.Bucket) error {
|
||||
return bkt.ForEach(func(k, v []byte) error {
|
||||
if err := readTimestamps(&image.CreatedAt, &image.UpdatedAt, bkt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lbkt := bkt.Bucket(bucketKeyLabels)
|
||||
if lbkt != nil {
|
||||
image.Labels = map[string]string{}
|
||||
if err := readLabels(image.Labels, lbkt); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
tbkt := bkt.Bucket(bucketKeyTarget)
|
||||
if tbkt == nil {
|
||||
return errors.New("unable to read target bucket")
|
||||
}
|
||||
return tbkt.ForEach(func(k, v []byte) error {
|
||||
if v == nil {
|
||||
return nil // skip it? a bkt maybe?
|
||||
}
|
||||
@@ -152,3 +225,43 @@ func readImage(image *images.Image, bkt *bolt.Bucket) error {
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func writeImage(bkt *bolt.Bucket, image *images.Image) error {
|
||||
if err := writeTimestamps(bkt, image.CreatedAt, image.UpdatedAt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(image.Labels) > 0 {
|
||||
if err := writeLabels(bkt, image.Labels); err != nil {
|
||||
return errors.Wrapf(err, "writing labels for image %v", image.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// write the target bucket
|
||||
tbkt, err := bkt.CreateBucketIfNotExists([]byte(bucketKeyTarget))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
buf [binary.MaxVarintLen64]byte
|
||||
sizeEncoded []byte = buf[:]
|
||||
)
|
||||
sizeEncoded = sizeEncoded[:binary.PutVarint(sizeEncoded, image.Target.Size)]
|
||||
|
||||
if len(sizeEncoded) == 0 {
|
||||
return fmt.Errorf("failed encoding size = %v", image.Target.Size)
|
||||
}
|
||||
|
||||
for _, v := range [][2][]byte{
|
||||
{bucketKeyDigest, []byte(image.Target.Digest)},
|
||||
{bucketKeyMediaType, []byte(image.Target.MediaType)},
|
||||
{bucketKeySize, sizeEncoded},
|
||||
} {
|
||||
if err := tbkt.Put(v[0], v[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user