
The split between provider and ingester was a long-standing division reflecting the client-side use cases. For the most part, the two were differentiated by the algorithms that operate over them, but the split made instantiation and use of the types challenging. On the server side, the distinction is generally less important.

This change unifies these types, and we get a few benefits in the process. The first is that the content store is now accessed entirely over GRPC. This was the initial intent, and that goal is now fully satisfied. There were a few issues around listing content and getting status, but these are resolved with simple streaming and regexp filters. More can probably be done to polish this, but the result is clean.

Several other content-oriented methods were polished during unification. The `Abort` method is now properly separated out to cancel ongoing or stalled ingest processes, and the `Active` method has been replaced with a single `Status` method. The transition went extremely smoothly: once the clients were updated to use the new methods, everything worked as expected on the first compile.

Signed-off-by: Stephen J Day <stephen.day@docker.com>
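To illustrate the unified surface described above, here is a minimal, hypothetical client-side sketch. It assumes the generated api.ContentClient stub (via the usual NewContentClient constructor) and an already established *grpc.ClientConn; the helper name and regexp pattern are illustrative only and not part of this change.

package example

import (
	"fmt"

	api "github.com/containerd/containerd/api/services/content"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// abortMatching is a hypothetical helper showing the unified client surface:
// ingest status and cancellation go through the same GRPC content service.
func abortMatching(ctx context.Context, conn *grpc.ClientConn, pattern string) error {
	client := api.NewContentClient(conn)

	// Query ongoing ingests whose refs match a regexp filter.
	resp, err := client.Status(ctx, &api.StatusRequest{Regexp: pattern})
	if err != nil {
		return err
	}

	// Cancel each matching ingest by ref.
	for _, st := range resp.Statuses {
		fmt.Println("aborting", st.Ref)
		if _, err := client.Abort(ctx, &api.AbortRequest{Ref: st.Ref}); err != nil {
			return err
		}
	}

	return nil
}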
package content

import (
	"io"
	"sync"

	"github.com/Sirupsen/logrus"
	api "github.com/containerd/containerd/api/services/content"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/plugin"
	"github.com/golang/protobuf/ptypes/empty"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

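// Service exposes a content.Store to clients over GRPC, implementing api.ContentServer.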
type Service struct {
	store content.Store
}

var bufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 1<<20)
	},
}

var _ api.ContentServer = &Service{}

func init() {
	plugin.Register("content-grpc", &plugin.Registration{
		Type: plugin.GRPCPlugin,
		Init: NewService,
	})
}

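// NewService builds the content service from the plugin init context, backed
// by the content store provided on the InitContext.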
func NewService(ic *plugin.InitContext) (interface{}, error) {
	return &Service{
		store: ic.Content,
	}, nil
}

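// Register registers the content service with the given GRPC server.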
func (s *Service) Register(server *grpc.Server) error {
	api.RegisterContentServer(server, s)
	return nil
}

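// Info returns the digest, size and commit time of a committed blob.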
func (s *Service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
	if err := req.Digest.Validate(); err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
	}

	bi, err := s.store.Info(ctx, req.Digest)
	if err != nil {
		return nil, serverErrorToGRPC(err, req.Digest.String())
	}

	return &api.InfoResponse{
		Info: api.Info{
			Digest:      bi.Digest,
			Size_:       bi.Size,
			CommittedAt: bi.CommittedAt,
		},
	}, nil
}

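// List streams info for all committed content back to the client, batching
// entries into blocks of up to 100.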
func (s *Service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
	var (
		buffer    []api.Info
		sendBlock = func(block []api.Info) error {
			// send last block
			return session.Send(&api.ListContentResponse{
				Info: block,
			})
		}
	)

	if err := s.store.Walk(session.Context(), func(info content.Info) error {
		buffer = append(buffer, api.Info{
			Digest:      info.Digest,
			Size_:       info.Size,
			CommittedAt: info.CommittedAt,
		})

		if len(buffer) >= 100 {
			if err := sendBlock(buffer); err != nil {
				return err
			}

			buffer = buffer[:0]
		}

		return nil
	}); err != nil {
		return err
	}

	if len(buffer) > 0 {
		// send last block
		if err := sendBlock(buffer); err != nil {
			return err
		}
	}

	return nil
}

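// Delete removes the blob identified by the requested digest from the content store.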
func (s *Service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*empty.Empty, error) {
	if err := req.Digest.Validate(); err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, err.Error())
	}

	if err := s.store.Delete(ctx, req.Digest); err != nil {
		return nil, serverErrorToGRPC(err, req.Digest.String())
	}

	return &empty.Empty{}, nil
}

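// Read streams the contents of a committed blob back to the client in
// buffered chunks, honoring the requested offset and size.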
func (s *Service) Read(req *api.ReadRequest, session api.Content_ReadServer) error {
	if err := req.Digest.Validate(); err != nil {
		return grpc.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
	}

	oi, err := s.store.Info(session.Context(), req.Digest)
	if err != nil {
		return serverErrorToGRPC(err, req.Digest.String())
	}

	rc, err := s.store.Reader(session.Context(), req.Digest)
	if err != nil {
		return serverErrorToGRPC(err, req.Digest.String())
	}
	defer rc.Close() // TODO(stevvooe): Cache these file descriptors for performance.

	ra, ok := rc.(io.ReaderAt)
	if !ok {
		// TODO(stevvooe): Need to set this up to get correct behavior across
		// the board. May change interface to store to just return ReaderAtCloser.
		// Possibly, we could just return io.ReaderAt and handle file
		// descriptors internally.
		return errors.New("content service only supports content stores that return ReaderAt")
	}

	var (
		offset = req.Offset
		size   = req.Size_

		// TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably
		// a little inefficient for work over a fast network. We can tune this later.
		p = bufPool.Get().([]byte)
	)
	defer bufPool.Put(p)

	if offset < 0 {
		offset = 0
	}

	if size <= 0 {
		size = oi.Size - offset
	}

	if offset+size > oi.Size {
		return grpc.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
	}

	if _, err := io.CopyBuffer(
		&readResponseWriter{session: session},
		io.NewSectionReader(ra, offset, size), p); err != nil {
		return err
	}

	return nil
}

// readResponseWriter is a writer that places the output into ReadResponse messages.
//
// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
// into the buffer size.
type readResponseWriter struct {
	offset  int64
	session api.Content_ReadServer
}

func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
	if err := rw.session.Send(&api.ReadResponse{
		Offset: rw.offset,
		Data:   p,
	}); err != nil {
		return 0, err
	}

	rw.offset += int64(len(p))
	return len(p), nil
}

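// Status reports the state of ongoing ingests whose refs match the request's
// regular expression.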
func (s *Service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
	statuses, err := s.store.Status(ctx, req.Regexp)
	if err != nil {
		return nil, serverErrorToGRPC(err, req.Regexp)
	}

	var resp api.StatusResponse
	for _, status := range statuses {
		resp.Statuses = append(resp.Statuses, api.Status{
			StartedAt: status.StartedAt,
			UpdatedAt: status.UpdatedAt,
			Ref:       status.Ref,
			Offset:    status.Offset,
			Total:     status.Total,
			Expected:  status.Expected,
		})
	}

	return &resp, nil
}

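// Write services a bidirectional write stream for a single ingest ref,
// handling stat, write and commit actions against the store's writer.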
func (s *Service) Write(session api.Content_WriteServer) (err error) {
	var (
		ctx      = session.Context()
		msg      api.WriteResponse
		req      *api.WriteRequest
		ref      string
		total    int64
		expected digest.Digest
	)

	defer func(msg *api.WriteResponse) {
		// pump through the last message if no error was encountered
		if err != nil {
			// TODO(stevvooe): Really need a log line here to track which
			// errors are actually causing failure on the server side. May want
			// to configure the service with an interceptor to make this work
			// identically across all GRPC methods.
			//
			// This is pretty noisy, so we can remove it but leave it for now.
			log.G(ctx).WithError(err).Error("(*Service).Write failed")

			return
		}

		err = session.Send(msg)
	}(&msg)

	// handle the very first request!
	req, err = session.Recv()
	if err != nil {
		return err
	}

	ref = req.Ref

	if ref == "" {
		return grpc.Errorf(codes.InvalidArgument, "first message must have a reference")
	}

	fields := logrus.Fields{
		"ref": ref,
	}
	total = req.Total
	expected = req.Expected
	if total > 0 {
		fields["total"] = total
	}

	if expected != "" {
		fields["expected"] = expected
	}

	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))

	log.G(ctx).Debug("(*Service).Write started")
	// this action locks the writer for the session.
	wr, err := s.store.Writer(ctx, ref, total, expected)
	if err != nil {
		return err
	}
	defer wr.Close()

	for {
		msg.Action = req.Action
		ws, err := wr.Status()
		if err != nil {
			return err
		}

		msg.Offset = ws.Offset // always set the offset.

		// NOTE(stevvooe): In general, there are two cases under which a remote
		// writer is used.
		//
		// For pull, we almost always have this before fetching large content,
		// through descriptors. We allow predeclaration of the expected size
		// and digest.
		//
		// For push, it is more complex. If we want to cut through content into
		// storage, we may have no expectation until we are done processing the
		// content. The case here is the following:
		//
		// 1. Start writing content.
		// 2. Compress inline.
		// 3. Validate digest and size (maybe).
		//
		// Supporting these two paths is quite awkward, but it lets both kinds
		// of API users use the same writer style with a minimum of overhead.
if req.Expected != "" {
|
|
if expected != "" && expected != req.Expected {
|
|
return grpc.Errorf(codes.InvalidArgument, "inconsistent digest provided: %v != %v", req.Expected, expected)
|
|
}
|
|
expected = req.Expected
|
|
|
|
if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
|
|
if err := s.store.Abort(session.Context(), ref); err != nil {
|
|
log.G(ctx).WithError(err).Error("failed to abort write")
|
|
}
|
|
|
|
return grpc.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
|
|
}
|
|
}
|
|
|
|
if req.Total > 0 {
|
|
// Update the expected total. Typically, this could be seen at
|
|
// negotiation time or on a commit message.
|
|
if total > 0 && req.Total != total {
|
|
return grpc.Errorf(codes.InvalidArgument, "inconsistent total provided: %v != %v", req.Total, total)
|
|
}
|
|
total = req.Total
|
|
}
|
|
|
|
switch req.Action {
|
|
case api.WriteActionStat:
|
|
msg.Digest = wr.Digest()
|
|
msg.StartedAt = ws.StartedAt
|
|
msg.UpdatedAt = ws.UpdatedAt
|
|
msg.Total = total
|
|
case api.WriteActionWrite, api.WriteActionCommit:
|
|
if req.Offset > 0 {
|
|
// validate the offset if provided
|
|
if req.Offset != ws.Offset {
|
|
return grpc.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
|
|
}
|
|
}
|
|
|
|
if req.Offset == 0 && ws.Offset > 0 {
|
|
if err := wr.Truncate(req.Offset); err != nil {
|
|
return errors.Wrapf(err, "truncate failed")
|
|
}
|
|
msg.Offset = req.Offset
|
|
}
|
|
|
|
// issue the write if we actually have data.
|
|
if len(req.Data) > 0 {
|
|
// While this looks like we could use io.WriterAt here, because we
|
|
// maintain the offset as append only, we just issue the write.
|
|
n, err := wr.Write(req.Data)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
if n != len(req.Data) {
|
|
// TODO(stevvooe): Perhaps, we can recover this by including it
|
|
// in the offset on the write return.
|
|
return grpc.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
|
|
}
|
|
|
|
msg.Offset += int64(n)
|
|
}
|
|
|
|
if req.Action == api.WriteActionCommit {
|
|
if err := wr.Commit(total, expected); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
msg.Digest = wr.Digest()
|
|
}
|
|
|
|
if err := session.Send(&msg); err != nil {
|
|
return err
|
|
}
|
|
|
|
req, err = session.Recv()
|
|
if err != nil {
|
|
if err == io.EOF {
|
|
return nil
|
|
}
|
|
|
|
return err
|
|
}
|
|
}
|
|
}
|
|
|
|
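// Abort cancels the ongoing or stalled ingest identified by the requested ref.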
func (s *Service) Abort(ctx context.Context, req *api.AbortRequest) (*empty.Empty, error) {
	if err := s.store.Abort(ctx, req.Ref); err != nil {
		return nil, serverErrorToGRPC(err, req.Ref)
	}

	return &empty.Empty{}, nil
}