
The split between provider and ingester was a long-standing division reflecting the client-side use cases. For the most part, we were differentiating these for the algorithms that operate them, but it made instantiation and use of the types challenging. On the server-side, this distinction is generally less important. This change unifies these types and in the process we get a few benefits. The first is that we now completely access the content store over GRPC. This was the initial intent and we have now satisfied this goal completely. There are a few issues around listing content and getting status, but we resolve these with simple streaming and regexp filters. More can probably be done to polish this but the result is clean. Several other content-oriented methods were polished in the process of unification. We have now properly separated out the `Abort` method to cancel ongoing or stalled ingest processes. We have also replaced the `Active` method with a single status method. The transition went extremely smoothly. Once the clients were updated to use the new methods, everything worked as expected on the first compile. Signed-off-by: Stephen J Day <stephen.day@docker.com>
145 lines
3.5 KiB
Go
145 lines
3.5 KiB
Go
package content
|
|
|
|
import (
|
|
"os"
|
|
"path/filepath"
|
|
"time"
|
|
|
|
"github.com/containerd/containerd/log"
|
|
"github.com/nightlyone/lockfile"
|
|
"github.com/opencontainers/go-digest"
|
|
"github.com/pkg/errors"
|
|
)
|
|
|
|
// writer represents a write transaction against the blob store.
|
|
type writer struct {
|
|
s *store
|
|
fp *os.File // opened data file
|
|
lock lockfile.Lockfile
|
|
path string // path to writer dir
|
|
ref string // ref key
|
|
offset int64
|
|
total int64
|
|
digester digest.Digester
|
|
startedAt time.Time
|
|
updatedAt time.Time
|
|
}
|
|
|
|
func (w *writer) Status() (Status, error) {
|
|
return Status{
|
|
Ref: w.ref,
|
|
Offset: w.offset,
|
|
Total: w.total,
|
|
StartedAt: w.startedAt,
|
|
UpdatedAt: w.updatedAt,
|
|
}, nil
|
|
}
|
|
|
|
// Digest returns the current digest of the content, up to the current write.
|
|
//
|
|
// Cannot be called concurrently with `Write`.
|
|
func (w *writer) Digest() digest.Digest {
|
|
return w.digester.Digest()
|
|
}
|
|
|
|
// Write p to the transaction.
|
|
//
|
|
// Note that writes are unbuffered to the backing file. When writing, it is
|
|
// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
|
|
func (w *writer) Write(p []byte) (n int, err error) {
|
|
n, err = w.fp.Write(p)
|
|
w.digester.Hash().Write(p[:n])
|
|
w.offset += int64(len(p))
|
|
w.updatedAt = time.Now()
|
|
return n, err
|
|
}
|
|
|
|
func (w *writer) Commit(size int64, expected digest.Digest) error {
|
|
if err := w.fp.Sync(); err != nil {
|
|
return errors.Wrap(err, "sync failed")
|
|
}
|
|
|
|
fi, err := w.fp.Stat()
|
|
if err != nil {
|
|
return errors.Wrap(err, "stat on ingest file failed")
|
|
}
|
|
|
|
// change to readonly, more important for read, but provides _some_
|
|
// protection from this point on. We use the existing perms with a mask
|
|
// only allowing reads honoring the umask on creation.
|
|
//
|
|
// This removes write and exec, only allowing read per the creation umask.
|
|
if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
|
|
return errors.Wrap(err, "failed to change ingest file permissions")
|
|
}
|
|
|
|
if size > 0 && size != fi.Size() {
|
|
return errors.Errorf("%q failed size validation: %v != %v", w.ref, fi.Size(), size)
|
|
}
|
|
|
|
if err := w.fp.Close(); err != nil {
|
|
return errors.Wrap(err, "failed closing ingest")
|
|
}
|
|
|
|
dgst := w.digester.Digest()
|
|
if expected != "" && expected != dgst {
|
|
return errors.Errorf("unexpected digest: %v != %v", dgst, expected)
|
|
}
|
|
|
|
var (
|
|
ingest = filepath.Join(w.path, "data")
|
|
target = w.s.blobPath(dgst)
|
|
)
|
|
|
|
// make sure parent directories of blob exist
|
|
if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
|
|
return err
|
|
}
|
|
|
|
// clean up!!
|
|
defer os.RemoveAll(w.path)
|
|
|
|
if err := os.Rename(ingest, target); err != nil {
|
|
if os.IsExist(err) {
|
|
// collision with the target file!
|
|
return ErrExists
|
|
}
|
|
return err
|
|
}
|
|
|
|
unlock(w.lock)
|
|
w.fp = nil
|
|
return nil
|
|
}
|
|
|
|
// Close the writer, flushing any unwritten data and leaving the progress in
|
|
// tact.
|
|
//
|
|
// If one needs to resume the transaction, a new writer can be obtained from
|
|
// `ContentStore.Resume` using the same key. The write can then be continued
|
|
// from it was left off.
|
|
//
|
|
// To abandon a transaction completely, first call close then `Store.Remove` to
|
|
// clean up the associated resources.
|
|
func (cw *writer) Close() (err error) {
|
|
if err := unlock(cw.lock); err != nil {
|
|
log.L.Debug("unlock failed: %v", err)
|
|
}
|
|
|
|
if cw.fp != nil {
|
|
cw.fp.Sync()
|
|
return cw.fp.Close()
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (w *writer) Truncate(size int64) error {
|
|
if size != 0 {
|
|
return errors.New("Truncate: unsupported size")
|
|
}
|
|
w.offset = 0
|
|
w.digester.Hash().Reset()
|
|
return w.fp.Truncate(0)
|
|
}
|