Optimize Windows and LCOW snapshotters to only create scratch layer on the final snapshot
For LCOW we currently copy (or create) the scratch.vhdx for every single snapshot, so a sandbox.vhdx ends up in every snapshot directory even though only the final one is ever used. With the default scratch size of 20GB the file is about 17MB on disk, so that is roughly 17MB of overhead per layer, plus the time spent copying the file for every snapshot. Skipping the copy for everything but the final snapshot is a nice little optimization. For WCOW we do essentially the same thing, except there the blank vhdx is copied from the base layer.

Signed-off-by: Daniel Canter <dcanter@microsoft.com>
parent ba8377590d
commit a91c298d1d
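Note: the heart of the change in both snapshotters is a guard on the snapshot key — scratch space is only materialized when the snapshot is not an unpack ("extract-…") snapshot. A minimal, self-contained Go sketch of that decision (the helper name needsScratch is illustrative, not part of the patch):

package main

import (
	"fmt"
	"strings"
)

// UnpackKeyPrefix mirrors the constant introduced in the snapshots package below.
const UnpackKeyPrefix = "extract"

// needsScratch reports whether a snapshot key belongs to the final (container)
// snapshot, which is the only one that needs a sandbox.vhdx, rather than an
// intermediate unpack snapshot.
func needsScratch(key string) bool {
	return !strings.Contains(key, UnpackKeyPrefix)
}

func main() {
	fmt.Println(needsScratch("extract-123456789 sha256:deadbeef")) // false: layer unpack, skip the copy
	fmt.Println(needsScratch("my-container"))                      // true: final snapshot, create sandbox.vhdx
}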
@@ -123,7 +123,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn
 	)

 	for {
-		key = fmt.Sprintf("extract-%s %s", uniquePart(), chainID)
+		key = fmt.Sprintf(snapshots.UnpackKeyFormat, uniquePart(), chainID)

 		// Prepare snapshot with from parent, label as root
 		mounts, err = sn.Prepare(ctx, key, parent.String(), opts...)
@@ -329,37 +329,44 @@ func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k
 		for _, o := range opts {
 			o(&snapshotInfo)
 		}

-		var sizeGB int
-		if sizeGBstr, ok := snapshotInfo.Labels[rootfsSizeLabel]; ok {
-			i32, err := strconv.ParseInt(sizeGBstr, 10, 32)
-			if err != nil {
-				return nil, errors.Wrapf(err, "failed to parse annotation %q=%q", rootfsSizeLabel, sizeGBstr)
-			}
-			sizeGB = int(i32)
-		}
-
-		scratchSource, err := s.openOrCreateScratch(ctx, sizeGB)
-		if err != nil {
-			return nil, err
-		}
-		defer scratchSource.Close()
-
-		// TODO: JTERRY75 - This has to be called sandbox.vhdx for the time
-		// being but it really is the scratch.vhdx. Using this naming convention
-		// for now but this is not the kubernetes sandbox.
+		// IO/disk space optimization
 		//
-		// Create the sandbox.vhdx for this snapshot from the cache.
-		destPath := filepath.Join(snDir, "sandbox.vhdx")
-		dest, err := os.OpenFile(destPath, os.O_RDWR|os.O_CREATE, 0700)
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to create sandbox.vhdx in snapshot")
-		}
-		defer dest.Close()
-		if _, err := io.Copy(dest, scratchSource); err != nil {
-			dest.Close()
-			os.Remove(destPath)
-			return nil, errors.Wrap(err, "failed to copy cached scratch.vhdx to sandbox.vhdx in snapshot")
+		// We only need one sandbox.vhd for the container. Skip making one for this
+		// snapshot if this isn't the snapshot that just houses the final sandbox.vhd
+		// that will be mounted as the containers scratch. Currently the key for a snapshot
+		// where a layer.vhd will be extracted to it will have the string `extract-` in it.
+		// If this is changed this will also need to be changed.
+		//
+		// We save about 17MB per layer (if the default scratch vhd size of 20GB is used) and of
+		// course the time to copy the vhd per snapshot.
+		if !strings.Contains(key, snapshots.UnpackKeyPrefix) {
+			var sizeGB int
+			if sizeGBstr, ok := snapshotInfo.Labels[rootfsSizeLabel]; ok {
+				i32, err := strconv.ParseInt(sizeGBstr, 10, 32)
+				if err != nil {
+					return nil, errors.Wrapf(err, "failed to parse label %q=%q", rootfsSizeLabel, sizeGBstr)
+				}
+				sizeGB = int(i32)
+			}
+
+			scratchSource, err := s.openOrCreateScratch(ctx, sizeGB)
+			if err != nil {
+				return nil, err
+			}
+			defer scratchSource.Close()
+
+			// Create the sandbox.vhdx for this snapshot from the cache.
+			destPath := filepath.Join(snDir, "sandbox.vhdx")
+			dest, err := os.OpenFile(destPath, os.O_RDWR|os.O_CREATE, 0700)
+			if err != nil {
+				return nil, errors.Wrap(err, "failed to create sandbox.vhdx in snapshot")
+			}
+			defer dest.Close()
+			if _, err := io.Copy(dest, scratchSource); err != nil {
+				dest.Close()
+				os.Remove(destPath)
+				return nil, errors.Wrap(err, "failed to copy cached scratch.vhdx to sandbox.vhdx in snapshot")
+			}
 		}
 	}
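Note on the copy path above: the destination file is closed explicitly before os.Remove in the error branch (in addition to the defer) so the handle is released before the partially written sandbox.vhdx is deleted. A self-contained sketch of the same copy-with-cleanup idiom, with illustrative paths rather than the snapshotter's real ones:

package main

import (
	"io"
	"os"
)

// copyScratch copies a cached scratch template into a snapshot directory and
// removes the partial file if the copy fails partway through.
func copyScratch(srcPath, destPath string) error {
	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()

	dest, err := os.OpenFile(destPath, os.O_RDWR|os.O_CREATE, 0700)
	if err != nil {
		return err
	}
	defer dest.Close()

	if _, err := io.Copy(dest, src); err != nil {
		// Release the handle before deleting the partial file.
		dest.Close()
		os.Remove(destPath)
		return err
	}
	return nil
}

func main() {
	_ = copyScratch(`C:\cache\scratch.vhdx`, `C:\snapshots\42\sandbox.vhdx`)
}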
@@ -26,6 +26,10 @@ import (
 )

 const (
+	// UnpackKeyPrefix is the beginning of the key format used for snapshots that will have
+	// image content unpacked into them.
+	UnpackKeyPrefix = "extract"
+	UnpackKeyFormat = UnpackKeyPrefix + "-%s %s"
 	inheritedLabelsPrefix = "containerd.io/snapshot/"
 	labelSnapshotRef      = "containerd.io/snapshot.ref"
 )
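Note: the two constants are used in both directions — the apply/unpack paths in the other hunks format keys with UnpackKeyFormat, and the snapshotters detect those keys via UnpackKeyPrefix. A small self-contained illustration with made-up values:

package main

import (
	"fmt"
	"strings"
)

const (
	UnpackKeyPrefix = "extract"
	UnpackKeyFormat = UnpackKeyPrefix + "-%s %s"
)

func main() {
	// Built the same way the apply/unpack code does: a random part plus the chain ID.
	key := fmt.Sprintf(UnpackKeyFormat, "123456789", "sha256:deadbeef")
	fmt.Println(key) // extract-123456789 sha256:deadbeef

	// Detected the same way the snapshotters do before skipping the scratch disk.
	fmt.Println(strings.Contains(key, UnpackKeyPrefix)) // true
}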
@@ -326,35 +326,50 @@ func (s *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k
 	}

 	if kind == snapshots.KindActive {
-		parentLayerPaths := s.parentIDsToParentPaths(newSnapshot.ParentIDs)
-
-		var parentPath string
-		if len(parentLayerPaths) != 0 {
-			parentPath = parentLayerPaths[0]
-		}
-
-		if err := hcsshim.CreateSandboxLayer(s.info, newSnapshot.ID, parentPath, parentLayerPaths); err != nil {
-			return nil, errors.Wrap(err, "failed to create sandbox layer")
-		}
-
-		var snapshotInfo snapshots.Info
-		for _, o := range opts {
-			o(&snapshotInfo)
-		}
-
-		var sizeGB int
-		if sizeGBstr, ok := snapshotInfo.Labels[rootfsSizeLabel]; ok {
-			i32, err := strconv.ParseInt(sizeGBstr, 10, 32)
-			if err != nil {
-				return nil, errors.Wrapf(err, "failed to parse annotation %q=%q", rootfsSizeLabel, sizeGBstr)
-			}
-			sizeGB = int(i32)
-		}
-
-		if sizeGB > 0 {
-			const gbToByte = 1024 * 1024 * 1024
-			if err := hcsshim.ExpandSandboxSize(s.info, newSnapshot.ID, uint64(gbToByte*sizeGB)); err != nil {
-				return nil, errors.Wrapf(err, "failed to expand scratch size to %d GB", sizeGB)
-			}
-		}
+		log.G(ctx).Debug("createSnapshot active")
+		// Create the new snapshot dir
+		snDir := s.getSnapshotDir(newSnapshot.ID)
+		if err := os.MkdirAll(snDir, 0700); err != nil {
+			return nil, err
+		}
+
+		// IO/disk space optimization
+		//
+		// We only need one sandbox.vhdx for the container. Skip making one for this
+		// snapshot if this isn't the snapshot that just houses the final sandbox.vhd
+		// that will be mounted as the containers scratch. Currently the key for a snapshot
+		// where a layer will be extracted to will have the string `extract-` in it.
+		if !strings.Contains(key, snapshots.UnpackKeyPrefix) {
+			parentLayerPaths := s.parentIDsToParentPaths(newSnapshot.ParentIDs)
+
+			var parentPath string
+			if len(parentLayerPaths) != 0 {
+				parentPath = parentLayerPaths[0]
+			}
+
+			if err := hcsshim.CreateSandboxLayer(s.info, newSnapshot.ID, parentPath, parentLayerPaths); err != nil {
+				return nil, errors.Wrap(err, "failed to create sandbox layer")
+			}
+
+			var snapshotInfo snapshots.Info
+			for _, o := range opts {
+				o(&snapshotInfo)
+			}
+
+			var sizeGB int
+			if sizeGBstr, ok := snapshotInfo.Labels[rootfsSizeLabel]; ok {
+				i32, err := strconv.ParseInt(sizeGBstr, 10, 32)
+				if err != nil {
+					return nil, errors.Wrapf(err, "failed to parse label %q=%q", rootfsSizeLabel, sizeGBstr)
+				}
+				sizeGB = int(i32)
+			}
+
+			if sizeGB > 0 {
+				const gbToByte = 1024 * 1024 * 1024
+				if err := hcsshim.ExpandSandboxSize(s.info, newSnapshot.ID, uint64(gbToByte*sizeGB)); err != nil {
+					return nil, errors.Wrapf(err, "failed to expand scratch size to %d GB", sizeGB)
+				}
+			}
+		}
 	}
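Note on the sizing branch above: the rootfs size label carries whole gigabytes as a string, which is parsed and converted to bytes before hcsshim.ExpandSandboxSize is called. A self-contained sketch of just that conversion (the label value shown is an assumption for illustration):

package main

import (
	"fmt"
	"strconv"
)

// labelSizeToBytes converts a "<n>" gigabyte label value into bytes, matching
// the gbToByte arithmetic used before expanding the sandbox.
func labelSizeToBytes(sizeGBstr string) (uint64, error) {
	i32, err := strconv.ParseInt(sizeGBstr, 10, 32)
	if err != nil {
		return 0, fmt.Errorf("failed to parse size label %q: %w", sizeGBstr, err)
	}
	const gbToByte = 1024 * 1024 * 1024
	return uint64(i32) * gbToByte, nil
}

func main() {
	b, _ := labelSizeToBytes("20")
	fmt.Println(b) // 21474836480 bytes for the default 20GB scratch
}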
@@ -138,7 +138,7 @@ EachLayer:

 	for try := 1; try <= 3; try++ {
 		// Prepare snapshot with from parent, label as root
-		key = fmt.Sprintf("extract-%s %s", uniquePart(), chainID)
+		key = fmt.Sprintf(snapshots.UnpackKeyFormat, uniquePart(), chainID)
 		mounts, err = sn.Prepare(ctx, key, parent.String(), opts...)
 		if err != nil {
 			if errdefs.IsAlreadyExists(err) {