devmapper: don't create or reload thin-pool from snapshotter

Signed-off-by: Maksym Pavlenko <makpav@amazon.com>
This commit is contained in:
Maksym Pavlenko 2019-02-21 15:55:01 -08:00
parent 7efda48c53
commit adf5c640f4
No known key found for this signature in database
GPG Key ID: BDA48CBFE7A0FC14
5 changed files with 33 additions and 161 deletions

View File

@ -23,23 +23,11 @@ import (
"os"
"github.com/BurntSushi/toml"
"github.com/containerd/containerd/snapshots/devmapper/dmsetup"
"github.com/docker/go-units"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
)
const (
// See https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt for details
dataBlockMinSize = 128
dataBlockMaxSize = 2097152
)
var (
errInvalidBlockSize = errors.Errorf("block size should be between %d and %d", dataBlockMinSize, dataBlockMaxSize)
errInvalidBlockAlignment = errors.Errorf("block size should be multiple of %d sectors", dataBlockMinSize)
)
// Config represents device mapper configuration loaded from file.
// Size units can be specified in human-readable string format (like "32KIB", "32GB", "32Tb")
type Config struct {
@ -49,19 +37,6 @@ type Config struct {
// Name for 'thin-pool' device to be used by snapshotter (without /dev/mapper/ prefix)
PoolName string `toml:"pool_name"`
// Path to data volume to be used by thin-pool
DataDevice string `toml:"data_device"`
// Path to metadata volume to be used by thin-pool
MetadataDevice string `toml:"meta_device"`
// The size of allocation chunks in data file.
// Must be between 128 sectors (64KB) and 2097152 sectors (1GB) and a multiple of 128 sectors (64KB)
// Block size can't be changed after pool created.
// See https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt
DataBlockSize string `toml:"data_block_size"`
DataBlockSizeSectors uint32 `toml:"-"`
// Defines how much space to allocate when creating base image for container
BaseImageSize string `toml:"base_image_size"`
BaseImageSizeBytes uint64 `toml:"-"`
@ -94,23 +69,13 @@ func LoadConfig(path string) (*Config, error) {
}
func (c *Config) parse() error {
var result *multierror.Error
if c.DataBlockSize != "" {
if blockSize, err := units.RAMInBytes(c.DataBlockSize); err != nil {
result = multierror.Append(result, errors.Wrapf(err, "failed to parse data block size: %q", c.DataBlockSize))
} else {
c.DataBlockSizeSectors = uint32(blockSize / dmsetup.SectorSize)
}
baseImageSize, err := units.RAMInBytes(c.BaseImageSize)
if err != nil {
return errors.Wrapf(err, "failed to parse base image size: '%s'", c.BaseImageSize)
}
if baseImageSize, err := units.RAMInBytes(c.BaseImageSize); err != nil {
result = multierror.Append(result, errors.Wrapf(err, "failed to parse base image size: %q", c.BaseImageSize))
} else {
c.BaseImageSizeBytes = uint64(baseImageSize)
}
return result.ErrorOrNil()
return nil
}
// Validate makes sure configuration fields are valid
@ -129,32 +94,5 @@ func (c *Config) Validate() error {
result = multierror.Append(result, fmt.Errorf("base_image_size is required"))
}
// The following fields are required only if we want to create or reload pool.
// Otherwise existing pool with 'PoolName' (prepared in advance) can be used by snapshotter.
if c.DataDevice != "" || c.MetadataDevice != "" || c.DataBlockSize != "" || c.DataBlockSizeSectors != 0 {
strChecks := []struct {
field string
name string
}{
{c.DataDevice, "data_device"},
{c.MetadataDevice, "meta_device"},
{c.DataBlockSize, "data_block_size"},
}
for _, check := range strChecks {
if check.field == "" {
result = multierror.Append(result, errors.Errorf("%s is empty", check.name))
}
}
if c.DataBlockSizeSectors < dataBlockMinSize || c.DataBlockSizeSectors > dataBlockMaxSize {
result = multierror.Append(result, errInvalidBlockSize)
}
if c.DataBlockSizeSectors%dataBlockMinSize != 0 {
result = multierror.Append(result, errInvalidBlockAlignment)
}
}
return result.ErrorOrNil()
}

View File

@ -21,7 +21,6 @@ package devmapper
import (
"io/ioutil"
"os"
"strings"
"testing"
"github.com/BurntSushi/toml"
@ -34,9 +33,6 @@ func TestLoadConfig(t *testing.T) {
expected := Config{
RootPath: "/tmp",
PoolName: "test",
DataDevice: "/dev/loop0",
MetadataDevice: "/dev/loop1",
DataBlockSize: "1mb",
BaseImageSize: "128Mb",
}
@ -60,12 +56,8 @@ func TestLoadConfig(t *testing.T) {
assert.Equal(t, loaded.RootPath, expected.RootPath)
assert.Equal(t, loaded.PoolName, expected.PoolName)
assert.Equal(t, loaded.DataDevice, expected.DataDevice)
assert.Equal(t, loaded.MetadataDevice, expected.MetadataDevice)
assert.Equal(t, loaded.DataBlockSize, expected.DataBlockSize)
assert.Equal(t, loaded.BaseImageSize, expected.BaseImageSize)
assert.Assert(t, loaded.DataBlockSizeSectors == 1*1024*1024/512)
assert.Assert(t, loaded.BaseImageSizeBytes == 128*1024*1024)
}
@ -79,37 +71,24 @@ func TestLoadConfigInvalidPath(t *testing.T) {
func TestParseInvalidData(t *testing.T) {
config := Config{
DataBlockSize: "x",
BaseImageSize: "y",
}
err := config.parse()
assert.Assert(t, err != nil)
multErr := (err).(*multierror.Error)
assert.Assert(t, is.Len(multErr.Errors, 2))
assert.Assert(t, strings.Contains(multErr.Errors[0].Error(), "failed to parse data block size: \"x\""))
assert.Assert(t, strings.Contains(multErr.Errors[1].Error(), "failed to parse base image size: \"y\""))
assert.Error(t, err, "failed to parse base image size: 'y': invalid size: 'y'")
}
func TestFieldValidation(t *testing.T) {
config := &Config{DataBlockSizeSectors: 1}
config := &Config{}
err := config.Validate()
assert.Assert(t, err != nil)
multErr := (err).(*multierror.Error)
assert.Assert(t, is.Len(multErr.Errors, 8))
assert.Assert(t, is.Len(multErr.Errors, 3))
assert.Assert(t, multErr.Errors[0] != nil, "pool_name is empty")
assert.Assert(t, multErr.Errors[1] != nil, "root_path is empty")
assert.Assert(t, multErr.Errors[2] != nil, "base_image_size is empty")
assert.Assert(t, multErr.Errors[3] != nil, "data_device is empty")
assert.Assert(t, multErr.Errors[4] != nil, "meta_device is empty")
assert.Assert(t, multErr.Errors[5] != nil, "data_block_size is empty")
assert.Equal(t, multErr.Errors[6], errInvalidBlockSize)
assert.Equal(t, multErr.Errors[7], errInvalidBlockAlignment)
}
func TestExistingPoolFieldValidation(t *testing.T) {

View File

@ -20,7 +20,6 @@ package devmapper
import (
"context"
"os"
"path/filepath"
"github.com/containerd/containerd/log"
@ -54,60 +53,17 @@ func NewPoolDevice(ctx context.Context, config *Config) (*PoolDevice, error) {
return nil, err
}
if err := openPool(ctx, config); err != nil {
return nil, err
// Make sure pool exists and available
poolPath := dmsetup.GetFullDevicePath(config.PoolName)
if _, err := dmsetup.Info(poolPath); err != nil {
return nil, errors.Wrapf(err, "failed to query pool %q", poolPath)
}
return &PoolDevice{
poolName: config.PoolName,
metadata: poolMetaStore,
}, nil
}
// openPool makes a thin-pool named config.PoolName ready for use: it
// creates the pool when the device node is absent, reloads it when both
// backing devices are configured, and otherwise just verifies that the
// pre-existing pool answers a dmsetup info query.
func openPool(ctx context.Context, config *Config) error {
	if err := config.Validate(); err != nil {
		return err
	}

	poolPath := dmsetup.GetFullDevicePath(config.PoolName)

	_, statErr := os.Stat(poolPath)
	switch {
	case statErr != nil && !os.IsNotExist(statErr):
		// Unexpected stat failure (permissions, I/O, ...) - bail out.
		return errors.Wrapf(statErr, "failed to stat for %q", poolPath)
	case os.IsNotExist(statErr):
		// No device node yet - create a fresh thin-pool.
		log.G(ctx).Debug("creating new pool device")
		if err := dmsetup.CreatePool(config.PoolName, config.DataDevice, config.MetadataDevice, config.DataBlockSizeSectors); err != nil {
			return errors.Wrapf(err, "failed to create thin-pool with name %q", config.PoolName)
		}
		return nil
	}

	// Pool exists; reload it when data/meta devices are explicitly provided.
	if config.DataDevice != "" && config.MetadataDevice != "" {
		log.G(ctx).Debugf("reloading existing pool %q", poolPath)
		if err := dmsetup.ReloadPool(config.PoolName, config.DataDevice, config.MetadataDevice, config.DataBlockSizeSectors); err != nil {
			return errors.Wrapf(err, "failed to reload pool %q", config.PoolName)
		}
		return nil
	}

	// No backing devices given - use the existing pool as-is, but query
	// its info first to make sure it is actually usable.
	if _, err := dmsetup.Info(poolPath); err != nil {
		return errors.Wrapf(err, "failed to query info for existing pool %q", poolPath)
	}
	return nil
}
// transition invokes 'updateStateFn' callback to perform devmapper operation and reflects device state changes/errors in meta store.
// 'tryingState' will be set before invoking callback. If callback succeeded 'successState' will be set, otherwise
// error details will be recorded in meta store.

View File

@ -20,11 +20,13 @@ package devmapper
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/containerd/containerd/pkg/testutil"
"github.com/containerd/containerd/snapshots/devmapper/dmsetup"
@ -65,6 +67,10 @@ func TestPoolDevice(t *testing.T) {
_, loopDataDevice := createLoopbackDevice(t, tempDir)
_, loopMetaDevice := createLoopbackDevice(t, tempDir)
poolName := fmt.Sprintf("test-pool-device-%d", time.Now().Nanosecond())
err = dmsetup.CreatePool(poolName, loopDataDevice, loopMetaDevice, 64*1024/dmsetup.SectorSize)
assert.NilError(t, err, "failed to create pool %q", poolName)
defer func() {
// Detach loop devices and remove images
err := losetup.DetachLoopDevice(loopDataDevice, loopMetaDevice)
@ -75,12 +81,8 @@ func TestPoolDevice(t *testing.T) {
}()
config := &Config{
PoolName: "test-pool-device-1",
PoolName: poolName,
RootPath: tempDir,
DataDevice: loopDataDevice,
MetadataDevice: loopMetaDevice,
DataBlockSize: "65536",
DataBlockSizeSectors: 128,
BaseImageSize: "16mb",
BaseImageSizeBytes: 16 * 1024 * 1024,
}

View File

@ -30,6 +30,7 @@ import (
"github.com/containerd/containerd/snapshots/devmapper/dmsetup"
"github.com/containerd/containerd/snapshots/devmapper/losetup"
"github.com/containerd/containerd/snapshots/testsuite"
"github.com/hashicorp/go-multierror"
"github.com/sirupsen/logrus"
"gotest.tools/assert"
)
@ -59,22 +60,18 @@ func TestSnapshotterSuite(t *testing.T) {
return nil, nil, err
}
// Remove device mapper pool after test completes
// Remove device mapper pool and detach loop devices after test completes
removePool := func() error {
return snap.pool.RemovePool(ctx)
result := multierror.Append(
snap.pool.RemovePool(ctx),
losetup.DetachLoopDevice(loopDataDevice, loopMetaDevice))
return result.ErrorOrNil()
}
// Pool cleanup should be called before closing metadata store (as we need to retrieve device names)
snap.cleanupFn = append([]closeFunc{removePool}, snap.cleanupFn...)
return snap, func() error {
err := snap.Close()
assert.NilError(t, err, "failed to close snapshotter")
err = losetup.DetachLoopDevice(loopDataDevice, loopMetaDevice)
assert.NilError(t, err, "failed to detach loop devices")
return err
}, nil
return snap, snap.Close, nil
})
}