Merge pull request #261 from ijc/volume-copyup

Implement volume copy up.

Commit a2dbc6ec1c
@@ -19,6 +19,7 @@ package main
 import (
 	"os"

+	"github.com/docker/docker/pkg/reexec"
 	"github.com/golang/glog"
 	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/spf13/pflag"
@@ -30,6 +31,9 @@ import (
 )

 func main() {
+	if reexec.Init() {
+		return
+	}
 	o := options.NewCRIContainerdOptions()
 	o.AddFlags(pflag.CommandLine)
 	if err := o.InitFlags(pflag.CommandLine); err != nil {
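
The reexec.Init() guard above is required by the chrootarchive package this PR vendors: chrootarchive re-executes the current binary to run archive operations inside a chroot, and the re-executed child must divert out of main before flag parsing. A standalone sketch of the docker/pkg/reexec pattern, assuming only the vendored reexec API; the "hello" handler is hypothetical and not part of this PR:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Register a function under a name. When this binary is re-executed
	// with os.Args[0] == "hello", the function runs instead of main.
	reexec.Register("hello", func() {
		fmt.Println("running as re-exec child")
		os.Exit(0)
	})
}

func main() {
	// reexec.Init returns true when the process was started via
	// reexec.Command, in which case main must stop immediately.
	if reexec.Init() {
		return
	}

	// Spawn ourselves again with argv[0] set to the registered name.
	cmd := reexec.Command("hello")
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "re-exec failed:", err)
	}
}
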
@@ -21,6 +21,7 @@ import (

 	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/containers"
+	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/system"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
@@ -56,14 +57,37 @@ func WithVolumes(volumeMounts map[string]string) containerd.NewContainerOpts {
 		defer unix.Unmount(root, 0) // nolint: errcheck

 		for host, volume := range volumeMounts {
-			if err := copyOwnership(filepath.Join(root, volume), host); err != nil {
-				return err
+			if err := copyExistingContents(filepath.Join(root, volume), host); err != nil {
+				return errors.Wrap(err, "taking runtime copy of volume")
 			}
 		}
 		return nil
 	}
 }

+// copyExistingContents copies from the source to the destination and
+// ensures the ownership is appropriately set.
+func copyExistingContents(source, destination string) error {
+	srcList, err := ioutil.ReadDir(source)
+	if err != nil {
+		return err
+	}
+	if len(srcList) > 0 {
+		dstList, err := ioutil.ReadDir(destination)
+		if err != nil {
+			return err
+		}
+		if len(dstList) != 0 {
+			return errors.Errorf("volume at %q is not initially empty", destination)
+		}
+
+		if err := chrootarchive.NewArchiver(nil).CopyWithTar(source, destination); err != nil {
+			return err
+		}
+	}
+	return copyOwnership(source, destination)
+}
+
 // copyOwnership copies the permissions and uid:gid of the src file
 // to the dst file
 func copyOwnership(src, dst string) error {
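
The new copyExistingContents above encodes the copy-up rule: contents are copied from the image's volume path to the host-side volume only if the source is non-empty and the destination is still empty. A self-contained sketch of just that guard, using plain ioutil calls instead of this PR's chrootarchive path; the /tmp paths are hypothetical:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/pkg/errors"
)

// copyUpAllowed mirrors the checks in copyExistingContents: copy only when
// the image path has content and the host-side volume is still empty.
func copyUpAllowed(source, destination string) (bool, error) {
	srcList, err := ioutil.ReadDir(source)
	if err != nil {
		return false, err
	}
	if len(srcList) == 0 {
		return false, nil // nothing to copy up
	}
	dstList, err := ioutil.ReadDir(destination)
	if err != nil {
		return false, err
	}
	if len(dstList) != 0 {
		// refuse to clobber data the user already placed in the volume
		return false, errors.Errorf("volume at %q is not initially empty", destination)
	}
	return true, nil
}

func main() {
	ok, err := copyUpAllowed("/tmp/image-volume", "/tmp/host-volume")
	fmt.Println(ok, err)
}
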
vendor/github.com/docker/docker/pkg/archive/README.md (1 line, generated, vendored, new file)
@@ -0,0 +1 @@
This code provides helper functions for dealing with archive files.
vendor/github.com/docker/docker/pkg/archive/archive.go (1219 lines, generated, vendored, new file)
File diff suppressed because it is too large
vendor/github.com/docker/docker/pkg/archive/archive_linux.go (92 lines, generated, vendored, new file)
@@ -0,0 +1,92 @@
package archive

import (
	"archive/tar"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	if format == OverlayWhiteoutFormat {
		return overlayWhiteoutConverter{}
	}
	return nil
}

type overlayWhiteoutConverter struct{}

func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
	// convert whiteouts to AUFS format
	if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
		// we just rename the file and make it normal
		dir, filename := filepath.Split(hdr.Name)
		hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
		hdr.Mode = 0600
		hdr.Typeflag = tar.TypeReg
		hdr.Size = 0
	}

	if fi.Mode()&os.ModeDir != 0 {
		// convert opaque dirs to AUFS format by writing an empty file with the prefix
		opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
		if err != nil {
			return nil, err
		}
		if len(opaque) == 1 && opaque[0] == 'y' {
			if hdr.Xattrs != nil {
				delete(hdr.Xattrs, "trusted.overlay.opaque")
			}

			// create a header for the whiteout file
			// it should inherit some properties from the parent, but be a regular file
			wo = &tar.Header{
				Typeflag:   tar.TypeReg,
				Mode:       hdr.Mode & int64(os.ModePerm),
				Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir),
				Size:       0,
				Uid:        hdr.Uid,
				Uname:      hdr.Uname,
				Gid:        hdr.Gid,
				Gname:      hdr.Gname,
				AccessTime: hdr.AccessTime,
				ChangeTime: hdr.ChangeTime,
			}
		}
	}

	return
}

func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
	base := filepath.Base(path)
	dir := filepath.Dir(path)

	// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
	if base == WhiteoutOpaqueDir {
		err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
		// don't write the file itself
		return false, err
	}

	// if a file was deleted and we are using overlay, we need to create a character device
	if strings.HasPrefix(base, WhiteoutPrefix) {
		originalBase := base[len(WhiteoutPrefix):]
		originalPath := filepath.Join(dir, originalBase)

		if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
			return false, err
		}
		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
			return false, err
		}

		// don't write the file itself
		return false, nil
	}

	return true, nil
}
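
ConvertWrite and ConvertRead above translate between the two whiteout conventions: overlayfs marks a deletion with a 0/0 character device and an opaque directory with the trusted.overlay.opaque xattr, while the tar stream uses AUFS-style marker names. A runnable sketch of the name translation only, with WhiteoutPrefix (".wh.") and WhiteoutOpaqueDir (".wh..wh..opq") re-declared from pkg/archive so it stands alone:

package main

import (
	"fmt"
	"path/filepath"
)

const (
	whiteoutPrefix    = ".wh."        // WhiteoutPrefix in pkg/archive
	whiteoutOpaqueDir = ".wh..wh..opq" // WhiteoutOpaqueDir in pkg/archive
)

func main() {
	// An overlayfs deletion (0/0 char device at the deleted path) becomes a
	// zero-size regular file with a ".wh." prefix in the tar stream.
	dir, name := filepath.Split("etc/removed.conf")
	fmt.Println(filepath.Join(dir, whiteoutPrefix+name)) // etc/.wh.removed.conf

	// An opaque dir (trusted.overlay.opaque="y") is represented by an empty
	// marker file inside the directory.
	fmt.Println(filepath.Join("var/cache", whiteoutOpaqueDir)) // var/cache/.wh..wh..opq
}
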
vendor/github.com/docker/docker/pkg/archive/archive_other.go (7 lines, generated, vendored, new file)
@@ -0,0 +1,7 @@
// +build !linux

package archive

func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	return nil
}
vendor/github.com/docker/docker/pkg/archive/archive_unix.go (122 lines, generated, vendored, new file)
@@ -0,0 +1,122 @@
// +build !windows

package archive

import (
	"archive/tar"
	"errors"
	"os"
	"path/filepath"
	"syscall"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/system"
	rsystem "github.com/opencontainers/runc/libcontainer/system"
	"golang.org/x/sys/unix"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return srcPath
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
	return srcPath + string(filepath.Separator) + include
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	return p, nil // already unix-style
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	return perm // noop for unix as golang APIs provide perm bits correctly
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
	s, ok := stat.(*syscall.Stat_t)

	if ok {
		// Currently go does not fill in the major/minors
		if s.Mode&unix.S_IFBLK != 0 ||
			s.Mode&unix.S_IFCHR != 0 {
			hdr.Devmajor = int64(major(uint64(s.Rdev)))
			hdr.Devminor = int64(minor(uint64(s.Rdev)))
		}
	}

	return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
	s, ok := stat.(*syscall.Stat_t)

	if ok {
		inode = uint64(s.Ino)
	}

	return
}

func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
	s, ok := stat.(*syscall.Stat_t)

	if !ok {
		return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
	}
	return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
}

func major(device uint64) uint64 {
	return (device >> 8) & 0xfff
}

func minor(device uint64) uint64 {
	return (device & 0xff) | ((device >> 12) & 0xfff00)
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	if rsystem.RunningInUserNS() {
		// cannot create a device if running in user namespace
		return nil
	}

	mode := uint32(hdr.Mode & 07777)
	switch hdr.Typeflag {
	case tar.TypeBlock:
		mode |= unix.S_IFBLK
	case tar.TypeChar:
		mode |= unix.S_IFCHR
	case tar.TypeFifo:
		mode |= unix.S_IFIFO
	}

	return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}
	return nil
}
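
setHeaderForSpecialDevice above splits st_rdev with the local major/minor helpers. A quick standalone check of that arithmetic; the input uses the standard Linux encoding for /dev/sda1 (major 8, minor 1):

package main

import "fmt"

// re-declared from the vendored archive_unix.go for a runnable demo
func major(device uint64) uint64 { return (device >> 8) & 0xfff }
func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }

func main() {
	// Linux packs the low 8 bits of the minor below the 12-bit major;
	// /dev/sda1 is rdev = (8 << 8) | 1.
	rdev := uint64(8<<8 | 1)
	fmt.Println(major(rdev), minor(rdev)) // 8 1
}
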
vendor/github.com/docker/docker/pkg/archive/archive_windows.go (79 lines, generated, vendored, new file)
@@ -0,0 +1,79 @@
// +build windows

package archive

import (
	"archive/tar"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/longpath"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return longpath.AddPrefix(srcPath)
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
	return filepath.Join(srcPath, include)
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	// windows: convert windows style relative path with backslashes
	// into forward slashes. Since windows does not allow '/' or '\'
	// in file names, it is mostly safe to replace however we must
	// check just in case
	if strings.Contains(p, "/") {
		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
	permPart := perm & os.ModePerm
	noPermPart := perm &^ os.ModePerm
	// Add the x bit: make everything +x from windows
	permPart |= 0111
	permPart &= 0755

	return noPermPart | permPart
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
	// do nothing. no notion of Rdev, Nlink in stat on Windows
	return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
	// do nothing. no notion of Inode in stat on Windows
	return
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	return nil
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	return nil
}

func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
	// no notion of file ownership mapping yet on Windows
	return idtools.IDPair{0, 0}, nil
}
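
chmodTarEntry above compensates for Windows having no execute bit: permission bits are forced into a 0755 mask (always +x, never group/other writable) while non-permission mode flags pass through untouched. A standalone re-declaration for a runnable check:

package main

import (
	"fmt"
	"os"
)

// re-declared from the vendored archive_windows.go for a runnable demo
func chmodTarEntry(perm os.FileMode) os.FileMode {
	permPart := perm & os.ModePerm    // permission bits only
	noPermPart := perm &^ os.ModePerm // type flags (dir, symlink, ...)
	permPart |= 0111                  // everything becomes executable
	permPart &= 0755                  // group/other never get write
	return noPermPart | permPart
}

func main() {
	fmt.Printf("%o\n", chmodTarEntry(0644))       // 755
	fmt.Printf("%o\n", chmodTarEntry(0600))       // 711
	fmt.Println(chmodTarEntry(os.ModeDir | 0444)) // dr-xr-xr-x
}
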
vendor/github.com/docker/docker/pkg/archive/changes.go (441 lines, generated, vendored, new file)
@@ -0,0 +1,441 @@
package archive

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
)

// ChangeType represents the change type.
type ChangeType int

const (
	// ChangeModify represents the modify operation.
	ChangeModify = iota
	// ChangeAdd represents the add operation.
	ChangeAdd
	// ChangeDelete represents the delete operation.
	ChangeDelete
)

func (c ChangeType) String() string {
	switch c {
	case ChangeModify:
		return "C"
	case ChangeAdd:
		return "A"
	case ChangeDelete:
		return "D"
	}
	return ""
}

// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
	Path string
	Kind ChangeType
}

func (change *Change) String() string {
	return fmt.Sprintf("%s %s", change.Kind, change.Path)
}

// for sort.Sort
type changesByPath []Change

func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int           { return len(c) }
func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }

// Gnu tar and the go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files, we handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func sameFsTimeSpec(a, b syscall.Timespec) bool {
	return a.Sec == b.Sec &&
		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}

// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
	return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}

func aufsMetadataSkip(path string) (skip bool, err error) {
	skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
	if err != nil {
		skip = true
	}
	return
}

func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
	f := filepath.Base(path)

	// If there is a whiteout, then the file was removed
	if strings.HasPrefix(f, WhiteoutPrefix) {
		originalFile := f[len(WhiteoutPrefix):]
		return filepath.Join(filepath.Dir(path), originalFile), nil
	}

	return "", nil
}

type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)

func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
	var (
		changes     []Change
		changedDirs = make(map[string]struct{})
	)

	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		path, err = filepath.Rel(rw, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		path = filepath.Join(string(os.PathSeparator), path)

		// Skip root
		if path == string(os.PathSeparator) {
			return nil
		}

		if sc != nil {
			if skip, err := sc(path); skip {
				return err
			}
		}

		change := Change{
			Path: path,
		}

		deletedFile, err := dc(rw, path, f)
		if err != nil {
			return err
		}

		// Find out what kind of modification happened
		if deletedFile != "" {
			change.Path = deletedFile
			change.Kind = ChangeDelete
		} else {
			// Otherwise, the file was added
			change.Kind = ChangeAdd

			// ...Unless it already existed in a top layer, in which case, it's a modification
			for _, layer := range layers {
				stat, err := os.Stat(filepath.Join(layer, path))
				if err != nil && !os.IsNotExist(err) {
					return err
				}
				if err == nil {
					// The file existed in the top layer, so that's a modification

					// However, if it's a directory, maybe it wasn't actually modified.
					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
					if stat.IsDir() && f.IsDir() {
						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
							// Both directories are the same, don't record the change
							return nil
						}
					}
					change.Kind = ChangeModify
					break
				}
			}
		}

		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
		// This block is here to ensure the change is recorded even if the
		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
		// Check https://github.com/docker/docker/pull/13590 for details.
		if f.IsDir() {
			changedDirs[path] = struct{}{}
		}
		if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
			parent := filepath.Dir(path)
			if _, ok := changedDirs[parent]; !ok && parent != "/" {
				changes = append(changes, Change{Path: parent, Kind: ChangeModify})
				changedDirs[parent] = struct{}{}
			}
		}

		// Record change
		changes = append(changes, change)
		return nil
	})
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return changes, nil
}

// FileInfo describes the information of a file.
type FileInfo struct {
	parent     *FileInfo
	name       string
	stat       *system.StatT
	children   map[string]*FileInfo
	capability []byte
	added      bool
}

// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
	// As this runs on the daemon side, file paths are OS specific.
	parent := info
	if path == string(os.PathSeparator) {
		return info
	}

	pathElements := strings.Split(path, string(os.PathSeparator))
	for _, elem := range pathElements {
		if elem != "" {
			child := parent.children[elem]
			if child == nil {
				return nil
			}
			parent = child
		}
	}
	return parent
}

func (info *FileInfo) path() string {
	if info.parent == nil {
		// As this runs on the daemon side, file paths are OS specific.
		return string(os.PathSeparator)
	}
	return filepath.Join(info.parent.path(), info.name)
}

func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {

	sizeAtEntry := len(*changes)

	if oldInfo == nil {
		// add
		change := Change{
			Path: info.path(),
			Kind: ChangeAdd,
		}
		*changes = append(*changes, change)
		info.added = true
	}

	// We make a copy so we can modify it to detect additions
	// also, we only recurse on the old dir if the new info is a directory
	// otherwise any previous delete/change is considered recursive
	oldChildren := make(map[string]*FileInfo)
	if oldInfo != nil && info.isDir() {
		for k, v := range oldInfo.children {
			oldChildren[k] = v
		}
	}

	for name, newChild := range info.children {
		oldChild := oldChildren[name]
		if oldChild != nil {
			// change?
			oldStat := oldChild.stat
			newStat := newChild.stat
			// Note: We can't compare inode or ctime or blocksize here, because these change
			// when copying a file into a container. However, that is not generally a problem
			// because any content change will change mtime, and any status change should
			// be visible when actually comparing the stat fields. The only time this
			// breaks down is if some code intentionally hides a change by setting
			// back mtime
			if statDifferent(oldStat, newStat) ||
				!bytes.Equal(oldChild.capability, newChild.capability) {
				change := Change{
					Path: newChild.path(),
					Kind: ChangeModify,
				}
				*changes = append(*changes, change)
				newChild.added = true
			}

			// Remove from copy so we can detect deletions
			delete(oldChildren, name)
		}

		newChild.addChanges(oldChild, changes)
	}
	for _, oldChild := range oldChildren {
		// delete
		change := Change{
			Path: oldChild.path(),
			Kind: ChangeDelete,
		}
		*changes = append(*changes, change)
	}

	// If there were changes inside this directory, we need to add it, even if the directory
	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
	// As this runs on the daemon side, file paths are OS specific.
	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
		change := Change{
			Path: info.path(),
			Kind: ChangeModify,
		}
		// Let's insert the directory entry before the recently added entries located inside this dir
		*changes = append(*changes, change) // just to resize the slice, will be overwritten
		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
		(*changes)[sizeAtEntry] = change
	}

}

// Changes add changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
	var changes []Change

	info.addChanges(oldInfo, &changes)

	return changes
}

func newRootFileInfo() *FileInfo {
	// As this runs on the daemon side, file paths are OS specific.
	root := &FileInfo{
		name:     string(os.PathSeparator),
		children: make(map[string]*FileInfo),
	}
	return root
}

// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
	var (
		oldRoot, newRoot *FileInfo
	)
	if oldDir == "" {
		emptyDir, err := ioutil.TempDir("", "empty")
		if err != nil {
			return nil, err
		}
		defer os.Remove(emptyDir)
		oldDir = emptyDir
	}
	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
	if err != nil {
		return nil, err
	}

	return newRoot.Changes(oldRoot), nil
}

// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
	var (
		size int64
		sf   = make(map[uint64]struct{})
	)
	for _, change := range changes {
		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
			file := filepath.Join(newDir, change.Path)
			fileInfo, err := os.Lstat(file)
			if err != nil {
				logrus.Errorf("Can not stat %q: %s", file, err)
				continue
			}

			if fileInfo != nil && !fileInfo.IsDir() {
				if hasHardlinks(fileInfo) {
					inode := getIno(fileInfo)
					if _, ok := sf[inode]; !ok {
						size += fileInfo.Size()
						sf[inode] = struct{}{}
					}
				} else {
					size += fileInfo.Size()
				}
			}
		}
	}
	return size
}

// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
	reader, writer := io.Pipe()
	go func() {
		ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer)

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		sort.Sort(changesByPath(changes))

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
					logrus.Debugf("Can't write whiteout header: %s", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close layer: %s", err)
		}
		if err := writer.Close(); err != nil {
			logrus.Debugf("failed close Changes writer: %s", err)
		}
	}()
	return reader, nil
}
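
sameFsTime above is what makes mtime comparison survive a tar round-trip, which drops sub-second precision: times are equal exactly, or equal to the second when either side carries zero nanoseconds. A standalone check with made-up timestamps:

package main

import (
	"fmt"
	"time"
)

// re-declared from the vendored changes.go for a runnable demo
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	onDisk := time.Unix(1500000000, 123456789)
	fromTar := time.Unix(1500000000, 0) // tar stored whole seconds only
	fmt.Println(sameFsTime(onDisk, fromTar)) // true: treated as unchanged

	other := time.Unix(1500000000, 5)
	fmt.Println(sameFsTime(onDisk, other)) // false: both non-zero, different
}
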
vendor/github.com/docker/docker/pkg/archive/changes_linux.go (313 lines, generated, vendored, new file)
@@ -0,0 +1,313 @@
package archive

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"syscall"
	"unsafe"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
	dir1  string
	dir2  string
	root1 *FileInfo
	root2 *FileInfo
}

// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generating a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
	w := &walker{
		dir1:  dir1,
		dir2:  dir2,
		root1: newRootFileInfo(),
		root2: newRootFileInfo(),
	}

	i1, err := os.Lstat(w.dir1)
	if err != nil {
		return nil, nil, err
	}
	i2, err := os.Lstat(w.dir2)
	if err != nil {
		return nil, nil, err
	}

	if err := w.walk("/", i1, i2); err != nil {
		return nil, nil, err
	}

	return w.root1, w.root2, nil
}

// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
	if fi == nil {
		return nil
	}
	parent := root.LookUp(filepath.Dir(path))
	if parent == nil {
		return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
	}
	info := &FileInfo{
		name:     filepath.Base(path),
		children: make(map[string]*FileInfo),
		parent:   parent,
	}
	cpath := filepath.Join(dir, path)
	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
	if err != nil {
		return err
	}
	info.stat = stat
	info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
	parent.children[info.name] = info
	return nil
}

// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
	// Register these nodes with the return trees, unless we're still at the
	// (already-created) roots:
	if path != "/" {
		if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
			return err
		}
		if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
			return err
		}
	}

	is1Dir := i1 != nil && i1.IsDir()
	is2Dir := i2 != nil && i2.IsDir()

	sameDevice := false
	if i1 != nil && i2 != nil {
		si1 := i1.Sys().(*syscall.Stat_t)
		si2 := i2.Sys().(*syscall.Stat_t)
		if si1.Dev == si2.Dev {
			sameDevice = true
		}
	}

	// If these files are both non-existent, or leaves (non-dirs), we are done.
	if !is1Dir && !is2Dir {
		return nil
	}

	// Fetch the names of all the files contained in both directories being walked:
	var names1, names2 []nameIno
	if is1Dir {
		names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}
	if is2Dir {
		names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}

	// We have lists of the files contained in both parallel directories, sorted
	// in the same order. Walk them in parallel, generating a unique merged list
	// of all items present in either or both directories.
	var names []string
	ix1 := 0
	ix2 := 0

	for {
		if ix1 >= len(names1) {
			break
		}
		if ix2 >= len(names2) {
			break
		}

		ni1 := names1[ix1]
		ni2 := names2[ix2]

		switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
		case -1: // ni1 < ni2 -- advance ni1
			// we will not encounter ni1 in names2
			names = append(names, ni1.name)
			ix1++
		case 0: // ni1 == ni2
			if ni1.ino != ni2.ino || !sameDevice {
				names = append(names, ni1.name)
			}
			ix1++
			ix2++
		case 1: // ni1 > ni2 -- advance ni2
			// we will not encounter ni2 in names1
			names = append(names, ni2.name)
			ix2++
		}
	}
	for ix1 < len(names1) {
		names = append(names, names1[ix1].name)
		ix1++
	}
	for ix2 < len(names2) {
		names = append(names, names2[ix2].name)
		ix2++
	}

	// For each of the names present in either or both of the directories being
	// iterated, stat the name under each root, and recurse the pair of them:
	for _, name := range names {
		fname := filepath.Join(path, name)
		var cInfo1, cInfo2 os.FileInfo
		if is1Dir {
			cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if is2Dir {
			cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if err = w.walk(fname, cInfo1, cInfo2); err != nil {
			return err
		}
	}
	return nil
}

// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
	name string
	ino  uint64
}

type nameInoSlice []nameIno

func (s nameInoSlice) Len() int           { return len(s) }
func (s nameInoSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }

// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
	var (
		size = 100
		buf  = make([]byte, 4096)
		nbuf int
		bufp int
		nb   int
	)

	f, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	names = make([]nameIno, 0, size) // Empty with room to grow.
	for {
		// Refill the buffer if necessary
		if bufp >= nbuf {
			bufp = 0
			nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
			if nbuf < 0 {
				nbuf = 0
			}
			if err != nil {
				return nil, os.NewSyscallError("readdirent", err)
			}
			if nbuf <= 0 {
				break // EOF
			}
		}

		// Drain the buffer
		nb, names = parseDirent(buf[bufp:nbuf], names)
		bufp += nb
	}

	sl := nameInoSlice(names)
	sort.Sort(sl)
	return sl, nil
}

// parseDirent is a minor modification of unix.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
	origlen := len(buf)
	for len(buf) > 0 {
		dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
		buf = buf[dirent.Reclen:]
		if dirent.Ino == 0 { // File absent in directory.
			continue
		}
		bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
		var name = string(bytes[0:clen(bytes[:])])
		if name == "." || name == ".." { // Useless names
			continue
		}
		names = append(names, nameIno{name, dirent.Ino})
	}
	return origlen - len(buf), names
}

func clen(n []byte) int {
	for i := 0; i < len(n); i++ {
		if n[i] == 0 {
			return i
		}
	}
	return len(n)
}

// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
	return changes(layers, rw, overlayDeletedFile, nil)
}

func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
	if fi.Mode()&os.ModeCharDevice != 0 {
		s := fi.Sys().(*syscall.Stat_t)
		if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
			return path, nil
		}
	}
	if fi.Mode()&os.ModeDir != 0 {
		opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
		if err != nil {
			return "", err
		}
		if len(opaque) == 1 && opaque[0] == 'y' {
			return path, nil
		}
	}

	return "", nil
}
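
The heart of walk above is a two-pointer merge of two sorted directory listings: names present on only one side are always kept, and names present on both sides are kept only when their inode or device differs, which is what prunes unchanged subtrees. A standalone sketch of that merge over plain strings (the inode comparison is elided, as noted in the comment):

package main

import "fmt"

func mergeNames(a, b []string) []string {
	var out []string
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]:
			out = append(out, a[i]) // only in a
			i++
		case a[i] > b[j]:
			out = append(out, b[j]) // only in b
			j++
		default:
			// present in both; the real walker also compares inode and
			// device here and skips the name when they match exactly
			out = append(out, a[i])
			i++
			j++
		}
	}
	out = append(out, a[i:]...) // drain whichever list remains
	return append(out, b[j:]...)
}

func main() {
	fmt.Println(mergeNames(
		[]string{"bin", "etc", "usr"},
		[]string{"etc", "home", "usr"},
	)) // [bin etc home usr]
}
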
vendor/github.com/docker/docker/pkg/archive/changes_other.go (97 lines, generated, vendored, new file)
@@ -0,0 +1,97 @@
// +build !linux

package archive

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/pkg/system"
)

func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
	var (
		oldRoot, newRoot *FileInfo
		err1, err2       error
		errs             = make(chan error, 2)
	)
	go func() {
		oldRoot, err1 = collectFileInfo(oldDir)
		errs <- err1
	}()
	go func() {
		newRoot, err2 = collectFileInfo(newDir)
		errs <- err2
	}()

	// block until both routines have returned
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return nil, nil, err
		}
	}

	return oldRoot, newRoot, nil
}

func collectFileInfo(sourceDir string) (*FileInfo, error) {
	root := newRootFileInfo()

	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		relPath, err := filepath.Rel(sourceDir, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		relPath = filepath.Join(string(os.PathSeparator), relPath)

		// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
		// Temporary workaround. If the returned path starts with two backslashes,
		// trim it down to a single backslash. Only relevant on Windows.
		if runtime.GOOS == "windows" {
			if strings.HasPrefix(relPath, `\\`) {
				relPath = relPath[1:]
			}
		}

		if relPath == string(os.PathSeparator) {
			return nil
		}

		parent := root.LookUp(filepath.Dir(relPath))
		if parent == nil {
			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
		}

		info := &FileInfo{
			name:     filepath.Base(relPath),
			children: make(map[string]*FileInfo),
			parent:   parent,
		}

		s, err := system.Lstat(path)
		if err != nil {
			return err
		}
		info.stat = s

		info.capability, _ = system.Lgetxattr(path, "security.capability")

		parent.children[info.name] = info

		return nil
	})
	if err != nil {
		return nil, err
	}
	return root, nil
}
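
collectFileInfoForChanges above scans both trees concurrently and joins on a buffered error channel, returning the first error it sees. The same join pattern in a standalone sketch; the two closures here are placeholders:

package main

import (
	"errors"
	"fmt"
)

func runBoth(f, g func() error) error {
	errs := make(chan error, 2) // buffered so neither goroutine blocks on send
	go func() { errs <- f() }()
	go func() { errs <- g() }()
	// block until both routines have returned (or bail on the first error;
	// the buffer lets the other goroutine finish and exit regardless)
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runBoth(
		func() error { return nil },
		func() error { return errors.New("scan failed") },
	)
	fmt.Println(err)
}
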
vendor/github.com/docker/docker/pkg/archive/changes_unix.go (37 lines, generated, vendored, new file)
@@ -0,0 +1,37 @@
// +build !windows

package archive

import (
	"os"
	"syscall"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	// Don't look at size for dirs, its not a good measure of change
	if oldStat.Mode() != newStat.Mode() ||
		oldStat.UID() != newStat.UID() ||
		oldStat.GID() != newStat.GID() ||
		oldStat.Rdev() != newStat.Rdev() ||
		// Don't look at size for dirs, its not a good measure of change
		(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
}

func getIno(fi os.FileInfo) uint64 {
	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
}

func hasHardlinks(fi os.FileInfo) bool {
	return fi.Sys().(*syscall.Stat_t).Nlink > 1
}
vendor/github.com/docker/docker/pkg/archive/changes_windows.go (30 lines, generated, vendored, new file)
@@ -0,0 +1,30 @@
package archive

import (
	"os"

	"github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	// Don't look at size for dirs, its not a good measure of change
	if oldStat.Mtim() != newStat.Mtim() ||
		oldStat.Mode() != newStat.Mode() ||
		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode().IsDir()
}

func getIno(fi os.FileInfo) (inode uint64) {
	return
}

func hasHardlinks(fi os.FileInfo) bool {
	return false
}
461
vendor/github.com/docker/docker/pkg/archive/copy.go
generated
vendored
Normal file
461
vendor/github.com/docker/docker/pkg/archive/copy.go
generated
vendored
Normal file
@ -0,0 +1,461 @@
|
||||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Errors used or returned by this file.
|
||||
var (
|
||||
ErrNotDirectory = errors.New("not a directory")
|
||||
ErrDirNotExists = errors.New("no such directory")
|
||||
ErrCannotCopyDir = errors.New("cannot copy directory")
|
||||
ErrInvalidCopySource = errors.New("invalid copy source content")
|
||||
)
|
||||
|
||||
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
|
||||
// processing using any utility functions from the path or filepath stdlib
|
||||
// packages) and appends a trailing `/.` or `/` if its corresponding original
|
||||
// path (from before being processed by utility functions from the path or
|
||||
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
|
||||
// path already ends in a `.` path segment, then another is not added. If the
|
||||
// clean path already ends in a path separator, then another is not added.
|
||||
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
|
||||
// Ensure paths are in platform semantics
|
||||
cleanedPath = normalizePath(cleanedPath)
|
||||
originalPath = normalizePath(originalPath)
|
||||
|
||||
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
|
||||
if !hasTrailingPathSeparator(cleanedPath) {
|
||||
// Add a separator if it doesn't already end with one (a cleaned
|
||||
// path would only end in a separator if it is the root).
|
||||
cleanedPath += string(filepath.Separator)
|
||||
}
|
||||
cleanedPath += "."
|
||||
}
|
||||
|
||||
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
|
||||
cleanedPath += string(filepath.Separator)
|
||||
}
|
||||
|
||||
return cleanedPath
|
||||
}
|
||||
|
||||
// assertsDirectory returns whether the given path is
|
||||
// asserted to be a directory, i.e., the path ends with
|
||||
// a trailing '/' or `/.`, assuming a path separator of `/`.
|
||||
func assertsDirectory(path string) bool {
|
||||
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
|
||||
}
|
||||
|
||||
// hasTrailingPathSeparator returns whether the given
|
||||
// path ends with the system's path separator character.
|
||||
func hasTrailingPathSeparator(path string) bool {
|
||||
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
|
||||
}
|
||||
|
||||
// specifiesCurrentDir returns whether the given path specifies
|
||||
// a "current directory", i.e., the last path segment is `.`.
|
||||
func specifiesCurrentDir(path string) bool {
|
||||
return filepath.Base(path) == "."
|
||||
}
|
||||
|
||||
// SplitPathDirEntry splits the given path between its directory name and its
|
||||
// basename by first cleaning the path but preserves a trailing "." if the
|
||||
// original path specified the current directory.
|
||||
func SplitPathDirEntry(path string) (dir, base string) {
|
||||
cleanedPath := filepath.Clean(normalizePath(path))
|
||||
|
||||
if specifiesCurrentDir(path) {
|
||||
cleanedPath += string(filepath.Separator) + "."
|
||||
}
|
||||
|
||||
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
|
||||
}
|
||||
|
||||
// TarResource archives the resource described by the given CopyInfo to a Tar
|
||||
// archive. A non-nil error is returned if sourcePath does not exist or is
|
||||
// asserted to be a directory but exists as another type of file.
|
||||
//
|
||||
// This function acts as a convenient wrapper around TarWithOptions, which
|
||||
// requires a directory as the source path. TarResource accepts either a
|
||||
// directory or a file path and correctly sets the Tar options.
|
||||
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
|
||||
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
|
||||
}
|
||||
|
||||
// TarResourceRebase is like TarResource but renames the first path element of
|
||||
// items in the resulting tar archive to match the given rebaseName if not "".
|
||||
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
|
||||
sourcePath = normalizePath(sourcePath)
|
||||
if _, err = os.Lstat(sourcePath); err != nil {
|
||||
// Catches the case where the source does not exist or is not a
|
||||
// directory if asserted to be a directory, as this also causes an
|
||||
// error.
|
||||
return
|
||||
}
|
||||
|
||||
// Separate the source path between its directory and
|
||||
// the entry in that directory which we are archiving.
|
||||
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
||||
|
||||
filter := []string{sourceBase}
|
||||
|
||||
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
|
||||
|
||||
return TarWithOptions(sourceDir, &TarOptions{
|
||||
Compression: Uncompressed,
|
||||
IncludeFiles: filter,
|
||||
IncludeSourceDir: true,
|
||||
RebaseNames: map[string]string{
|
||||
sourceBase: rebaseName,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// CopyInfo holds basic info about the source
|
||||
// or destination path of a copy operation.
|
||||
type CopyInfo struct {
|
||||
Path string
|
||||
Exists bool
|
||||
IsDir bool
|
||||
RebaseName string
|
||||
}
|
||||
|
||||
// CopyInfoSourcePath stats the given path to create a CopyInfo
|
||||
// struct representing that resource for the source of an archive copy
|
||||
// operation. The given path should be an absolute local path. A source path
|
||||
// has all symlinks evaluated that appear before the last path separator ("/"
|
||||
// on Unix). As it is to be a copy source, the path must exist.
|
||||
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
|
||||
// normalize the file path and then evaluate the symbol link
|
||||
// we will use the target file instead of the symbol link if
|
||||
// followLink is set
|
||||
path = normalizePath(path)
|
||||
|
||||
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
stat, err := os.Lstat(resolvedPath)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
return CopyInfo{
|
||||
Path: resolvedPath,
|
||||
Exists: true,
|
||||
IsDir: stat.IsDir(),
|
||||
RebaseName: rebaseName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CopyInfoDestinationPath stats the given path to create a CopyInfo
|
||||
// struct representing that resource for the destination of an archive copy
|
||||
// operation. The given path should be an absolute local path.
|
||||
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
|
||||
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
|
||||
path = normalizePath(path)
|
||||
originalPath := path
|
||||
|
||||
stat, err := os.Lstat(path)
|
||||
|
||||
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
|
||||
// The path exists and is not a symlink.
|
||||
return CopyInfo{
|
||||
Path: path,
|
||||
Exists: true,
|
||||
IsDir: stat.IsDir(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// While the path is a symlink.
|
||||
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
|
||||
if n > maxSymlinkIter {
|
||||
// Don't follow symlinks more than this arbitrary number of times.
|
||||
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
|
||||
}
|
||||
|
||||
// The path is a symbolic link. We need to evaluate it so that the
|
||||
// destination of the copy operation is the link target and not the
|
||||
// link itself. This is notably different than CopyInfoSourcePath which
|
||||
// only evaluates symlinks before the last appearing path separator.
|
||||
// Also note that it is okay if the last path element is a broken
|
||||
// symlink as the copy operation should create the target.
|
||||
var linkTarget string
|
||||
|
||||
linkTarget, err = os.Readlink(path)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
if !system.IsAbs(linkTarget) {
|
||||
// Join with the parent directory.
|
||||
dstParent, _ := SplitPathDirEntry(path)
|
||||
linkTarget = filepath.Join(dstParent, linkTarget)
|
||||
}
|
||||
|
||||
path = linkTarget
|
||||
stat, err = os.Lstat(path)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// It's okay if the destination path doesn't exist. We can still
|
||||
// continue the copy operation if the parent directory exists.
|
||||
if !os.IsNotExist(err) {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
// Ensure destination parent dir exists.
|
||||
dstParent, _ := SplitPathDirEntry(path)
|
||||
|
||||
parentDirStat, err := os.Lstat(dstParent)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
if !parentDirStat.IsDir() {
|
||||
return CopyInfo{}, ErrNotDirectory
|
||||
}
|
||||
|
||||
return CopyInfo{Path: path}, nil
|
||||
}
|
||||
|
||||
// The path exists after resolving symlinks.
|
||||
return CopyInfo{
|
||||
Path: path,
|
||||
Exists: true,
|
||||
IsDir: stat.IsDir(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PrepareArchiveCopy prepares the given srcContent archive, which should
|
||||
// contain the archived resource described by srcInfo, to the destination
|
||||
// described by dstInfo. Returns the possibly modified content archive along
|
||||
// with the path to the destination directory which it should be extracted to.
|
||||
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
|
||||
// Ensure in platform semantics
|
||||
srcInfo.Path = normalizePath(srcInfo.Path)
|
||||
dstInfo.Path = normalizePath(dstInfo.Path)
|
||||
|
||||
// Separate the destination path between its directory and base
|
||||
// components in case the source archive contents need to be rebased.
|
||||
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
|
||||
_, srcBase := SplitPathDirEntry(srcInfo.Path)
|
||||
|
||||
switch {
|
||||
case dstInfo.Exists && dstInfo.IsDir:
|
||||
// The destination exists as a directory. No alteration
|
||||
// to srcContent is needed as its contents can be
|
||||
// simply extracted to the destination directory.
|
||||
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
|
||||
case dstInfo.Exists && srcInfo.IsDir:
|
||||
// The destination exists as some type of file and the source
|
||||
// content is a directory. This is an error condition since
|
||||
// you cannot copy a directory to an existing file location.
|
||||
return "", nil, ErrCannotCopyDir
|
||||
case dstInfo.Exists:
|
||||
// The destination exists as some type of file and the source content
|
||||
// is also a file. The source content entry will have to be renamed to
|
||||
// have a basename which matches the destination path's basename.
|
||||
if len(srcInfo.RebaseName) != 0 {
|
||||
srcBase = srcInfo.RebaseName
|
||||
}
|
||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
case srcInfo.IsDir:
|
||||
// The destination does not exist and the source content is an archive
|
||||
// of a directory. The archive should be extracted to the parent of
|
||||
// the destination path instead, and when it is, the directory that is
|
||||
// created as a result should take the name of the destination path.
|
||||
// The source content entries will have to be renamed to have a
|
||||
// basename which matches the destination path's basename.
|
||||
if len(srcInfo.RebaseName) != 0 {
|
||||
srcBase = srcInfo.RebaseName
|
||||
}
|
||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
case assertsDirectory(dstInfo.Path):
|
||||
// The destination does not exist and is asserted to be created as a
|
||||
// directory, but the source content is not a directory. This is an
|
||||
// error condition since you cannot create a directory from a file
|
||||
// source.
|
||||
return "", nil, ErrDirNotExists
|
||||
default:
|
||||
// The last remaining case is when the destination does not exist, is
|
||||
// not asserted to be a directory, and the source content is not an
|
||||
// archive of a directory. It this case, the destination file will need
|
||||
// to be created when the archive is extracted and the source content
|
||||
// entry will have to be renamed to have a basename which matches the
|
||||
// destination path's basename.
|
||||
if len(srcInfo.RebaseName) != 0 {
|
||||
srcBase = srcInfo.RebaseName
|
||||
}
|
||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
}
|
||||
|
||||
}

// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
	if oldBase == string(os.PathSeparator) {
		// If oldBase specifies the root directory, use an empty string as
		// oldBase instead so that newBase doesn't replace the path separator
		// that all paths will start with.
		oldBase = ""
	}

	rebased, w := io.Pipe()

	go func() {
		srcTar := tar.NewReader(srcContent)
		rebasedTar := tar.NewWriter(w)

		for {
			hdr, err := srcTar.Next()
			if err == io.EOF {
				// Signals end of archive.
				rebasedTar.Close()
				w.Close()
				return
			}
			if err != nil {
				w.CloseWithError(err)
				return
			}

			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
			if hdr.Typeflag == tar.TypeLink {
				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
			}

			if err = rebasedTar.WriteHeader(hdr); err != nil {
				w.CloseWithError(err)
				return
			}

			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
				w.CloseWithError(err)
				return
			}
		}
	}()

	return rebased
}
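
// Illustrative example (not part of the vendored source): with oldBase "src"
// and newBase "dst", an entry named "src/a/b.txt" is rewritten to
// "dst/a/b.txt"; only the first occurrence at the start of the entry name is
// replaced, and hard-link targets are rebased the same way.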

// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
	var (
		srcInfo CopyInfo
		err     error
	)

	// Ensure in platform semantics
	srcPath = normalizePath(srcPath)
	dstPath = normalizePath(dstPath)

	// Clean the source and destination paths.
	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)

	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
		return err
	}

	content, err := TarResource(srcInfo)
	if err != nil {
		return err
	}
	defer content.Close()

	return CopyTo(content, srcInfo, dstPath)
}
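
// Illustrative usage (not part of the vendored source; the paths are made up):
//
//	// Copy /tmp/a.txt into the existing directory /data, following symlinks.
//	if err := CopyResource("/tmp/a.txt", "/data", true); err != nil {
//		// handle error
//	}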

// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
	// The destination path need not exist, but CopyInfoDestinationPath will
	// ensure that at least the parent directory exists.
	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
	if err != nil {
		return err
	}

	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
	if err != nil {
		return err
	}
	defer copyArchive.Close()

	options := &TarOptions{
		NoLchown:             true,
		NoOverwriteDirNonDir: true,
	}

	return Untar(copyArchive, dstDir, options)
}

// ResolveHostSourcePath decides the real path that needs to be copied, given
// whether to follow symlinks. If followLink is true, resolvedPath is the link
// target of any symlinked file; otherwise only symlinks in the directory
// portion of the path are resolved, and a symlinked file itself is returned
// without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
	if followLink {
		resolvedPath, err = filepath.EvalSymlinks(path)
		if err != nil {
			return
		}

		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
	} else {
		dirPath, basePath := filepath.Split(path)

		// If not following the symlink, still resolve any symlink in the parent dir.
		var resolvedDirPath string
		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
		if err != nil {
			return
		}
		// resolvedDirPath will have been cleaned (no trailing path separators) so
		// we can manually join it with the base path element.
		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
			rebaseName = filepath.Base(path)
		}
	}
	return resolvedPath, rebaseName, nil
}

// GetRebaseName normalizes and compares path and resolvedPath,
// returning the completed resolved path and the rebased file name.
func GetRebaseName(path, resolvedPath string) (string, string) {
	// resolvedPath will have been cleaned (no trailing path separators or dot)
	// so we can manually join it with them.
	var rebaseName string
	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
		resolvedPath += string(filepath.Separator) + "."
	}

	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
		resolvedPath += string(filepath.Separator)
	}

	if filepath.Base(path) != filepath.Base(resolvedPath) {
		// In the case where the path had a trailing separator and a symlink
		// evaluation has changed the last path component, we will need to
		// rebase the name in the archive that is being copied to match the
		// originally requested name.
		rebaseName = filepath.Base(path)
	}
	return resolvedPath, rebaseName
}
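
// Illustrative example (not part of the vendored source): if /a/b is a
// symlink to /a/c, then GetRebaseName("/a/b/", "/a/c") returns ("/a/c/", "b"):
// the requested trailing separator is restored, and because symlink
// evaluation changed the final component, archive entries must be rebased
// from "c" back to the requested name "b".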
11
vendor/github.com/docker/docker/pkg/archive/copy_unix.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
// +build !windows

package archive

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.ToSlash(path)
}
9
vendor/github.com/docker/docker/pkg/archive/copy_windows.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
package archive

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.FromSlash(path)
}
256
vendor/github.com/docker/docker/pkg/archive/diff.go
generated
vendored
Normal file
@ -0,0 +1,256 @@
package archive

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
)

// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
	tr := tar.NewReader(layer)
	trBuf := pools.BufioReader32KPool.Get(tr)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	unpackedPaths := make(map[string]struct{})

	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)

	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return 0, err
		}

		size += hdr.Size

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		// Windows does not support filenames with colons in them. Ignore
		// these files. This is not a problem though (although it might
		// appear that it is). Let's suppose a client is running docker pull.
		// The daemon it points to is Windows. Would it make sense for the
		// client to be doing a docker pull Ubuntu for example (which has files
		// with colons in the name under /usr/share/man/man3)? No, absolutely
		// not as it would really only make sense that they were pulling a
		// Windows image. However, for development, it is necessary to be able
		// to pull Linux images which are in the repository.
		//
		// TODO Windows. Once the registry is aware of what images are Windows-
		// specific or Linux-specific, this warning should be changed to an error
		// to cater for the situation where someone does manage to upload a Linux
		// image but have it tagged as Windows inadvertently.
		if runtime.GOOS == "windows" {
			if strings.Contains(hdr.Name, ":") {
				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
				continue
			}
		}

		// Note as these operations are platform specific, so must the slash be.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)

			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0600, "")
				if err != nil {
					return 0, err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
			// We don't want this directory, but we need the files in it so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return 0, err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
					return 0, err
				}
			}

			if hdr.Name != WhiteoutOpaqueDir {
				continue
			}
		}
		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return 0, err
		}

		// Note as these operations are platform specific, so must the slash be.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}
		base := filepath.Base(path)

		if strings.HasPrefix(base, WhiteoutPrefix) {
			dir := filepath.Dir(path)
			if base == WhiteoutOpaqueDir {
				_, err := os.Lstat(dir)
				if err != nil {
					return 0, err
				}
				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							err = nil // parent was deleted
						}
						return err
					}
					if path == dir {
						return nil
					}
					if _, exists := unpackedPaths[path]; !exists {
						err := os.RemoveAll(path)
						return err
					}
					return nil
				})
				if err != nil {
					return 0, err
				}
			} else {
				originalBase := base[len(WhiteoutPrefix):]
				originalPath := filepath.Join(dir, originalBase)
				if err := os.RemoveAll(originalPath); err != nil {
					return 0, err
				}
			}
		} else {
			// If path exists we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return 0, err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return 0, fmt.Errorf("Invalid aufs hardlink")
				}
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return 0, err
				}
				defer tmpFile.Close()
				srcData = tmpFile
			}

			if err := remapIDs(idMappings, srcHdr); err != nil {
				return 0, err
			}

			if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
				return 0, err
			}

			// Directory mtimes must be handled at the end to avoid further
			// file creation in them to modify the directory mtime
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
			unpackedPaths[path] = struct{}{}
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return 0, err
		}
	}

	return size, nil
}

// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
	return applyLayerHandler(dest, layer, &TarOptions{}, true)
}

// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
	return applyLayerHandler(dest, layer, options, false)
}

// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
	dest = filepath.Clean(dest)

	// We need to be able to set any perms
	oldmask, err := system.Umask(0)
	if err != nil {
		return 0, err
	}
	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform

	if decompress {
		layer, err = DecompressStream(layer)
		if err != nil {
			return 0, err
		}
	}
	return UnpackLayer(dest, layer, options)
}
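
// Illustrative usage (not part of the vendored source; the layer path is
// hypothetical):
//
//	f, err := os.Open("/tmp/layer.tar.gz")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()
//	n, err := ApplyLayer("/var/lib/rootfs", f) // n is the unpacked size in bytes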
16
vendor/github.com/docker/docker/pkg/archive/time_linux.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
package archive

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	if time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = ((1 << 30) - 2)
		return
	}
	return syscall.NsecToTimespec(time.UnixNano())
}
16
vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
// +build !linux

package archive

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	nsec := int64(0)
	if !time.IsZero() {
		nsec = time.UnixNano()
	}
	return syscall.NsecToTimespec(nsec)
}
23
vendor/github.com/docker/docker/pkg/archive/whiteouts.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
package archive

// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.

// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."

// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"

// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
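
// Illustrative note (not part of the vendored source): given these constants,
// a layer entry named ".wh.foo" marks "foo" as deleted in lower layers, and a
// ".wh..wh..opq" entry inside a directory marks that directory opaque so
// lower-layer contents are hidden.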
59
vendor/github.com/docker/docker/pkg/archive/wrap.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
package archive

import (
	"archive/tar"
	"bytes"
	"io"
)

// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
//  * ./foo.txt with content "hello world"
//  * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
	files := parseStringPairs(input...)
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, file := range files {
		name, content := file[0], file[1]
		hdr := &tar.Header{
			Name: name,
			Size: int64(len(content)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for i := 0; i < len(input); i += 2 {
		var pair [2]string
		pair[0] = input[i]
		if i+1 < len(input) {
			pair[1] = input[i+1]
		}
		output = append(output, pair)
	}
	return
}
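
// Illustrative usage (not part of the vendored source): the returned reader
// is a plain in-memory tar stream, so it can be fed straight to Untar:
//
//	r, err := Generate("foo.txt", "hello world", "emptyfile")
//	if err == nil {
//		err = Untar(r, "/tmp/out", nil) // extracts foo.txt and emptyfile
//	}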
70
vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
package chrootarchive

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/idtools"
)

// NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
	if idMappings == nil {
		idMappings = &idtools.IDMappings{}
	}
	return &archive.Archiver{Untar: Untar, IDMappings: idMappings}
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
	return untarHandler(tarArchive, dest, options, true)
}

// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
	return untarHandler(tarArchive, dest, options, false)
}

// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
	}
	if options == nil {
		options = &archive.TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
	rootIDs := idMappings.RootPair()

	dest = filepath.Clean(dest)
	if _, err := os.Stat(dest); os.IsNotExist(err) {
		if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
			return err
		}
	}

	r := ioutil.NopCloser(tarArchive)
	if decompress {
		decompressedArchive, err := archive.DecompressStream(tarArchive)
		if err != nil {
			return err
		}
		defer decompressedArchive.Close()
		r = decompressedArchive
	}

	return invokeUnpack(r, dest, options)
}
86
vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
// +build !windows

package chrootarchive

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"runtime"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/reexec"
)

// untar is the entry-point for docker-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
// chroot and re-exec.
func untar() {
	runtime.LockOSThread()
	flag.Parse()

	var options *archive.TarOptions

	// read the options from the pipe "ExtraFiles"
	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
		fatal(err)
	}

	if err := chroot(flag.Arg(0)); err != nil {
		fatal(err)
	}

	if err := archive.Unpack(os.Stdin, "/", options); err != nil {
		fatal(err)
	}
	// fully consume stdin in case it is zero padded
	if _, err := flush(os.Stdin); err != nil {
		fatal(err)
	}

	os.Exit(0)
}

func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {

	// We can't pass a potentially large exclude list directly via cmd line
	// because we easily overrun the kernel's max argument/environment size
	// when the full image list is passed (e.g. when this is used by
	// `docker load`). We will marshal the options via a pipe to the
	// child.
	r, w, err := os.Pipe()
	if err != nil {
		return fmt.Errorf("Untar pipe failure: %v", err)
	}

	cmd := reexec.Command("docker-untar", dest)
	cmd.Stdin = decompressedArchive

	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
	output := bytes.NewBuffer(nil)
	cmd.Stdout = output
	cmd.Stderr = output

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
	}
	// write the options to the pipe for the untar exec to read
	if err := json.NewEncoder(w).Encode(options); err != nil {
		return fmt.Errorf("Untar json encode to pipe failed: %v", err)
	}
	w.Close()

	if err := cmd.Wait(); err != nil {
		// when `xz -d -c -q | docker-untar ...` failed on docker-untar side,
		// we need to exhaust `xz`'s output, otherwise the `xz` side will be
		// pending on write pipe forever
		io.Copy(ioutil.Discard, decompressedArchive)

		return fmt.Errorf("Error processing tar file(%v): %s", err, output)
	}
	return nil
}
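
// Illustrative note (not part of the vendored source): the re-exec flow ties
// this file to init_unix.go and the daemon's main():
//
//	reexec.Register("docker-untar", untar)      // init_unix.go maps the name
//	cmd := reexec.Command("docker-untar", dest) // child re-runs this binary
//	// reexec.Init() in the child's main() dispatches to untar(), which reads
//	// TarOptions from fd 3 and the tar stream from stdin inside the chroot.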
22
vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
package chrootarchive

import (
	"io"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/longpath"
)

// chroot is not supported by Windows
func chroot(path string) error {
	return nil
}

func invokeUnpack(decompressedArchive io.ReadCloser,
	dest string,
	options *archive.TarOptions) error {
	// Windows is different to Linux here because Windows does not support
	// chroot. Hence there is no point sandboxing a chrooted process to
	// do the unpack. We call inline instead within the daemon process.
	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}
108
vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
package chrootarchive

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/mount"
	rsystem "github.com/opencontainers/runc/libcontainer/system"
	"golang.org/x/sys/unix"
)

// chroot on linux uses pivot_root instead of chroot.
// pivot_root takes a new root and an old root.
// Old root must be a sub-dir of new root; it is where the current rootfs will reside after the call to pivot_root.
// New root is where the new rootfs is set to.
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs.
func chroot(path string) (err error) {
	// if the engine is running in a user namespace we need to use actual chroot
	if rsystem.RunningInUserNS() {
		return realChroot(path)
	}
	if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
		return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
	}

	// make everything in new ns private
	if err := mount.MakeRPrivate("/"); err != nil {
		return err
	}

	if mounted, _ := mount.Mounted(path); !mounted {
		if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
			return realChroot(path)
		}
	}

	// setup oldRoot for pivot_root
	pivotDir, err := ioutil.TempDir(path, ".pivot_root")
	if err != nil {
		return fmt.Errorf("Error setting up pivot dir: %v", err)
	}

	var mounted bool
	defer func() {
		if mounted {
			// make sure pivotDir is not mounted before we try to remove it
			if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil {
				if err == nil {
					err = errCleanup
				}
				return
			}
		}

		errCleanup := os.Remove(pivotDir)
		// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
		// because we already cleaned it up on failed pivot_root
		if errCleanup != nil && !os.IsNotExist(errCleanup) {
			errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
			if err == nil {
				err = errCleanup
			}
		}
	}()

	if err := unix.PivotRoot(path, pivotDir); err != nil {
		// If pivot fails, fall back to the normal chroot after cleaning up temp dir
		if err := os.Remove(pivotDir); err != nil {
			return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
		}
		return realChroot(path)
	}
	mounted = true

	// This is the new path for where the old root (prior to the pivot) has been moved to.
	// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction.
	pivotDir = filepath.Join("/", filepath.Base(pivotDir))

	if err := unix.Chdir("/"); err != nil {
		return fmt.Errorf("Error changing to new root: %v", err)
	}

	// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
	if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
		return fmt.Errorf("Error making old root private after pivot: %v", err)
	}

	// Now unmount the old root so it's no longer visible from the new root
	if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
		return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
	}
	mounted = false

	return nil
}

func realChroot(path string) error {
	if err := unix.Chroot(path); err != nil {
		return fmt.Errorf("Error after fallback to chroot: %v", err)
	}
	if err := unix.Chdir("/"); err != nil {
		return fmt.Errorf("Error changing to new root after chroot: %v", err)
	}
	return nil
}
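
// Illustrative summary (not part of the vendored source): the happy path
// above is unshare(CLONE_NEWNS) -> bind-mount path onto itself ->
// pivot_root(path, pivotDir) -> chdir("/") -> unmount the old root with
// MNT_DETACH. Any failure along the way degrades to plain chroot(2) via
// realChroot.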
12
vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
// +build !windows,!linux

package chrootarchive

import "golang.org/x/sys/unix"

func chroot(path string) error {
	if err := unix.Chroot(path); err != nil {
		return err
	}
	return unix.Chdir("/")
}
23
vendor/github.com/docker/docker/pkg/chrootarchive/diff.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
package chrootarchive

import (
	"io"

	"github.com/docker/docker/pkg/archive"
)

// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
	return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
}

// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
	return applyLayerHandler(dest, layer, options, false)
}
130
vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
// +build !windows

package chrootarchive

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/reexec"
	"github.com/docker/docker/pkg/system"
	rsystem "github.com/opencontainers/runc/libcontainer/system"
)

type applyLayerResponse struct {
	LayerSize int64 `json:"layerSize"`
}

// applyLayer is the entry-point for docker-applylayer on re-exec. This is not
// used on Windows as it does not support chroot, hence no point sandboxing
// through chroot and re-exec.
func applyLayer() {

	var (
		tmpDir  string
		err     error
		options *archive.TarOptions
	)
	runtime.LockOSThread()
	flag.Parse()

	inUserns := rsystem.RunningInUserNS()
	if err := chroot(flag.Arg(0)); err != nil {
		fatal(err)
	}

	// We need to be able to set any perms
	oldmask, err := system.Umask(0)
	defer system.Umask(oldmask)
	if err != nil {
		fatal(err)
	}

	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
		fatal(err)
	}

	if inUserns {
		options.InUserNS = true
	}

	if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
		fatal(err)
	}

	os.Setenv("TMPDIR", tmpDir)
	size, err := archive.UnpackLayer("/", os.Stdin, options)
	os.RemoveAll(tmpDir)
	if err != nil {
		fatal(err)
	}

	encoder := json.NewEncoder(os.Stdout)
	if err := encoder.Encode(applyLayerResponse{size}); err != nil {
		fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
	}

	if _, err := flush(os.Stdin); err != nil {
		fatal(err)
	}

	os.Exit(0)
}

// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
	dest = filepath.Clean(dest)
	if decompress {
		decompressed, err := archive.DecompressStream(layer)
		if err != nil {
			return 0, err
		}
		defer decompressed.Close()

		layer = decompressed
	}
	if options == nil {
		options = &archive.TarOptions{}
		if rsystem.RunningInUserNS() {
			options.InUserNS = true
		}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	data, err := json.Marshal(options)
	if err != nil {
		return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
	}

	cmd := reexec.Command("docker-applyLayer", dest)
	cmd.Stdin = layer
	cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))

	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
	cmd.Stdout, cmd.Stderr = outBuf, errBuf

	if err = cmd.Run(); err != nil {
		return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
	}

	// Stdout should be a valid JSON struct representing an applyLayerResponse.
	response := applyLayerResponse{}
	decoder := json.NewDecoder(outBuf)
	if err = decoder.Decode(&response); err != nil {
		return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
	}

	return response.LayerSize, nil
}
45
vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
package chrootarchive

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/longpath"
)

// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
	dest = filepath.Clean(dest)

	// Ensure it is a Windows-style volume path
	dest = longpath.AddPrefix(dest)

	if decompress {
		decompressed, err := archive.DecompressStream(layer)
		if err != nil {
			return 0, err
		}
		defer decompressed.Close()

		layer = decompressed
	}

	tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
	if err != nil {
		return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
	}

	s, err := archive.UnpackLayer(dest, layer, nil)
	os.RemoveAll(tmpDir)
	if err != nil {
		return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
	}

	return s, nil
}
28
vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
// +build !windows

package chrootarchive

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	reexec.Register("docker-applyLayer", applyLayer)
	reexec.Register("docker-untar", untar)
}

func fatal(err error) {
	fmt.Fprint(os.Stderr, err)
	os.Exit(1)
}

// flush consumes all the bytes from the reader, discarding
// any errors
func flush(r io.Reader) (bytes int64, err error) {
	return io.Copy(ioutil.Discard, r)
}
4
vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go
generated
vendored
Normal file
@ -0,0 +1,4 @@
package chrootarchive

func init() {
}
298
vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
generated
vendored
Normal file
@ -0,0 +1,298 @@
package fileutils

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"text/scanner"

	"github.com/sirupsen/logrus"
)

// PatternMatcher allows checking paths against a list of patterns
type PatternMatcher struct {
	patterns   []*Pattern
	exclusions bool
}

// NewPatternMatcher creates a new matcher object for specific patterns that
// can be used later to match paths against the patterns
func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
	pm := &PatternMatcher{
		patterns: make([]*Pattern, 0, len(patterns)),
	}
	for _, p := range patterns {
		// Eliminate leading and trailing whitespace.
		p = strings.TrimSpace(p)
		if p == "" {
			continue
		}
		p = filepath.Clean(p)
		newp := &Pattern{}
		if p[0] == '!' {
			if len(p) == 1 {
				return nil, errors.New("illegal exclusion pattern: \"!\"")
			}
			newp.exclusion = true
			p = p[1:]
			pm.exclusions = true
		}
		// Do some syntax checking on the pattern.
		// filepath's Match() has some really weird rules that are inconsistent
		// so instead of trying to dup their logic, just call Match() for its
		// error state and if there is an error in the pattern return it.
		// If this becomes an issue we can remove this since it's really only
		// needed in the error (syntax) case - which isn't really critical.
		if _, err := filepath.Match(p, "."); err != nil {
			return nil, err
		}
		newp.cleanedPattern = p
		newp.dirs = strings.Split(p, string(os.PathSeparator))
		pm.patterns = append(pm.patterns, newp)
	}
	return pm, nil
}

// Matches matches path against all the patterns. Matches is not safe to be
// called concurrently
func (pm *PatternMatcher) Matches(file string) (bool, error) {
	matched := false
	file = filepath.FromSlash(file)
	parentPath := filepath.Dir(file)
	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))

	for _, pattern := range pm.patterns {
		negative := false

		if pattern.exclusion {
			negative = true
		}

		match, err := pattern.match(file)
		if err != nil {
			return false, err
		}

		if !match && parentPath != "." {
			// Check to see if the pattern matches one of our parent dirs.
			if len(pattern.dirs) <= len(parentPathDirs) {
				match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
			}
		}

		if match {
			matched = !negative
		}
	}

	if matched {
		logrus.Debugf("Skipping excluded path: %s", file)
	}

	return matched, nil
}
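
// Illustrative usage (not part of the vendored source): later patterns win,
// so an exclusion can re-include a path matched by an earlier pattern.
//
//	pm, _ := NewPatternMatcher([]string{"vendor/**", "!vendor/README.md"})
//	skip, _ := pm.Matches("vendor/a/b.go")   // true: excluded
//	skip, _ = pm.Matches("vendor/README.md") // false: re-included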

// Exclusions returns true if any of the patterns define exclusions
func (pm *PatternMatcher) Exclusions() bool {
	return pm.exclusions
}

// Patterns returns the array of active patterns
func (pm *PatternMatcher) Patterns() []*Pattern {
	return pm.patterns
}

// Pattern defines a single regexp used to filter file paths.
type Pattern struct {
	cleanedPattern string
	dirs           []string
	regexp         *regexp.Regexp
	exclusion      bool
}

func (p *Pattern) String() string {
	return p.cleanedPattern
}

// Exclusion returns true if this pattern defines exclusion
func (p *Pattern) Exclusion() bool {
	return p.exclusion
}

func (p *Pattern) match(path string) (bool, error) {

	if p.regexp == nil {
		if err := p.compile(); err != nil {
			return false, filepath.ErrBadPattern
		}
	}

	b := p.regexp.MatchString(path)

	return b, nil
}

func (p *Pattern) compile() error {
	regStr := "^"
	pattern := p.cleanedPattern
	// Go through the pattern and convert it to a regexp.
	// We use a scanner so we can support utf-8 chars.
	var scan scanner.Scanner
	scan.Init(strings.NewReader(pattern))

	sl := string(os.PathSeparator)
	escSL := sl
	if sl == `\` {
		escSL += `\`
	}

	for scan.Peek() != scanner.EOF {
		ch := scan.Next()

		if ch == '*' {
			if scan.Peek() == '*' {
				// is some flavor of "**"
				scan.Next()

				// Treat **/ as ** so eat the "/"
				if string(scan.Peek()) == sl {
					scan.Next()
				}

				if scan.Peek() == scanner.EOF {
					// is "**EOF" - to align with .gitignore just accept all
					regStr += ".*"
				} else {
					// is "**"
					// Note that this allows for any # of /'s (even 0) because
					// the .* will eat everything, even /'s
					regStr += "(.*" + escSL + ")?"
				}
			} else {
				// is "*" so map it to anything but "/"
				regStr += "[^" + escSL + "]*"
			}
		} else if ch == '?' {
			// "?" is any char except "/"
			regStr += "[^" + escSL + "]"
		} else if ch == '.' || ch == '$' {
			// Escape some regexp special chars that have no meaning
			// in golang's filepath.Match
			regStr += `\` + string(ch)
		} else if ch == '\\' {
			// escape next char. Note that a trailing \ in the pattern
			// will be left alone (but need to escape it)
			if sl == `\` {
				// On windows map "\" to "\\", meaning an escaped backslash,
				// and then just continue because filepath.Match on
				// Windows doesn't allow escaping at all
				regStr += escSL
				continue
			}
			if scan.Peek() != scanner.EOF {
				regStr += `\` + string(scan.Next())
			} else {
				regStr += `\`
			}
		} else {
			regStr += string(ch)
		}
	}

	regStr += "$"

	re, err := regexp.Compile(regStr)
	if err != nil {
		return err
	}

	p.regexp = re
	return nil
}
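
// Illustrative worked example (not part of the vendored source): with a "/"
// path separator, compile turns the pattern "docs/**/*.md" into the regexp
// ^docs/(.*/)?[^/]*\.md$: here "**/" matches zero or more directory levels,
// "*" stops at "/", and "." is escaped to match literally.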

// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
	pm, err := NewPatternMatcher(patterns)
	if err != nil {
		return false, err
	}
	file = filepath.Clean(file)

	if file == "." {
		// Don't let them exclude everything, kind of silly.
		return false, nil
	}

	return pm.Matches(file)
}

// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
	cleanSrc := filepath.Clean(src)
	cleanDst := filepath.Clean(dst)
	if cleanSrc == cleanDst {
		return 0, nil
	}
	sf, err := os.Open(cleanSrc)
	if err != nil {
		return 0, err
	}
	defer sf.Close()
	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
		return 0, err
	}
	df, err := os.Create(cleanDst)
	if err != nil {
		return 0, err
	}
	defer df.Close()
	return io.Copy(df, sf)
}

// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link may not be a file.
func ReadSymlinkedDirectory(path string) (string, error) {
	var realPath string
	var err error
	if realPath, err = filepath.Abs(path); err != nil {
		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
	}
	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
	}
	realPathInfo, err := os.Stat(realPath)
	if err != nil {
		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
	}
	if !realPathInfo.Mode().IsDir() {
		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
	}
	return realPath, nil
}

// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			if isDir {
				return os.MkdirAll(path, 0755)
			}
			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
				return err
			}
			f, err := os.OpenFile(path, os.O_CREATE, 0755)
			if err != nil {
				return err
			}
			f.Close()
		}
	}
	return nil
}
27
vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
package fileutils

import (
	"os"
	"os/exec"
	"strconv"
	"strings"
)

// GetTotalUsedFds returns the number of used File Descriptors by
// executing `lsof -p PID`
func GetTotalUsedFds() int {
	pid := os.Getpid()

	cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))

	output, err := cmd.CombinedOutput()
	if err != nil {
		return -1
	}

	outputStr := strings.TrimSpace(string(output))

	fds := strings.Split(outputStr, "\n")

	return len(fds) - 1
}
7
vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
package fileutils

// GetTotalUsedFds returns the number of used File Descriptors.
// On Solaris these limits are per process and not systemwide.
func GetTotalUsedFds() int {
	return -1
}
22
vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
// +build linux freebsd

package fileutils

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/sirupsen/logrus"
)

// GetTotalUsedFds returns the number of used File Descriptors by
// reading it via the /proc filesystem.
func GetTotalUsedFds() int {
	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
	} else {
		return len(fds)
	}
	return -1
}
7
vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
package fileutils

// GetTotalUsedFds returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
	return -1
}
279
vendor/github.com/docker/docker/pkg/idtools/idtools.go
generated
vendored
Normal file
@ -0,0 +1,279 @@
package idtools

import (
	"bufio"
	"fmt"
	"os"
	"sort"
	"strconv"
	"strings"
)

// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
	ContainerID int `json:"container_id"`
	HostID      int `json:"host_id"`
	Size        int `json:"size"`
}

type subIDRange struct {
	Start  int
	Length int
}

type ranges []subIDRange

func (e ranges) Len() int           { return len(e) }
func (e ranges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }

const (
	subuidFileName string = "/etc/subuid"
	subgidFileName string = "/etc/subgid"
)

// MkdirAllAs creates a directory (including any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
// Deprecated: Use MkdirAllAndChown
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
}

// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership.
// Deprecated: Use MkdirAndChown with an IDPair
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
}

// MkdirAllAndChown creates a directory (including any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error {
	return mkdirAs(path, mode, ids.UID, ids.GID, true, true)
}

// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership.
func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error {
	return mkdirAs(path, mode, ids.UID, ids.GID, false, true)
}

// MkdirAllAndChownNew creates a directory (including any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership will be performed.
func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error {
	return mkdirAs(path, mode, ids.UID, ids.GID, true, false)
}

// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
	uid, err := toHost(0, uidMap)
	if err != nil {
		return -1, -1, err
	}
	gid, err := toHost(0, gidMap)
	if err != nil {
		return -1, -1, err
	}
	return uid, gid, nil
}

// toContainer takes an id mapping, and uses it to translate a
// host ID to the remapped ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func toContainer(hostID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return hostID, nil
	}
	for _, m := range idMap {
		if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
			contID := m.ContainerID + (hostID - m.HostID)
			return contID, nil
		}
	}
	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}

// toHost takes an id mapping and a remapped ID, and translates the
// ID to the mapped host ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id #
func toHost(contID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return contID, nil
	}
	for _, m := range idMap {
		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
			hostID := m.HostID + (contID - m.ContainerID)
			return hostID, nil
		}
	}
	return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
}
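
// Illustrative worked example (not part of the vendored source): with the
// single entry IDMap{ContainerID: 0, HostID: 100000, Size: 65536},
// toHost(0, idMap) returns 100000 and toContainer(100001, idMap) returns 1;
// ids outside [100000, 165535] on the host side return an error.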

// IDPair is a UID and GID pair
type IDPair struct {
	UID int
	GID int
}

// IDMappings contains the mappings of UIDs and GIDs
type IDMappings struct {
	uids []IDMap
	gids []IDMap
}

// NewIDMappings takes a requested user and group name and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func NewIDMappings(username, groupname string) (*IDMappings, error) {
	subuidRanges, err := parseSubuid(username)
	if err != nil {
		return nil, err
	}
	subgidRanges, err := parseSubgid(groupname)
	if err != nil {
		return nil, err
	}
	if len(subuidRanges) == 0 {
		return nil, fmt.Errorf("No subuid ranges found for user %q", username)
	}
	if len(subgidRanges) == 0 {
		return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
	}

	return &IDMappings{
		uids: createIDMap(subuidRanges),
		gids: createIDMap(subgidRanges),
	}, nil
}

// NewIDMappingsFromMaps creates a new mapping from two slices
// Deprecated: this is a temporary shim while transitioning to IDMapping
func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
	return &IDMappings{uids: uids, gids: gids}
}

// RootPair returns a uid and gid pair for the root user. The error is ignored
// because a root user always exists, and the defaults are correct when the uid
// and gid maps are empty.
func (i *IDMappings) RootPair() IDPair {
	uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
	return IDPair{UID: uid, GID: gid}
}

// ToHost returns the host UID and GID for the container uid, gid.
// Remapping is only performed if the ids aren't already the remapped root ids
func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
	var err error
	target := i.RootPair()

	if pair.UID != target.UID {
		target.UID, err = toHost(pair.UID, i.uids)
		if err != nil {
			return target, err
		}
	}

	if pair.GID != target.GID {
		target.GID, err = toHost(pair.GID, i.gids)
	}
	return target, err
}

// ToContainer returns the container UID and GID for the host uid and gid
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
	uid, err := toContainer(pair.UID, i.uids)
	if err != nil {
		return -1, -1, err
	}
	gid, err := toContainer(pair.GID, i.gids)
	return uid, gid, err
}

// Empty returns true if there are no id mappings
func (i *IDMappings) Empty() bool {
	return len(i.uids) == 0 && len(i.gids) == 0
}
// UIDs return the UID mapping
|
||||
// TODO: remove this once everything has been refactored to use pairs
|
||||
func (i *IDMappings) UIDs() []IDMap {
|
||||
return i.uids
|
||||
}
|
||||
|
||||
// GIDs return the UID mapping
|
||||
// TODO: remove this once everything has been refactored to use pairs
|
||||
func (i *IDMappings) GIDs() []IDMap {
|
||||
return i.gids
|
||||
}
|
||||
|
||||
func createIDMap(subidRanges ranges) []IDMap {
|
||||
idMap := []IDMap{}
|
||||
|
||||
// sort the ranges by lowest ID first
|
||||
sort.Sort(subidRanges)
|
||||
containerID := 0
|
||||
for _, idrange := range subidRanges {
|
||||
idMap = append(idMap, IDMap{
|
||||
ContainerID: containerID,
|
||||
HostID: idrange.Start,
|
||||
Size: idrange.Length,
|
||||
})
|
||||
containerID = containerID + idrange.Length
|
||||
}
|
||||
return idMap
|
||||
}
|
||||
|
||||
func parseSubuid(username string) (ranges, error) {
|
||||
return parseSubidFile(subuidFileName, username)
|
||||
}
|
||||
|
||||
func parseSubgid(username string) (ranges, error) {
|
||||
return parseSubidFile(subgidFileName, username)
|
||||
}
|
||||
|
||||
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
|
||||
// and return all found ranges for a specified username. If the special value
|
||||
// "ALL" is supplied for username, then all ranges in the file will be returned
|
||||
func parseSubidFile(path, username string) (ranges, error) {
|
||||
var rangeList ranges
|
||||
|
||||
subidFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return rangeList, err
|
||||
}
|
||||
defer subidFile.Close()
|
||||
|
||||
s := bufio.NewScanner(subidFile)
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return rangeList, err
|
||||
}
|
||||
|
||||
text := strings.TrimSpace(s.Text())
|
||||
if text == "" || strings.HasPrefix(text, "#") {
|
||||
continue
|
||||
}
|
||||
parts := strings.Split(text, ":")
|
||||
if len(parts) != 3 {
|
||||
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
|
||||
}
|
||||
if parts[0] == username || username == "ALL" {
|
||||
startid, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
|
||||
}
|
||||
length, err := strconv.Atoi(parts[2])
|
||||
if err != nil {
|
||||
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
|
||||
}
|
||||
rangeList = append(rangeList, subIDRange{startid, length})
|
||||
}
|
||||
}
|
||||
return rangeList, nil
|
||||
}
|
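Usage sketch (not part of the vendored code): the exported wrappers make the translation arithmetic above concrete. The subordinate range below is hypothetical; a real mapping comes from NewIDMappings, which reads /etc/sub{uid,gid} lines of the form "user:startID:length", e.g. "dockremap:100000:65536".

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Hypothetical range: container IDs 0..65535 map to host IDs 100000..165535.
	maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	m := idtools.NewIDMappingsFromMaps(maps, maps)

	// toHost arithmetic: host = HostID + (contID - ContainerID), so 1000 -> 101000.
	host, err := m.ToHost(idtools.IDPair{UID: 1000, GID: 1000})
	if err != nil {
		panic(err)
	}
	fmt.Printf("container 1000:1000 -> host %d:%d\n", host.UID, host.GID)

	// toContainer inverts it: 101000 -> 1000.
	uid, gid, err := m.ToContainer(idtools.IDPair{UID: 101000, GID: 101000})
	if err != nil {
		panic(err)
	}
	fmt.Printf("host 101000:101000 -> container %d:%d\n", uid, gid)
}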
204 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go generated vendored Normal file
@ -0,0 +1,204 @@
// +build !windows

package idtools

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/docker/docker/pkg/system"
	"github.com/opencontainers/runc/libcontainer/user"
)

var (
	entOnce   sync.Once
	getentCmd string
)

func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
	// make an array containing the original path asked for, plus (for mkAll == true)
	// all path components leading up to the complete path that don't exist before we MkdirAll
	// so that we can chown all of them properly at the end. If chownExisting is false, we won't
	// chown the full directory path if it exists
	var paths []string
	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
		paths = []string{path}
	} else if err == nil && chownExisting {
		// short-circuit--we were called with an existing directory and chown was requested
		return os.Chown(path, ownerUID, ownerGID)
	} else if err == nil {
		// nothing to do; directory path fully exists already and chown was NOT requested
		return nil
	}

	if mkAll {
		// walk back to "/" looking for directories which do not exist
		// and add them to the paths array for chown after creation
		dirPath := path
		for {
			dirPath = filepath.Dir(dirPath)
			if dirPath == "/" {
				break
			}
			if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
				paths = append(paths, dirPath)
			}
		}
		if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
			return err
		}
	} else {
		if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
			return err
		}
	}
	// even if it existed, we will chown the requested path + any subpaths that
	// didn't exist when we called MkdirAll
	for _, pathComponent := range paths {
		if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
			return err
		}
	}
	return nil
}

// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
func CanAccess(path string, pair IDPair) bool {
	statInfo, err := system.Stat(path)
	if err != nil {
		return false
	}
	fileMode := os.FileMode(statInfo.Mode())
	permBits := fileMode.Perm()
	return accessible(statInfo.UID() == uint32(pair.UID),
		statInfo.GID() == uint32(pair.GID), permBits)
}

func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
	if isOwner && (perms&0100 == 0100) {
		return true
	}
	if isGroup && (perms&0010 == 0010) {
		return true
	}
	if perms&0001 == 0001 {
		return true
	}
	return false
}

// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUser(username string) (user.User, error) {
	// first try a local system files lookup using existing capabilities
	usr, err := user.LookupUser(username)
	if err == nil {
		return usr, nil
	}
	// local files lookup failed; attempt to call `getent` to query configured passwd dbs
	usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
	if err != nil {
		return user.User{}, err
	}
	return usr, nil
}

// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUID(uid int) (user.User, error) {
	// first try a local system files lookup using existing capabilities
	usr, err := user.LookupUid(uid)
	if err == nil {
		return usr, nil
	}
	// local files lookup failed; attempt to call `getent` to query configured passwd dbs
	return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
}

func getentUser(args string) (user.User, error) {
	reader, err := callGetent(args)
	if err != nil {
		return user.User{}, err
	}
	users, err := user.ParsePasswd(reader)
	if err != nil {
		return user.User{}, err
	}
	if len(users) == 0 {
		return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
	}
	return users[0], nil
}

// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGroup(groupname string) (user.Group, error) {
	// first try a local system files lookup using existing capabilities
	group, err := user.LookupGroup(groupname)
	if err == nil {
		return group, nil
	}
	// local files lookup failed; attempt to call `getent` to query configured group dbs
	return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
}

// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGID(gid int) (user.Group, error) {
	// first try a local system files lookup using existing capabilities
	group, err := user.LookupGid(gid)
	if err == nil {
		return group, nil
	}
	// local files lookup failed; attempt to call `getent` to query configured group dbs
	return getentGroup(fmt.Sprintf("%s %d", "group", gid))
}

func getentGroup(args string) (user.Group, error) {
	reader, err := callGetent(args)
	if err != nil {
		return user.Group{}, err
	}
	groups, err := user.ParseGroup(reader)
	if err != nil {
		return user.Group{}, err
	}
	if len(groups) == 0 {
		return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
	}
	return groups[0], nil
}

func callGetent(args string) (io.Reader, error) {
	entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
	// if no `getent` command on host, can't do anything else
	if getentCmd == "" {
		return nil, fmt.Errorf("unable to find getent command on the host")
	}
	out, err := execCmd(getentCmd, args)
	if err != nil {
		exitCode, errC := system.GetExitCode(err)
		if errC != nil {
			return nil, err
		}
		switch exitCode {
		case 1:
			return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
		case 2:
			terms := strings.Split(args, " ")
			return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
		case 3:
			return nil, fmt.Errorf("getent database doesn't support enumeration")
		default:
			return nil, err
		}
	}
	return bytes.NewReader(out), nil
}
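Usage sketch for the two exported entry points implemented above (the path, mode, and IDs are illustrative):

package main

import (
	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Hypothetical remapped root for a user-namespaced daemon.
	remappedRoot := idtools.IDPair{UID: 100000, GID: 100000}

	// Creates /tmp/example/a/b, chowning every component that did not already
	// exist, plus the leaf itself even if it existed (chownExisting == true).
	if err := idtools.MkdirAllAndChown("/tmp/example/a/b", 0700, remappedRoot); err != nil {
		panic(err)
	}

	// Execute-bit check as owner, group, or other.
	if !idtools.CanAccess("/tmp/example/a/b", remappedRoot) {
		panic("remapped root cannot traverse the new directory")
	}
}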
25 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go generated vendored Normal file
@ -0,0 +1,25 @@
// +build windows

package idtools

import (
	"os"

	"github.com/docker/docker/pkg/system"
)

// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
	if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
		return err
	}
	return nil
}

// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
// Windows does not require/support this function, so always return true
func CanAccess(path string, pair IDPair) bool {
	return true
}
164 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go generated vendored Normal file
@ -0,0 +1,164 @@
package idtools

import (
	"fmt"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
)

// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
// useradd -r -s /bin/false <username>

var (
	once        sync.Once
	userCommand string

	cmdTemplates = map[string]string{
		"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
		"useradd": "-r -s /bin/false %s",
		"usermod": "-%s %d-%d %s",
	}

	idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
	// default length for a UID/GID subordinate range
	defaultRangeLen   = 65536
	defaultRangeStart = 100000
	userMod           = "usermod"
)

// AddNamespaceRangesUser takes a username and uses the standard system
// utility to create a system user/group pair used to hold the
// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
	if err := addUser(name); err != nil {
		return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
	}

	// Query the system for the created uid and gid pair
	out, err := execCmd("id", name)
	if err != nil {
		return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
	}
	matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
	if len(matches) != 3 {
		return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
	}
	uid, err := strconv.Atoi(matches[1])
	if err != nil {
		return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
	}
	gid, err := strconv.Atoi(matches[2])
	if err != nil {
		return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
	}

	// Now we need to create the subuid/subgid ranges for our new user/group (system users
	// do not get auto-created ranges in subuid/subgid)

	if err := createSubordinateRanges(name); err != nil {
		return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
	}
	return uid, gid, nil
}

func addUser(userName string) error {
	once.Do(func() {
		// set up which commands are used for adding users/groups dependent on distro
		if _, err := resolveBinary("adduser"); err == nil {
			userCommand = "adduser"
		} else if _, err := resolveBinary("useradd"); err == nil {
			userCommand = "useradd"
		}
	})
	if userCommand == "" {
		return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
	}
	args := fmt.Sprintf(cmdTemplates[userCommand], userName)
	out, err := execCmd(userCommand, args)
	if err != nil {
		return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
	}
	return nil
}

func createSubordinateRanges(name string) error {

	// first, we should verify that ranges weren't automatically created
	// by the distro tooling
	ranges, err := parseSubuid(name)
	if err != nil {
		return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
	}
	if len(ranges) == 0 {
		// no UID ranges; let's create one
		startID, err := findNextUIDRange()
		if err != nil {
			return fmt.Errorf("Can't find available subuid range: %v", err)
		}
		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
		if err != nil {
			return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
		}
	}

	ranges, err = parseSubgid(name)
	if err != nil {
		return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
	}
	if len(ranges) == 0 {
		// no GID ranges; let's create one
		startID, err := findNextGIDRange()
		if err != nil {
			return fmt.Errorf("Can't find available subgid range: %v", err)
		}
		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
		if err != nil {
			return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
		}
	}
	return nil
}

func findNextUIDRange() (int, error) {
	ranges, err := parseSubuid("ALL")
	if err != nil {
		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
	}
	sort.Sort(ranges)
	return findNextRangeStart(ranges)
}

func findNextGIDRange() (int, error) {
	ranges, err := parseSubgid("ALL")
	if err != nil {
		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
	}
	sort.Sort(ranges)
	return findNextRangeStart(ranges)
}

func findNextRangeStart(rangeList ranges) (int, error) {
	startID := defaultRangeStart
	for _, arange := range rangeList {
		if wouldOverlap(arange, startID) {
			startID = arange.Start + arange.Length
		}
	}
	return startID, nil
}

func wouldOverlap(arange subIDRange, ID int) bool {
	low := ID
	high := ID + defaultRangeLen
	if (low >= arange.Start && low <= arange.Start+arange.Length) ||
		(high <= arange.Start+arange.Length && high >= arange.Start) {
		return true
	}
	return false
}
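The scan in findNextRangeStart is easiest to follow with numbers. A self-contained sketch, re-declaring the unexported types and constants so it runs on its own:

package main

import "fmt"

// Re-declared locally for the sketch; in the package these are unexported.
type subIDRange struct{ Start, Length int }

const (
	defaultRangeStart = 100000
	defaultRangeLen   = 65536
)

func wouldOverlap(r subIDRange, id int) bool {
	low, high := id, id+defaultRangeLen
	return (low >= r.Start && low <= r.Start+r.Length) ||
		(high >= r.Start && high <= r.Start+r.Length)
}

func main() {
	// Two ranges already allocated in /etc/subuid, sorted by Start.
	existing := []subIDRange{{Start: 100000, Length: 65536}, {Start: 165536, Length: 65536}}
	startID := defaultRangeStart
	for _, r := range existing {
		if wouldOverlap(r, startID) {
			startID = r.Start + r.Length // jump past the conflicting range
		}
	}
	fmt.Println(startID) // 231072: the first candidate that clears both ranges
}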
12 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go generated vendored Normal file
@ -0,0 +1,12 @@
// +build !linux

package idtools

import "fmt"

// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
	return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}
32 vendor/github.com/docker/docker/pkg/idtools/utils_unix.go generated vendored Normal file
@ -0,0 +1,32 @@
// +build !windows

package idtools

import (
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"
)

func resolveBinary(binname string) (string, error) {
	binaryPath, err := exec.LookPath(binname)
	if err != nil {
		return "", err
	}
	resolvedPath, err := filepath.EvalSymlinks(binaryPath)
	if err != nil {
		return "", err
	}
	// only return no error if the final resolved binary basename
	// matches what was searched for
	if filepath.Base(resolvedPath) == binname {
		return resolvedPath, nil
	}
	return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}

func execCmd(cmd, args string) ([]byte, error) {
	execCmd := exec.Command(cmd, strings.Split(args, " ")...)
	return execCmd.CombinedOutput()
}
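The basename comparison in resolveBinary exists to reject PATH entries that are symlinks to a differently named binary (a busybox-style multiplexer, say). An illustrative fragment using the same standard-library calls:

package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

func main() {
	// Suppose "getent" resolves through PATH to a symlink pointing at
	// /bin/busybox: EvalSymlinks yields ".../busybox", whose basename is
	// not "getent", so resolveBinary would reject it.
	p, err := exec.LookPath("getent")
	if err != nil {
		fmt.Println("getent not in PATH")
		return
	}
	resolved, err := filepath.EvalSymlinks(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved, filepath.Base(resolved) == "getent")
}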
137 vendor/github.com/docker/docker/pkg/pools/pools.go generated vendored Normal file
@ -0,0 +1,137 @@
// Package pools provides a collection of pools which provide various
// data types with buffers. These can be used to lower the number of
// memory allocations and reuse buffers.
//
// New pools should be added to this package to allow them to be
// shared across packages.
//
// Utility functions which operate on pools should be added to this
// package to allow them to be reused.
package pools

import (
	"bufio"
	"io"
	"sync"

	"github.com/docker/docker/pkg/ioutils"
)

const buffer32K = 32 * 1024

var (
	// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
	// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
	buffer32KPool      = newBufferPoolWithSize(buffer32K)
)

// BufioReaderPool is a bufio reader that uses sync.Pool.
type BufioReaderPool struct {
	pool sync.Pool
}

// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
	return &BufioReaderPool{
		pool: sync.Pool{
			New: func() interface{} { return bufio.NewReaderSize(nil, size) },
		},
	}
}

// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
	buf := bufPool.pool.Get().(*bufio.Reader)
	buf.Reset(r)
	return buf
}

// Put puts the bufio.Reader back into the pool.
func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
	b.Reset(nil)
	bufPool.pool.Put(b)
}

type bufferPool struct {
	pool sync.Pool
}

func newBufferPoolWithSize(size int) *bufferPool {
	return &bufferPool{
		pool: sync.Pool{
			New: func() interface{} { return make([]byte, size) },
		},
	}
}

func (bp *bufferPool) Get() []byte {
	return bp.pool.Get().([]byte)
}

func (bp *bufferPool) Put(b []byte) {
	bp.pool.Put(b)
}

// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
	buf := buffer32KPool.Get()
	written, err = io.CopyBuffer(dst, src, buf)
	buffer32KPool.Put(buf)
	return
}

// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
// into the pool and closes the reader if it's an io.ReadCloser.
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
	return ioutils.NewReadCloserWrapper(r, func() error {
		if readCloser, ok := r.(io.ReadCloser); ok {
			readCloser.Close()
		}
		bufPool.Put(buf)
		return nil
	})
}

// BufioWriterPool is a bufio writer that uses sync.Pool.
type BufioWriterPool struct {
	pool sync.Pool
}

// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
	return &BufioWriterPool{
		pool: sync.Pool{
			New: func() interface{} { return bufio.NewWriterSize(nil, size) },
		},
	}
}

// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
	buf := bufPool.pool.Get().(*bufio.Writer)
	buf.Reset(w)
	return buf
}

// Put puts the bufio.Writer back into the pool.
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
	b.Reset(nil)
	bufPool.pool.Put(b)
}

// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
// into the pool and closes the writer if it's an io.WriteCloser.
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
	return ioutils.NewWriteCloserWrapper(w, func() error {
		buf.Flush()
		if writeCloser, ok := w.(io.WriteCloser); ok {
			writeCloser.Close()
		}
		bufPool.Put(buf)
		return nil
	})
}
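Usage sketch for the reader pool and Copy (the input path is arbitrary):

package main

import (
	"os"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	src, err := os.Open("/etc/hostname") // any readable file will do
	if err != nil {
		panic(err)
	}
	defer src.Close()

	// Borrow a 32K bufio.Reader from the shared pool; Put returns it for reuse.
	r := pools.BufioReader32KPool.Get(src)
	defer pools.BufioReader32KPool.Put(r)

	// Copy reuses a pooled 32K scratch buffer instead of allocating one per call.
	if _, err := pools.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}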
11 vendor/github.com/docker/docker/pkg/promise/promise.go generated vendored Normal file
@ -0,0 +1,11 @@
package promise

// Go is a basic promise implementation: it wraps a call to a function in a
// goroutine and returns a channel which will later return the function's
// return value.
func Go(f func() error) chan error {
	ch := make(chan error, 1)
	go func() {
		ch <- f()
	}()
	return ch
}
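Usage sketch: because the channel is buffered with capacity 1, the goroutine can deliver its result and exit even if the caller never receives.

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/promise"
)

func main() {
	ch := promise.Go(func() error {
		time.Sleep(100 * time.Millisecond) // stand-in for slow work
		return nil
	})
	// Do other work here, then collect the result when it is needed.
	if err := <-ch; err != nil {
		fmt.Println("work failed:", err)
	}
}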
5 vendor/github.com/docker/docker/pkg/reexec/README.md generated vendored Normal file
@ -0,0 +1,5 @@
# reexec

The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require
because of the forking limitations of using Go. Handlers can be registered with a name, and the
argv[0] of the re-exec'd binary is used to find and execute custom init paths.
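Usage sketch of the mechanism the README describes (the handler name "my-init" is invented): register an initializer, gate main() on Init(), and start the child via Command, whose Args[0] selects the handler.

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Runs instead of main() when the process is re-exec'd as "my-init".
	reexec.Register("my-init", func() {
		fmt.Println("running in the re-exec'd child")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		return // we were the child; the registered handler already ran
	}
	cmd := reexec.Command("my-init") // Args[0] selects the handler
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}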
30 vendor/github.com/docker/docker/pkg/reexec/command_linux.go generated vendored Normal file
@ -0,0 +1,30 @@
// +build linux

package reexec

import (
	"os/exec"
	"syscall"

	"golang.org/x/sys/unix"
)

// Self returns the path to the current process's binary.
// Returns "/proc/self/exe".
func Self() string {
	return "/proc/self/exe"
}

// Command returns *exec.Cmd which has Path as current binary. It also sets
// SysProcAttr.Pdeathsig to SIGTERM.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
	return &exec.Cmd{
		Path: Self(),
		Args: args,
		SysProcAttr: &syscall.SysProcAttr{
			Pdeathsig: unix.SIGTERM,
		},
	}
}
23 vendor/github.com/docker/docker/pkg/reexec/command_unix.go generated vendored Normal file
@ -0,0 +1,23 @@
// +build freebsd solaris darwin

package reexec

import (
	"os/exec"
)

// Self returns the path to the current process's binary.
// Uses os.Args[0].
func Self() string {
	return naiveSelf()
}

// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd {
	return &exec.Cmd{
		Path: Self(),
		Args: args,
	}
}
12 vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go generated vendored Normal file
@ -0,0 +1,12 @@
// +build !linux,!windows,!freebsd,!solaris,!darwin

package reexec

import (
	"os/exec"
)

// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
func Command(args ...string) *exec.Cmd {
	return nil
}
vendor/github.com/docker/docker/pkg/reexec/command_windows.go
generated
vendored
Normal file
23
vendor/github.com/docker/docker/pkg/reexec/command_windows.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
// +build windows
|
||||
|
||||
package reexec
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// Self returns the path to the current process's binary.
|
||||
// Uses os.Args[0].
|
||||
func Self() string {
|
||||
return naiveSelf()
|
||||
}
|
||||
|
||||
// Command returns *exec.Cmd which has Path as current binary.
|
||||
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
|
||||
// be set to "C:\docker.exe".
|
||||
func Command(args ...string) *exec.Cmd {
|
||||
return &exec.Cmd{
|
||||
Path: Self(),
|
||||
Args: args,
|
||||
}
|
||||
}
|
47 vendor/github.com/docker/docker/pkg/reexec/reexec.go generated vendored Normal file
@ -0,0 +1,47 @@
package reexec

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

var registeredInitializers = make(map[string]func())

// Register adds an initialization func under the specified name
func Register(name string, initializer func()) {
	if _, exists := registeredInitializers[name]; exists {
		panic(fmt.Sprintf("reexec func already registered under name %q", name))
	}

	registeredInitializers[name] = initializer
}

// Init is called as the first part of the exec process and returns true if an
// initialization function was called.
func Init() bool {
	initializer, exists := registeredInitializers[os.Args[0]]
	if exists {
		initializer()

		return true
	}
	return false
}

func naiveSelf() string {
	name := os.Args[0]
	if filepath.Base(name) == name {
		if lp, err := exec.LookPath(name); err == nil {
			return lp
		}
	}
	// handle conversion of relative paths to absolute
	if absName, err := filepath.Abs(name); err == nil {
		return absName
	}
	// if we couldn't get absolute name, return original
	// (NOTE: Go only errors on Abs() if os.Getwd fails)
	return name
}