Merge pull request #782 from Random-Liu/update-containerd

Update containerd
Lantao Liu 2018-05-23 13:26:21 -07:00 committed by GitHub
commit 450eb09a68
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
100 changed files with 2499 additions and 1522 deletions


@ -155,6 +155,9 @@ if [ -n "${network_policy_provider}" ] && [ "${network_policy_provider}" != "non
fi
log_level="${CONTAINERD_LOG_LEVEL:-"info"}"
cat > ${config_path} <<EOF
# Kubernetes doesn't use the containerd restart manager.
disabled_plugins = ["restart"]
[debug]
level = "${log_level}"


@ -100,20 +100,14 @@ func (c *criService) PullImage(ctx context.Context, r *runtime.PullImageRequest)
// image has already been converted.
isSchema1 := desc.MediaType == containerdimages.MediaTypeDockerSchema1Manifest
// TODO(mikebrow): add truncIndex for image id
image, err := c.client.Pull(ctx, ref,
containerd.WithSchema1Conversion,
containerd.WithResolver(resolver),
containerd.WithPullSnapshotter(c.config.ContainerdConfig.Snapshotter),
containerd.WithPullUnpack,
)
if err != nil {
return nil, errors.Wrapf(err, "failed to pull image %q", ref)
}
// Do best effort unpack.
logrus.Debugf("Unpack image %q", imageRef)
if err := image.Unpack(ctx, c.config.ContainerdConfig.Snapshotter); err != nil {
logrus.WithError(err).Warnf("Failed to unpack image %q", imageRef)
// Do not fail image pulling. Unpack will be retried before container creation.
return nil, errors.Wrapf(err, "failed to pull and unpack image %q", ref)
}
// Get image information.
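For orientation, a minimal sketch of the combined pull-and-unpack call that the updated code above relies on. This is not part of the diff; the socket path, namespace, image reference, and snapshotter name are illustrative assumptions.

// Assumed imports: context, github.com/containerd/containerd,
// github.com/containerd/containerd/namespaces.
func pullAndUnpack() error {
	client, err := containerd.New("/run/containerd/containerd.sock") // assumed socket path
	if err != nil {
		return err
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "k8s.io") // assumed namespace
	// WithPullUnpack unpacks the image into the chosen snapshotter as part of the
	// pull, so no separate best-effort Unpack step is needed afterwards.
	_, err = client.Pull(ctx, "docker.io/library/busybox:latest",
		containerd.WithPullUnpack,
		containerd.WithPullSnapshotter("overlayfs"),
	)
	return err
}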


@ -4,11 +4,11 @@ github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
github.com/containerd/containerd 1381f8fddc4f826e12b48d46c9def347d5aa338a
github.com/containerd/containerd d1435e6e4dcffd99e0da396ff771b5bbe0d93f5e
github.com/containerd/continuity 3e8f2ea4b190484acb976a5b378d373429639a1a
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/containerd/go-cni f2d7272f12d045b16ed924f50e91f9f9cecc55a7
github.com/containerd/go-runc bcb223a061a3dd7de1a89c0b402a60f4dd9bd307
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
github.com/containernetworking/cni v0.6.0
github.com/containernetworking/plugins v0.7.0
@ -34,8 +34,8 @@ github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
github.com/json-iterator/go 1.0.4
github.com/matttproud/golang_protobuf_extensions v1.0.0
github.com/Microsoft/go-winio v0.4.5
github.com/Microsoft/hcsshim v0.6.7
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
github.com/Microsoft/hcsshim v0.6.10
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
github.com/opencontainers/runtime-spec v1.0.1
@ -56,7 +56,7 @@ github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4


@ -10,7 +10,7 @@ import (
)
type baseLayerWriter struct {
root string
root *os.File
f *os.File
bw *winio.BackupFileWriter
err error
@ -26,10 +26,10 @@ type dirInfo struct {
// reapplyDirectoryTimes reapplies directory modification, creation, etc. times
// after processing of the directory tree has completed. The times are expected
// to be ordered such that parent directories come before child directories.
func reapplyDirectoryTimes(dis []dirInfo) error {
func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error {
for i := range dis {
di := &dis[len(dis)-i-1] // reverse order: process child directories first
f, err := winio.OpenForBackup(di.path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
f, err := openRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, _FILE_OPEN, _FILE_DIRECTORY_FILE)
if err != nil {
return err
}
@ -75,12 +75,6 @@ func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err e
w.hasUtilityVM = true
}
path := filepath.Join(w.root, name)
path, err = makeLongAbsPath(path)
if err != nil {
return err
}
var f *os.File
defer func() {
if f != nil {
@ -88,27 +82,23 @@ func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err e
}
}()
createmode := uint32(syscall.CREATE_NEW)
extraFlags := uint32(0)
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
err := os.Mkdir(path, 0)
if err != nil && !os.IsExist(err) {
return err
}
createmode = syscall.OPEN_EXISTING
extraFlags |= _FILE_DIRECTORY_FILE
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
w.dirInfo = append(w.dirInfo, dirInfo{path, *fileInfo})
w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo})
}
}
mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY)
f, err = winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createmode)
f, err = openRelative(name, w.root, mode, syscall.FILE_SHARE_READ, _FILE_CREATE, extraFlags)
if err != nil {
return makeError(err, "Failed to OpenForBackup", path)
return makeError(err, "Failed to openRelative", name)
}
err = winio.SetFileBasicInfo(f, fileInfo)
if err != nil {
return makeError(err, "Failed to SetFileBasicInfo", path)
return makeError(err, "Failed to SetFileBasicInfo", name)
}
w.f = f
@ -129,17 +119,7 @@ func (w *baseLayerWriter) AddLink(name string, target string) (err error) {
return err
}
linkpath, err := makeLongAbsPath(filepath.Join(w.root, name))
if err != nil {
return err
}
linktarget, err := makeLongAbsPath(filepath.Join(w.root, target))
if err != nil {
return err
}
return os.Link(linktarget, linkpath)
return linkRelative(target, w.root, name, w.root)
}
func (w *baseLayerWriter) Remove(name string) error {
@ -155,6 +135,10 @@ func (w *baseLayerWriter) Write(b []byte) (int, error) {
}
func (w *baseLayerWriter) Close() error {
defer func() {
w.root.Close()
w.root = nil
}()
err := w.closeCurrentFile()
if err != nil {
return err
@ -162,18 +146,22 @@ func (w *baseLayerWriter) Close() error {
if w.err == nil {
// Restore the file times of all the directories, since they may have
// been modified by creating child directories.
err = reapplyDirectoryTimes(w.dirInfo)
err = reapplyDirectoryTimes(w.root, w.dirInfo)
if err != nil {
return err
}
err = ProcessBaseLayer(w.root)
err = ProcessBaseLayer(w.root.Name())
if err != nil {
return err
}
if w.hasUtilityVM {
err = ProcessUtilityVMImage(filepath.Join(w.root, "UtilityVM"))
err := ensureNotReparsePointRelative("UtilityVM", w.root)
if err != nil {
return err
}
err = ProcessUtilityVMImage(filepath.Join(w.root.Name(), "UtilityVM"))
if err != nil {
return err
}


@ -11,7 +11,7 @@ import (
"github.com/sirupsen/logrus"
)
//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go
//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go safeopen.go
//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId


@ -129,37 +129,39 @@ type legacyLayerWriterWrapper struct {
}
func (r *legacyLayerWriterWrapper) Close() error {
defer os.RemoveAll(r.root)
defer os.RemoveAll(r.root.Name())
defer r.legacyLayerWriter.CloseRoots()
err := r.legacyLayerWriter.Close()
if err != nil {
return err
}
// Use the original path here because ImportLayer does not support long paths for the source in TP5.
// But do use a long path for the destination to work around another bug with directories
// with MAX_PATH - 12 < length < MAX_PATH.
info := r.info
fullPath, err := makeLongAbsPath(filepath.Join(info.HomeDir, r.layerID))
if err != nil {
info.HomeDir = ""
if err = ImportLayer(info, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil {
return err
}
info.HomeDir = ""
if err = ImportLayer(info, fullPath, r.path, r.parentLayerPaths); err != nil {
for _, name := range r.Tombstones {
if err = removeRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) {
return err
}
}
// Add any hard links that were collected.
for _, lnk := range r.PendingLinks {
if err = os.Remove(lnk.Path); err != nil && !os.IsNotExist(err) {
if err = removeRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) {
return err
}
if err = os.Link(lnk.Target, lnk.Path); err != nil {
if err = linkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil {
return err
}
}
// Prepare the utility VM for use if one is present in the layer.
if r.HasUtilityVM {
err = ProcessUtilityVMImage(filepath.Join(fullPath, "UtilityVM"))
err := ensureNotReparsePointRelative("UtilityVM", r.destRoot)
if err != nil {
return err
}
err = ProcessUtilityVMImage(filepath.Join(r.destRoot.Name(), "UtilityVM"))
if err != nil {
return err
}
@ -173,8 +175,12 @@ func (r *legacyLayerWriterWrapper) Close() error {
func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
if len(parentLayerPaths) == 0 {
// This is a base layer. It gets imported differently.
f, err := openRoot(filepath.Join(info.HomeDir, layerID))
if err != nil {
return nil, err
}
return &baseLayerWriter{
root: filepath.Join(info.HomeDir, layerID),
root: f,
}, nil
}
@ -185,8 +191,12 @@ func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string)
if err != nil {
return nil, err
}
w, err := newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID))
if err != nil {
return nil, err
}
return &legacyLayerWriterWrapper{
legacyLayerWriter: newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID)),
legacyLayerWriter: w,
info: info,
layerID: layerID,
path: path,


@ -121,6 +121,16 @@ func (r *legacyLayerReader) walkUntilCancelled() error {
if err != nil {
return err
}
// Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048.
// Handle failure from what may be a golang bug in the conversion of
// UTF16 to UTF8 in files which are left in the recycle bin. os.Lstat,
// which is called by filepath.Walk, will fail when a filename contains
// unicode characters. Skip the recycle bin regardless, which is goodness.
if path == filepath.Join(r.root, `Files\$Recycle.Bin`) && info.IsDir() {
return filepath.SkipDir
}
if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") {
return nil
}
@ -326,59 +336,79 @@ func (r *legacyLayerReader) Close() error {
type pendingLink struct {
Path, Target string
TargetRoot *os.File
}
type pendingDir struct {
Path string
Root *os.File
}
type legacyLayerWriter struct {
root string
parentRoots []string
destRoot string
root *os.File
destRoot *os.File
parentRoots []*os.File
currentFile *os.File
currentFileName string
currentFileRoot *os.File
backupWriter *winio.BackupFileWriter
tombstones []string
pathFixed bool
Tombstones []string
HasUtilityVM bool
uvmDi []dirInfo
addedFiles map[string]bool
PendingLinks []pendingLink
pendingDirs []pendingDir
currentIsDir bool
}
// newLegacyLayerWriter returns a LayerWriter that can write the container layer
// transport format to disk.
func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) *legacyLayerWriter {
return &legacyLayerWriter{
root: root,
parentRoots: parentRoots,
destRoot: destRoot,
func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) {
w = &legacyLayerWriter{
addedFiles: make(map[string]bool),
}
defer func() {
if err != nil {
w.CloseRoots()
w = nil
}
}()
w.root, err = openRoot(root)
if err != nil {
return
}
w.destRoot, err = openRoot(destRoot)
if err != nil {
return
}
for _, r := range parentRoots {
f, err := openRoot(r)
if err != nil {
return w, err
}
w.parentRoots = append(w.parentRoots, f)
}
return
}
func (w *legacyLayerWriter) init() error {
if !w.pathFixed {
path, err := makeLongAbsPath(w.root)
if err != nil {
return err
func (w *legacyLayerWriter) CloseRoots() {
if w.root != nil {
w.root.Close()
w.root = nil
}
for i, p := range w.parentRoots {
w.parentRoots[i], err = makeLongAbsPath(p)
if err != nil {
return err
if w.destRoot != nil {
w.destRoot.Close()
w.destRoot = nil
}
for i := range w.parentRoots {
w.parentRoots[i].Close()
}
destPath, err := makeLongAbsPath(w.destRoot)
if err != nil {
return err
}
w.root = path
w.destRoot = destPath
w.pathFixed = true
}
return nil
w.parentRoots = nil
}
func (w *legacyLayerWriter) initUtilityVM() error {
if !w.HasUtilityVM {
err := os.Mkdir(filepath.Join(w.destRoot, utilityVMPath), 0)
err := mkdirRelative(utilityVMPath, w.destRoot)
if err != nil {
return err
}
@ -386,7 +416,7 @@ func (w *legacyLayerWriter) initUtilityVM() error {
// clone the utility VM from the parent layer into this layer. Use hard
// links to avoid unnecessary copying, since most of the files are
// immutable.
err = cloneTree(filepath.Join(w.parentRoots[0], utilityVMFilesPath), filepath.Join(w.destRoot, utilityVMFilesPath), mutatedUtilityVMFiles)
err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles)
if err != nil {
return fmt.Errorf("cloning the parent utility VM image failed: %s", err)
}
@ -395,7 +425,40 @@ func (w *legacyLayerWriter) initUtilityVM() error {
return nil
}
func (w *legacyLayerWriter) reset() {
func (w *legacyLayerWriter) reset() error {
if w.currentIsDir {
r := w.currentFile
br := winio.NewBackupStreamReader(r)
// Seek to the beginning of the backup stream, skipping the fileattrs
if _, err := r.Seek(4, io.SeekStart); err != nil {
return err
}
for {
bhdr, err := br.Next()
if err == io.EOF {
// end of backupstream data
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupReparseData:
// The current file is a `.$wcidirs$` metadata file that
// describes a directory reparse point. Delete the placeholder
// directory to prevent future files being added into the
// destination of the reparse point during the ImportLayer call
if err := removeRelative(w.currentFileName, w.currentFileRoot); err != nil {
return err
}
w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot})
default:
// ignore all other stream types, as we only care about directory reparse points
}
}
w.currentIsDir = false
}
if w.backupWriter != nil {
w.backupWriter.Close()
w.backupWriter = nil
@ -403,21 +466,21 @@ func (w *legacyLayerWriter) reset() {
if w.currentFile != nil {
w.currentFile.Close()
w.currentFile = nil
w.currentFileName = ""
w.currentFileRoot = nil
}
return nil
}
// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata
func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) {
createDisposition := uint32(syscall.CREATE_NEW)
if isDir {
err = os.Mkdir(destPath, 0)
if err != nil {
return nil, err
}
createDisposition = syscall.OPEN_EXISTING
}
src, err := openFileOrDir(srcPath, syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, syscall.OPEN_EXISTING)
func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) {
src, err := openRelative(
subPath,
srcRoot,
syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
_FILE_OPEN,
_FILE_OPEN_REPARSE_POINT)
if err != nil {
return nil, err
}
@ -430,7 +493,17 @@ func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio
return nil, err
}
dest, err := openFileOrDir(destPath, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition)
extraFlags := uint32(0)
if isDir {
extraFlags |= _FILE_DIRECTORY_FILE
}
dest, err := openRelative(
subPath,
destRoot,
syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
_FILE_CREATE,
extraFlags)
if err != nil {
return nil, err
}
@ -459,18 +532,21 @@ func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio
// cloneTree clones a directory tree using hard links. It skips hard links for
// the file names in the provided map and just copies those files.
func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error {
func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error {
var di []dirInfo
err := filepath.Walk(srcPath, func(srcFilePath string, info os.FileInfo, err error) error {
err := ensureNotReparsePointRelative(subPath, srcRoot)
if err != nil {
return err
}
err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
relPath, err := filepath.Rel(srcPath, srcFilePath)
relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath)
if err != nil {
return err
}
destFilePath := filepath.Join(destPath, relPath)
fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes
// Directories, reparse points, and files that will be mutated during
@ -482,15 +558,15 @@ func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error {
isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0
if isDir || isReparsePoint || mutatedFiles[relPath] {
fi, err := copyFileWithMetadata(srcFilePath, destFilePath, isDir)
fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir)
if err != nil {
return err
}
if isDir && !isReparsePoint {
di = append(di, dirInfo{path: destFilePath, fileInfo: *fi})
di = append(di, dirInfo{path: relPath, fileInfo: *fi})
}
} else {
err = os.Link(srcFilePath, destFilePath)
err = linkRelative(relPath, srcRoot, relPath, destRoot)
if err != nil {
return err
}
@ -508,13 +584,11 @@ func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error {
return err
}
return reapplyDirectoryTimes(di)
return reapplyDirectoryTimes(destRoot, di)
}
func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
w.reset()
err := w.init()
if err != nil {
if err := w.reset(); err != nil {
return err
}
@ -522,6 +596,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
return w.initUtilityVM()
}
name = filepath.Clean(name)
if hasPathPrefix(name, utilityVMPath) {
if !w.HasUtilityVM {
return errors.New("missing UtilityVM directory")
@ -529,10 +604,9 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath {
return errors.New("invalid UtilityVM layer")
}
path := filepath.Join(w.destRoot, name)
createDisposition := uint32(syscall.OPEN_EXISTING)
createDisposition := uint32(_FILE_OPEN)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
st, err := os.Lstat(path)
st, err := lstatRelative(name, w.destRoot)
if err != nil && !os.IsNotExist(err) {
return err
}
@ -540,37 +614,44 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
// Delete the existing file/directory if it is not the same type as this directory.
existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes
if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 {
if err = os.RemoveAll(path); err != nil {
if err = removeAllRelative(name, w.destRoot); err != nil {
return err
}
st = nil
}
}
if st == nil {
if err = os.Mkdir(path, 0); err != nil {
if err = mkdirRelative(name, w.destRoot); err != nil {
return err
}
}
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
w.uvmDi = append(w.uvmDi, dirInfo{path: path, fileInfo: *fileInfo})
w.uvmDi = append(w.uvmDi, dirInfo{path: name, fileInfo: *fileInfo})
}
} else {
// Overwrite any existing hard link.
err = os.Remove(path)
err := removeRelative(name, w.destRoot)
if err != nil && !os.IsNotExist(err) {
return err
}
createDisposition = syscall.CREATE_NEW
createDisposition = _FILE_CREATE
}
f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition)
f, err := openRelative(
name,
w.destRoot,
syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
createDisposition,
_FILE_OPEN_REPARSE_POINT,
)
if err != nil {
return err
}
defer func() {
if f != nil {
f.Close()
os.Remove(path)
removeRelative(name, w.destRoot)
}
}()
@ -581,28 +662,31 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
w.backupWriter = winio.NewBackupFileWriter(f, true)
w.currentFile = f
w.currentFileName = name
w.currentFileRoot = w.destRoot
w.addedFiles[name] = true
f = nil
return nil
}
path := filepath.Join(w.root, name)
fname := name
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
err := os.Mkdir(path, 0)
err := mkdirRelative(name, w.root)
if err != nil {
return err
}
path += ".$wcidirs$"
fname += ".$wcidirs$"
w.currentIsDir = true
}
f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.CREATE_NEW)
f, err := openRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, _FILE_CREATE, 0)
if err != nil {
return err
}
defer func() {
if f != nil {
f.Close()
os.Remove(path)
removeRelative(fname, w.root)
}
}()
@ -624,19 +708,20 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
}
w.currentFile = f
w.currentFileName = name
w.currentFileRoot = w.root
w.addedFiles[name] = true
f = nil
return nil
}
func (w *legacyLayerWriter) AddLink(name string, target string) error {
w.reset()
err := w.init()
if err != nil {
if err := w.reset(); err != nil {
return err
}
var roots []string
target = filepath.Clean(target)
var roots []*os.File
if hasPathPrefix(target, filesPath) {
// Look for cross-layer hard link targets in the parent layers, since
// nothing is in the destination path yet.
@ -645,7 +730,7 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error {
// Since the utility VM is fully cloned into the destination path
// already, look for cross-layer hard link targets directly in the
// destination path.
roots = []string{w.destRoot}
roots = []*os.File{w.destRoot}
}
if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) {
@ -654,12 +739,12 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error {
// Try to find the target of the link in a previously added file. If that
// fails, search in parent layers.
var selectedRoot string
var selectedRoot *os.File
if _, ok := w.addedFiles[target]; ok {
selectedRoot = w.destRoot
} else {
for _, r := range roots {
if _, err = os.Lstat(filepath.Join(r, target)); err != nil {
if _, err := lstatRelative(target, r); err != nil {
if !os.IsNotExist(err) {
return err
}
@ -668,22 +753,25 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error {
break
}
}
if selectedRoot == "" {
if selectedRoot == nil {
return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target)
}
}
// The link can't be written until after the ImportLayer call.
w.PendingLinks = append(w.PendingLinks, pendingLink{
Path: filepath.Join(w.destRoot, name),
Target: filepath.Join(selectedRoot, target),
Path: name,
Target: target,
TargetRoot: selectedRoot,
})
w.addedFiles[name] = true
return nil
}
func (w *legacyLayerWriter) Remove(name string) error {
name = filepath.Clean(name)
if hasPathPrefix(name, filesPath) {
w.tombstones = append(w.tombstones, name[len(filesPath)+1:])
w.Tombstones = append(w.Tombstones, name)
} else if hasPathPrefix(name, utilityVMFilesPath) {
err := w.initUtilityVM()
if err != nil {
@ -692,11 +780,10 @@ func (w *legacyLayerWriter) Remove(name string) error {
// Make sure the path exists; os.RemoveAll will not fail if the file is
// already gone, and this needs to be a fatal error for diagnostics
// purposes.
path := filepath.Join(w.destRoot, name)
if _, err := os.Lstat(path); err != nil {
if _, err := lstatRelative(name, w.destRoot); err != nil {
return err
}
err = os.RemoveAll(path)
err = removeAllRelative(name, w.destRoot)
if err != nil {
return err
}
@ -718,28 +805,20 @@ func (w *legacyLayerWriter) Write(b []byte) (int, error) {
}
func (w *legacyLayerWriter) Close() error {
w.reset()
err := w.init()
if err != nil {
if err := w.reset(); err != nil {
return err
}
tf, err := os.Create(filepath.Join(w.root, "tombstones.txt"))
if err != nil {
if err := removeRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) {
return err
}
defer tf.Close()
_, err = tf.Write([]byte("\xef\xbb\xbfVersion 1.0\n"))
if err != nil {
return err
}
for _, t := range w.tombstones {
_, err = tf.Write([]byte(filepath.Join(`\`, t) + "\n"))
for _, pd := range w.pendingDirs {
err := mkdirRelative(pd.Path, pd.Root)
if err != nil {
return err
}
}
if w.HasUtilityVM {
err = reapplyDirectoryTimes(w.uvmDi)
err := reapplyDirectoryTimes(w.destRoot, w.uvmDi)
if err != nil {
return err
}

vendor/github.com/Microsoft/hcsshim/safeopen.go (generated, vendored, new file, 427 lines)

@ -0,0 +1,427 @@
package hcsshim
import (
"errors"
"io"
"os"
"path/filepath"
"strings"
"syscall"
"unicode/utf16"
"unsafe"
winio "github.com/Microsoft/go-winio"
)
//sys ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile
//sys ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile
//sys rtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys localAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
//sys localFree(ptr uintptr) = kernel32.LocalFree
type ioStatusBlock struct {
Status, Information uintptr
}
type objectAttributes struct {
Length uintptr
RootDirectory uintptr
ObjectName uintptr
Attributes uintptr
SecurityDescriptor uintptr
SecurityQoS uintptr
}
type unicodeString struct {
Length uint16
MaximumLength uint16
Buffer uintptr
}
type fileLinkInformation struct {
ReplaceIfExists bool
RootDirectory uintptr
FileNameLength uint32
FileName [1]uint16
}
type fileDispositionInformationEx struct {
Flags uintptr
}
const (
_FileLinkInformation = 11
_FileDispositionInformationEx = 64
_FILE_READ_ATTRIBUTES = 0x0080
_FILE_WRITE_ATTRIBUTES = 0x0100
_DELETE = 0x10000
_FILE_OPEN = 1
_FILE_CREATE = 2
_FILE_DIRECTORY_FILE = 0x00000001
_FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020
_FILE_DELETE_ON_CLOSE = 0x00001000
_FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000
_FILE_OPEN_REPARSE_POINT = 0x00200000
_FILE_DISPOSITION_DELETE = 0x00000001
_OBJ_DONT_REPARSE = 0x1000
_STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B
)
func openRoot(path string) (*os.File, error) {
longpath, err := makeLongAbsPath(path)
if err != nil {
return nil, err
}
return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING)
}
func ntRelativePath(path string) ([]uint16, error) {
path = filepath.Clean(path)
if strings.Contains(path, ":") {
// Since alternate data streams must follow the file they
// are attached to, finding one here (out of order) is invalid.
return nil, errors.New("path contains invalid character `:`")
}
fspath := filepath.FromSlash(path)
if len(fspath) > 0 && fspath[0] == '\\' {
return nil, errors.New("expected relative path")
}
path16 := utf16.Encode(([]rune)(fspath))
if len(path16) > 32767 {
return nil, syscall.ENAMETOOLONG
}
return path16, nil
}
// openRelativeInternal opens a relative path from the given root, failing if
// any of the intermediate path components are reparse points.
func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) {
var (
h uintptr
iosb ioStatusBlock
oa objectAttributes
)
path16, err := ntRelativePath(path)
if err != nil {
return nil, err
}
if root == nil || root.Fd() == 0 {
return nil, errors.New("missing root directory")
}
upathBuffer := localAlloc(0, int(unsafe.Sizeof(unicodeString{}))+len(path16)*2)
defer localFree(upathBuffer)
upath := (*unicodeString)(unsafe.Pointer(upathBuffer))
upath.Length = uint16(len(path16) * 2)
upath.MaximumLength = upath.Length
upath.Buffer = upathBuffer + unsafe.Sizeof(*upath)
copy((*[32768]uint16)(unsafe.Pointer(upath.Buffer))[:], path16)
oa.Length = unsafe.Sizeof(oa)
oa.ObjectName = upathBuffer
oa.RootDirectory = uintptr(root.Fd())
oa.Attributes = _OBJ_DONT_REPARSE
status := ntCreateFile(
&h,
accessMask|syscall.SYNCHRONIZE,
&oa,
&iosb,
nil,
0,
shareFlags,
createDisposition,
_FILE_OPEN_FOR_BACKUP_INTENT|_FILE_SYNCHRONOUS_IO_NONALERT|flags,
nil,
0,
)
if status != 0 {
return nil, rtlNtStatusToDosError(status)
}
fullPath, err := makeLongAbsPath(filepath.Join(root.Name(), path))
if err != nil {
syscall.Close(syscall.Handle(h))
return nil, err
}
return os.NewFile(h, fullPath), nil
}
// openRelative opens a relative path from the given root, failing if
// any of the intermediate path components are reparse points.
func openRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) {
f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags)
if err != nil {
err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err}
}
return f, err
}
// linkRelative creates a hard link from oldname to newname (relative to oldroot
// and newroot), failing if any of the intermediate path components are reparse
// points.
func linkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error {
// Open the old file.
oldf, err := openRelativeInternal(
oldname,
oldroot,
syscall.FILE_WRITE_ATTRIBUTES,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_OPEN,
0,
)
if err != nil {
return &os.LinkError{Op: "link", Old: filepath.Join(oldroot.Name(), oldname), New: filepath.Join(newroot.Name(), newname), Err: err}
}
defer oldf.Close()
// Open the parent of the new file.
var parent *os.File
parentPath := filepath.Dir(newname)
if parentPath != "." {
parent, err = openRelativeInternal(
parentPath,
newroot,
syscall.GENERIC_READ,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_OPEN,
_FILE_DIRECTORY_FILE)
if err != nil {
return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err}
}
defer parent.Close()
fi, err := winio.GetFileBasicInfo(parent)
if err != nil {
return err
}
if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 {
return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: rtlNtStatusToDosError(_STATUS_REPARSE_POINT_ENCOUNTERED)}
}
} else {
parent = newroot
}
// Issue an NT call to create the link. This will be safe because NT will
// not open any more directories to create the link, so it cannot walk any
// more reparse points.
newbase := filepath.Base(newname)
newbase16, err := ntRelativePath(newbase)
if err != nil {
return err
}
size := int(unsafe.Offsetof(fileLinkInformation{}.FileName)) + len(newbase16)*2
linkinfoBuffer := localAlloc(0, size)
defer localFree(linkinfoBuffer)
linkinfo := (*fileLinkInformation)(unsafe.Pointer(linkinfoBuffer))
linkinfo.RootDirectory = parent.Fd()
linkinfo.FileNameLength = uint32(len(newbase16) * 2)
copy((*[32768]uint16)(unsafe.Pointer(&linkinfo.FileName[0]))[:], newbase16)
var iosb ioStatusBlock
status := ntSetInformationFile(
oldf.Fd(),
&iosb,
linkinfoBuffer,
uint32(size),
_FileLinkInformation,
)
if status != 0 {
return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: rtlNtStatusToDosError(status)}
}
return nil
}
// deleteOnClose marks a file to be deleted when the handle is closed.
func deleteOnClose(f *os.File) error {
disposition := fileDispositionInformationEx{Flags: _FILE_DISPOSITION_DELETE}
var iosb ioStatusBlock
status := ntSetInformationFile(
f.Fd(),
&iosb,
uintptr(unsafe.Pointer(&disposition)),
uint32(unsafe.Sizeof(disposition)),
_FileDispositionInformationEx,
)
if status != 0 {
return rtlNtStatusToDosError(status)
}
return nil
}
// clearReadOnly clears the readonly attribute on a file.
func clearReadOnly(f *os.File) error {
bi, err := winio.GetFileBasicInfo(f)
if err != nil {
return err
}
if bi.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY == 0 {
return nil
}
sbi := winio.FileBasicInfo{
FileAttributes: bi.FileAttributes &^ syscall.FILE_ATTRIBUTE_READONLY,
}
if sbi.FileAttributes == 0 {
sbi.FileAttributes = syscall.FILE_ATTRIBUTE_NORMAL
}
return winio.SetFileBasicInfo(f, &sbi)
}
// removeRelative removes a file or directory relative to a root, failing if any
// intermediate path components are reparse points.
func removeRelative(path string, root *os.File) error {
f, err := openRelativeInternal(
path,
root,
_FILE_READ_ATTRIBUTES|_FILE_WRITE_ATTRIBUTES|_DELETE,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_OPEN,
_FILE_OPEN_REPARSE_POINT)
if err == nil {
defer f.Close()
err = deleteOnClose(f)
if err == syscall.ERROR_ACCESS_DENIED {
// Maybe the file is marked readonly. Clear the bit and retry.
clearReadOnly(f)
err = deleteOnClose(f)
}
}
if err != nil {
return &os.PathError{Op: "remove", Path: filepath.Join(root.Name(), path), Err: err}
}
return nil
}
// removeAllRelative removes a directory tree relative to a root, failing if any
// intermediate path components are reparse points.
func removeAllRelative(path string, root *os.File) error {
fi, err := lstatRelative(path, root)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes
if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 {
// If this is a reparse point, it can't have children. Simple remove will do.
err := removeRelative(path, root)
if err == nil || os.IsNotExist(err) {
return nil
}
return err
}
// It is necessary to use os.Open as Readdirnames does not work with
// openRelative. This is safe because the above lstatRelative fails
// if the target is outside the root, and we know this is not a
// symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check.
fd, err := os.Open(filepath.Join(root.Name(), path))
if err != nil {
if os.IsNotExist(err) {
// Race. It was deleted between the Lstat and Open.
// Return nil per RemoveAll's docs.
return nil
}
return err
}
// Remove contents & return first error.
for {
names, err1 := fd.Readdirnames(100)
for _, name := range names {
err1 := removeAllRelative(path+string(os.PathSeparator)+name, root)
if err == nil {
err = err1
}
}
if err1 == io.EOF {
break
}
// If Readdirnames returned an error, use it.
if err == nil {
err = err1
}
if len(names) == 0 {
break
}
}
fd.Close()
// Remove directory.
err1 := removeRelative(path, root)
if err1 == nil || os.IsNotExist(err1) {
return nil
}
if err == nil {
err = err1
}
return err
}
// mkdirRelative creates a directory relative to a root, failing if any
// intermediate path components are reparse points.
func mkdirRelative(path string, root *os.File) error {
f, err := openRelativeInternal(
path,
root,
0,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_CREATE,
_FILE_DIRECTORY_FILE)
if err == nil {
f.Close()
} else {
err = &os.PathError{Op: "mkdir", Path: filepath.Join(root.Name(), path), Err: err}
}
return err
}
// lstatRelative performs a stat operation on a file relative to a root, failing
// if any intermediate path components are reparse points.
func lstatRelative(path string, root *os.File) (os.FileInfo, error) {
f, err := openRelativeInternal(
path,
root,
_FILE_READ_ATTRIBUTES,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_OPEN,
_FILE_OPEN_REPARSE_POINT)
if err != nil {
return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err}
}
defer f.Close()
return f.Stat()
}
// ensureNotReparsePointRelative validates that a given file (relative to a
// root) and all intermediate path components are not reparse points.
func ensureNotReparsePointRelative(path string, root *os.File) error {
// Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT.
f, err := openRelative(
path,
root,
0,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_OPEN,
0)
if err != nil {
return err
}
f.Close()
return nil
}
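For orientation, a hedged sketch of how the layer writers above use these unexported helpers; it assumes it lives inside package hcsshim, and the layer path is an illustrative assumption. Every operation is anchored to an open root handle, so an intermediate reparse point cannot redirect the path.

// Assumed import: syscall (Windows build).
func exampleWriteIntoLayer() error {
	root, err := openRoot(`C:\layers\base`) // assumed layer root; opened once
	if err != nil {
		return err
	}
	defer root.Close()

	// Both calls resolve strictly relative to the root handle and fail if any
	// intermediate component is a reparse point.
	if err := mkdirRelative(`Files\example`, root); err != nil {
		return err
	}
	f, err := openRelative(`Files\example\data.txt`, root,
		syscall.GENERIC_READ|syscall.GENERIC_WRITE,
		syscall.FILE_SHARE_READ,
		_FILE_CREATE,
		0)
	if err != nil {
		return err
	}
	return f.Close()
}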


@ -41,6 +41,8 @@ var (
modole32 = windows.NewLazySystemDLL("ole32.dll")
modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
modvmcompute = windows.NewLazySystemDLL("vmcompute.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procCoTaskMemFree = modole32.NewProc("CoTaskMemFree")
procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId")
@ -94,6 +96,11 @@ var (
procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback")
procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings")
procHNSCall = modvmcompute.NewProc("HNSCall")
procNtCreateFile = modntdll.NewProc("NtCreateFile")
procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree")
)
func coTaskMemFree(buffer unsafe.Pointer) {
@ -1040,3 +1047,34 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16)
}
return
}
func ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) {
r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0)
status = uint32(r0)
return
}
func ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0)
status = uint32(r0)
return
}
func rtlNtStatusToDosError(status uint32) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
}
return
}
func localAlloc(flags uint32, size int) (ptr uintptr) {
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
ptr = uintptr(r0)
return
}
func localFree(ptr uintptr) {
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0)
return
}


@ -1,4 +1,4 @@
![banner](/docs/images/containerd-dark.png?raw=true)
![banner](/docs/static/img/containerd-dark.png?raw=true)
[![GoDoc](https://godoc.org/github.com/containerd/containerd?status.svg)](https://godoc.org/github.com/containerd/containerd)
[![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd)


@ -199,7 +199,7 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
if aufsTempdir, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
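This call site (and the TempFile changes below) switches the parent directory to XDG_RUNTIME_DIR. A hedged one-function sketch of the pattern, with an illustrative prefix: when the variable is unset, the empty string makes Go fall back to the default temporary directory.

// Assumed imports: io/ioutil, os.
func newScratchDir() (string, error) {
	// ioutil.TempDir treats an empty dir argument as "use the OS default temp dir".
	return ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "scratch-")
}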


@ -21,6 +21,7 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"sync"
"github.com/containerd/containerd/defaults"
@ -213,3 +214,44 @@ type DirectIO struct {
}
var _ IO = &DirectIO{}
// LogFile creates a file on disk that logs the task's STDOUT and STDERR.
// If the log file already exists, the logs will be appended to the file.
func LogFile(path string) Creator {
return func(_ string) (IO, error) {
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return nil, err
}
f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
f.Close()
return &logIO{
config: Config{
Stdout: path,
Stderr: path,
},
}, nil
}
}
type logIO struct {
config Config
}
func (l *logIO) Config() Config {
return l.config
}
func (l *logIO) Cancel() {
}
func (l *logIO) Wait() {
}
func (l *logIO) Close() error {
return nil
}
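A hedged usage sketch for the new LogFile creator; the container handle and log path are illustrative assumptions, not taken from the diff.

// Assumed imports: context, github.com/containerd/containerd,
// github.com/containerd/containerd/cio.
func startLoggedTask(ctx context.Context, container containerd.Container) (containerd.Task, error) {
	// The task's stdout and stderr are appended to the given file.
	return container.NewTask(ctx, cio.LogFile("/var/log/containerd/example-task.log"))
}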


@ -141,8 +141,18 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (pipes, error) {
// NewDirectIO returns an IO implementation that exposes the IO streams as io.ReadCloser
// and io.WriteCloser.
func NewDirectIO(ctx context.Context, fifos *FIFOSet) (*DirectIO, error) {
return newDirectIO(ctx, fifos, false)
}
// NewDirectIOWithTerminal returns an IO implementation that exposes the streams with terminal enabled
func NewDirectIOWithTerminal(ctx context.Context, fifos *FIFOSet) (*DirectIO, error) {
return newDirectIO(ctx, fifos, true)
}
func newDirectIO(ctx context.Context, fifos *FIFOSet, terminal bool) (*DirectIO, error) {
ctx, cancel := context.WithCancel(ctx)
pipes, err := openFifos(ctx, fifos)
fifos.Config.Terminal = terminal
return &DirectIO{
pipes: pipes,
cio: cio{


@ -19,7 +19,6 @@ package containerd
import (
"context"
"fmt"
"io"
"net/http"
"runtime"
"strconv"
@ -339,39 +338,44 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image
}
}
imgrec := images.Image{
img := &image{
client: c,
i: images.Image{
Name: name,
Target: desc,
Labels: pullCtx.Labels,
},
}
if pullCtx.Unpack {
if err := img.Unpack(ctx, pullCtx.Snapshotter); err != nil {
return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter)
}
}
is := c.ImageService()
if created, err := is.Create(ctx, imgrec); err != nil {
for {
if created, err := is.Create(ctx, img.i); err != nil {
if !errdefs.IsAlreadyExists(err) {
return nil, err
}
updated, err := is.Update(ctx, imgrec)
updated, err := is.Update(ctx, img.i)
if err != nil {
// if image was removed, try create again
if errdefs.IsNotFound(err) {
continue
}
return nil, err
}
imgrec = updated
img.i = updated
} else {
imgrec = created
}
img := &image{
client: c,
i: imgrec,
}
if pullCtx.Unpack {
if err := img.Unpack(ctx, pullCtx.Snapshotter); err != nil {
errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter)
}
img.i = created
}
return img, nil
}
}
// Push uploads the provided content to a remote resource
func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, opts ...RemoteOpt) error {
@ -551,98 +555,3 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
Revision: response.Revision,
}, nil
}
type importOpts struct {
}
// ImportOpt allows the caller to specify import specific options
type ImportOpt func(c *importOpts) error
func resolveImportOpt(opts ...ImportOpt) (importOpts, error) {
var iopts importOpts
for _, o := range opts {
if err := o(&iopts); err != nil {
return iopts, err
}
}
return iopts, nil
}
// Import imports an image from a Tar stream using reader.
// Caller needs to specify importer. Future version may use oci.v1 as the default.
// Note that unreferenced blobs may be imported to the content store as well.
func (c *Client) Import(ctx context.Context, importer images.Importer, reader io.Reader, opts ...ImportOpt) ([]Image, error) {
_, err := resolveImportOpt(opts...) // unused now
if err != nil {
return nil, err
}
ctx, done, err := c.WithLease(ctx)
if err != nil {
return nil, err
}
defer done(ctx)
imgrecs, err := importer.Import(ctx, c.ContentStore(), reader)
if err != nil {
// is.Update() is not called on error
return nil, err
}
is := c.ImageService()
var images []Image
for _, imgrec := range imgrecs {
if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
if !errdefs.IsNotFound(err) {
return nil, err
}
created, err := is.Create(ctx, imgrec)
if err != nil {
return nil, err
}
imgrec = created
} else {
imgrec = updated
}
images = append(images, &image{
client: c,
i: imgrec,
})
}
return images, nil
}
type exportOpts struct {
}
// ExportOpt allows the caller to specify export-specific options
type ExportOpt func(c *exportOpts) error
func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
var eopts exportOpts
for _, o := range opts {
if err := o(&eopts); err != nil {
return eopts, err
}
}
return eopts, nil
}
// Export exports an image to a Tar stream.
// OCI format is used by default.
// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
_, err := resolveExportOpt(opts...) // unused now
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
go func() {
pw.CloseWithError(exporter.Export(ctx, c.ContentStore(), desc, pw))
}()
return pr, nil
}


@ -17,7 +17,7 @@
package command
import (
"context"
gocontext "context"
"fmt"
"io/ioutil"
golog "log"
@ -35,7 +35,6 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
gocontext "golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
)
@ -168,7 +167,7 @@ func App() *cli.App {
return app
}
func serve(ctx context.Context, l net.Listener, serveFunc func(net.Listener) error) {
func serve(ctx gocontext.Context, l net.Listener, serveFunc func(net.Listener) error) {
path := l.Addr().String()
log.G(ctx).WithField("address", path).Info("serving...")
go func() {


@ -506,7 +506,7 @@ var (
)
func edit(rd io.Reader) (io.ReadCloser, error) {
tmp, err := ioutil.TempFile("", "edit-")
tmp, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), "edit-")
if err != nil {
return nil, err
}


@ -61,19 +61,19 @@ Most of this is experimental and there are few leaps to make this work.`,
var (
ref = clicontext.Args().First()
)
_, err := Fetch(ref, clicontext)
client, ctx, cancel, err := commands.NewClient(clicontext)
if err != nil {
return err
}
defer cancel()
_, err = Fetch(ctx, client, ref, clicontext)
return err
},
}
// Fetch loads all resources into the content store and returns the image
func Fetch(ref string, cliContext *cli.Context) (containerd.Image, error) {
client, ctx, cancel, err := commands.NewClient(cliContext)
if err != nil {
return nil, err
}
defer cancel()
func Fetch(ctx context.Context, client *containerd.Client, ref string, cliContext *cli.Context) (containerd.Image, error) {
resolver, err := commands.GetResolver(ctx, cliContext)
if err != nil {
return nil, err


@ -42,18 +42,15 @@ var Command = cli.Command{
defer cancel()
eventsClient := client.EventService()
eventsCh, errCh := eventsClient.Subscribe(ctx, context.Args()...)
for {
open := true
for open {
var e *events.Envelope
select {
case evt, closed := <-eventsCh:
if closed {
return nil
}
e = evt
case err := <-errCh:
case e = <-eventsCh:
case err, open = <-errCh:
return err
}
if e != nil {
var out []byte
if e.Event != nil {
v, err := typeurl.UnmarshalAny(e.Event)
@ -65,7 +62,6 @@ var Command = cli.Command{
return err
}
}
if _, err := fmt.Println(
e.Timestamp,
e.Namespace,
@ -75,5 +71,7 @@ var Command = cli.Command{
return err
}
}
}
return nil
},
}
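A hedged sketch of the subscribe loop that the command above restructures, with no filters and payload decoding omitted; only calls visible in the diff are used.

// Assumed imports: context, fmt, github.com/containerd/containerd.
func watchEvents(ctx context.Context, client *containerd.Client) error {
	ch, errs := client.EventService().Subscribe(ctx)
	for {
		select {
		case e := <-ch:
			if e == nil { // channel closed
				return nil
			}
			fmt.Println(e.Timestamp, e.Namespace, e.Topic)
		case err := <-errs:
			return err
		}
	}
}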


@ -57,10 +57,20 @@ command. As part of this process, we do the following:
if ref == "" {
return fmt.Errorf("please provide an image reference to pull")
}
ctx, cancel := commands.AppContext(context)
client, ctx, cancel, err := commands.NewClient(context)
if err != nil {
return err
}
defer cancel()
img, err := content.Fetch(ref, context)
ctx, done, err := client.WithLease(ctx)
if err != nil {
return err
}
defer done(ctx)
img, err := content.Fetch(ctx, client, ref, context)
if err != nil {
return err
}
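A hedged sketch of the lease pattern introduced above: work runs under the lease-scoped context so content fetched during the pull is not garbage collected before done is called. The image reference is an illustrative assumption.

// Assumed imports: context, github.com/containerd/containerd.
func pullWithLease(ctx context.Context, client *containerd.Client, ref string) error {
	ctx, done, err := client.WithLease(ctx)
	if err != nil {
		return err
	}
	// Releasing the lease lets the garbage collector reclaim unreferenced content.
	defer done(ctx)

	_, err = client.Pull(ctx, ref)
	return err
}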


@ -19,12 +19,11 @@
package shim
import (
gocontext "context"
"fmt"
"io/ioutil"
"net"
gocontext "context"
"github.com/containerd/console"
"github.com/containerd/containerd/cmd/ctr/commands"
shim "github.com/containerd/containerd/linux/shim/v1"


@ -304,7 +304,11 @@ var prepareCommand = cli.Command{
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
mounts, err := snapshotter.Prepare(ctx, key, parent)
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
mounts, err := snapshotter.Prepare(ctx, key, parent, snapshots.WithLabels(labels))
if err != nil {
return err
}
@ -404,7 +408,10 @@ var commitCommand = cli.Command{
}
defer cancel()
snapshotter := client.SnapshotService(context.GlobalString("snapshotter"))
return snapshotter.Commit(ctx, key, active)
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
return snapshotter.Commit(ctx, key, active, snapshots.WithLabels(labels))
},
}
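A hedged sketch of the labeling pattern added above: marking a snapshot as a GC root so it survives garbage collection while nothing references it yet. The snapshotter name is an illustrative assumption.

// Assumed imports: context, time, github.com/containerd/containerd,
// github.com/containerd/containerd/mount, github.com/containerd/containerd/snapshots.
func prepareRootedSnapshot(ctx context.Context, client *containerd.Client, key, parent string) ([]mount.Mount, error) {
	sn := client.SnapshotService("overlayfs") // assumed snapshotter
	labels := map[string]string{
		// Mark the snapshot as a garbage-collection root so it is not collected
		// while unreferenced.
		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
	}
	return sn.Prepare(ctx, key, parent, snapshots.WithLabels(labels))
}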


@ -34,7 +34,7 @@ var listCommand = cli.Command{
Flags: []cli.Flag{
cli.BoolFlag{
Name: "quiet, q",
Usage: "print only the task id & pid",
Usage: "print only the task id",
},
},
Action: func(context *cli.Context) error {


@ -69,7 +69,13 @@ func HandleConsoleResize(ctx gocontext.Context, task resizer, con console.Consol
// NewTask creates a new task
func NewTask(ctx gocontext.Context, client *containerd.Client, container containerd.Container, checkpoint string, tty, nullIO bool, ioOpts []cio.Opt, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
stdio := cio.NewCreator(append([]cio.Opt{cio.WithStdio}, ioOpts...)...)
if checkpoint == "" {
if checkpoint != "" {
im, err := client.GetImage(ctx, checkpoint)
if err != nil {
return nil, err
}
opts = append(opts, containerd.WithTaskCheckpoint(im))
}
ioCreator := stdio
if tty {
ioCreator = cio.NewCreator(append([]cio.Opt{cio.WithStdio, cio.WithTerminal}, ioOpts...)...)
@ -82,13 +88,6 @@ func NewTask(ctx gocontext.Context, client *containerd.Client, container contain
}
return container.NewTask(ctx, ioCreator, opts...)
}
im, err := client.GetImage(ctx, checkpoint)
if err != nil {
return nil, err
}
opts = append(opts, containerd.WithTaskCheckpoint(im))
return container.NewTask(ctx, stdio, opts...)
}
func getNewTaskOpts(context *cli.Context) []containerd.NewTaskOpts {
if context.Bool("no-pivot") {


@ -28,9 +28,9 @@ import (
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/oci"
"github.com/containerd/typeurl"
prototypes "github.com/gogo/protobuf/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
@ -45,7 +45,7 @@ type Container interface {
// NewTask creates a new task based on the container metadata
NewTask(context.Context, cio.Creator, ...NewTaskOpts) (Task, error)
// Spec returns the OCI runtime specification
Spec(context.Context) (*specs.Spec, error)
Spec(context.Context) (*oci.Spec, error)
// Task returns the current task for the container
//
// If cio.Attach options are passed the client will reattach to the IO for the running
@ -126,12 +126,12 @@ func (c *container) SetLabels(ctx context.Context, labels map[string]string) (ma
}
// Spec returns the current OCI specification for the container
func (c *container) Spec(ctx context.Context) (*specs.Spec, error) {
func (c *container) Spec(ctx context.Context) (*oci.Spec, error) {
r, err := c.get(ctx)
if err != nil {
return nil, err
}
var s specs.Spec
var s oci.Spec
if err := json.Unmarshal(r.Spec.Value, &s); err != nil {
return nil, err
}


@ -26,7 +26,6 @@ import (
"github.com/containerd/typeurl"
"github.com/gogo/protobuf/types"
"github.com/opencontainers/image-spec/identity"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
@ -196,7 +195,7 @@ func WithNewSpec(opts ...oci.SpecOpts) NewContainerOpts {
}
// WithSpec sets the provided spec on the container
func WithSpec(s *specs.Spec, opts ...oci.SpecOpts) NewContainerOpts {
func WithSpec(s *oci.Spec, opts ...oci.SpecOpts) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
for _, o := range opts {
if err := o(ctx, client, c, s); err != nil {


@ -53,7 +53,7 @@ func WithDefaultProfile(name string) oci.SpecOpts {
if err != nil {
return err
}
f, err := ioutil.TempFile("", p.Name)
f, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), p.Name)
if err != nil {
return err
}


@ -17,8 +17,9 @@
package containerd
import (
"context"
"github.com/containerd/containerd/namespaces"
"golang.org/x/net/context"
"google.golang.org/grpc"
)

vendor/github.com/containerd/containerd/import.go (generated, vendored, new file, 121 lines)

@ -0,0 +1,121 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package containerd
import (
"context"
"io"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type importOpts struct {
}
// ImportOpt allows the caller to specify import specific options
type ImportOpt func(c *importOpts) error
func resolveImportOpt(opts ...ImportOpt) (importOpts, error) {
var iopts importOpts
for _, o := range opts {
if err := o(&iopts); err != nil {
return iopts, err
}
}
return iopts, nil
}
// Import imports an image from a Tar stream using reader.
// Caller needs to specify importer. Future version may use oci.v1 as the default.
// Note that unreferenced blobs may be imported to the content store as well.
func (c *Client) Import(ctx context.Context, importer images.Importer, reader io.Reader, opts ...ImportOpt) ([]Image, error) {
_, err := resolveImportOpt(opts...) // unused now
if err != nil {
return nil, err
}
ctx, done, err := c.WithLease(ctx)
if err != nil {
return nil, err
}
defer done(ctx)
imgrecs, err := importer.Import(ctx, c.ContentStore(), reader)
if err != nil {
// is.Update() is not called on error
return nil, err
}
is := c.ImageService()
var images []Image
for _, imgrec := range imgrecs {
if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
if !errdefs.IsNotFound(err) {
return nil, err
}
created, err := is.Create(ctx, imgrec)
if err != nil {
return nil, err
}
imgrec = created
} else {
imgrec = updated
}
images = append(images, &image{
client: c,
i: imgrec,
})
}
return images, nil
}
type exportOpts struct {
}
// ExportOpt allows the caller to specify export-specific options
type ExportOpt func(c *exportOpts) error
func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
var eopts exportOpts
for _, o := range opts {
if err := o(&eopts); err != nil {
return eopts, err
}
}
return eopts, nil
}
// Export exports an image to a Tar stream.
// OCI format is used by default.
// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
_, err := resolveExportOpt(opts...) // unused now
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
go func() {
pw.CloseWithError(exporter.Export(ctx, c.ContentStore(), desc, pw))
}()
return pr, nil
}
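
For orientation, a minimal usage sketch of the new Import API (not part of this change): it assumes a containerd daemon at the default socket and takes any images.Importer implementation as a parameter, so no particular importer package is implied; importTar and its argument names are illustrative only.

package example

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/namespaces"
)

// importTar streams a tar archive into containerd using the supplied importer.
// Any images.Importer implementation will do; none is assumed here.
func importTar(path string, importer images.Importer) error {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		return err
	}
	defer client.Close()

	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	imgs, err := client.Import(ctx, importer, f)
	if err != nil {
		return err
	}
	for _, img := range imgs {
		log.Printf("imported %s", img.Name())
	}
	return nil
}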

View File

@ -17,7 +17,8 @@
package leases
import (
"golang.org/x/net/context"
"context"
"google.golang.org/grpc/metadata"
)

View File

@ -194,10 +194,23 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
s.p.mu.Lock()
defer s.p.mu.Unlock()
p := s.p
sio := p.stdio
var (
err error
socket *runc.Socket
)
if sio.Terminal {
if socket, err = runc.NewTempConsoleSocket(); err != nil {
return errors.Wrap(err, "failed to create OCI runtime console socket")
}
defer socket.Close()
s.opts.ConsoleSocket = socket
}
if _, err := s.p.runtime.Restore(ctx, p.id, p.bundle, s.opts); err != nil {
return p.runtimeError(err, "OCI runtime restore failed")
}
sio := p.stdio
if sio.Stdin != "" {
sc, err := fifo.OpenFifo(ctx, sio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
if err != nil {
@ -207,7 +220,17 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
p.closers = append(p.closers, sc)
}
var copyWaitGroup sync.WaitGroup
if !sio.IsNull() {
if socket != nil {
console, err := socket.ReceiveMaster()
if err != nil {
return errors.Wrap(err, "failed to retrieve console master")
}
console, err = p.platform.CopyConsole(ctx, console, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, &copyWaitGroup)
if err != nil {
return errors.Wrap(err, "failed to start console copy")
}
p.console = console
} else if !sio.IsNull() {
if err := copyPipes(ctx, p.io, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, &copyWaitGroup); err != nil {
return errors.Wrap(err, "failed to start io pipe copy")
}
@ -219,7 +242,6 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
return errors.Wrap(err, "failed to retrieve OCI runtime container pid")
}
p.pid = pid
return s.transition("running")
}

View File

@ -79,7 +79,7 @@ func init() {
})
}
var _ = (runtime.Runtime)(&Runtime{})
var _ = (runtime.PlatformRuntime)(&Runtime{})
// Config options for the runtime
type Config struct {
@ -510,6 +510,7 @@ func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, er
LogFormat: runc.JSON,
PdeathSignal: unix.SIGKILL,
Root: filepath.Join(root, ns),
Debug: r.config.ShimDebug,
}, nil
}

View File

@ -16,7 +16,7 @@
limitations under the License.
*/
package reaper
package shim
import (
"os/exec"

View File

@ -34,7 +34,6 @@ import (
shimapi "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/reaper"
"github.com/containerd/containerd/runtime"
runc "github.com/containerd/go-runc"
"github.com/containerd/typeurl"
@ -81,7 +80,7 @@ func NewService(config Config, publisher events.Publisher) (*Service, error) {
context: ctx,
processes: make(map[string]proc.Process),
events: make(chan interface{}, 128),
ec: reaper.Default.Subscribe(),
ec: Default.Subscribe(),
}
go s.processExits()
if err := s.initPlatform(); err != nil {

View File

@ -135,11 +135,7 @@ func getImagesBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
}
func createContainersBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) {
bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers)
if err != nil {
return nil, err
}
return bkt, nil
return createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers)
}
func getContainersBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {

View File

@ -19,6 +19,8 @@
package cgroups
import (
"context"
"github.com/containerd/cgroups"
eventstypes "github.com/containerd/containerd/api/events"
"github.com/containerd/containerd/events"
@ -30,7 +32,6 @@ import (
"github.com/containerd/containerd/runtime"
metrics "github.com/docker/go-metrics"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
// Config for the cgroups monitor

View File

@ -23,27 +23,10 @@ import (
"fmt"
"io"
"os"
"strconv"
"strings"
)
const (
/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
(1) mount ID: unique identifier of the mount (may be reused after umount)
(2) parent ID: ID of parent (or of self for the top of the mount tree)
(3) major:minor: value of st_dev for files on filesystem
(4) root: root of the mount within the filesystem
(5) mount point: mount point relative to the process's root
(6) mount options: per mount options
(7) optional fields: zero or more fields of the form "tag[:value]"
(8) separator: marks the end of the optional fields
(9) filesystem type: name of filesystem of the form "type[.subtype]"
(10) mount source: filesystem specific information or "none"
(11) super options: per super block options*/
mountinfoFormat = "%d %d %d:%d %s %s %s %s"
)
// Self retrieves a list of mounts for the current running process.
func Self() ([]Info, error) {
f, err := os.Open("/proc/self/mountinfo")
@ -56,41 +39,83 @@ func Self() ([]Info, error) {
}
func parseInfoFile(r io.Reader) ([]Info, error) {
var (
s = bufio.NewScanner(r)
out = []Info{}
)
s := bufio.NewScanner(r)
out := []Info{}
for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
var (
p = Info{}
text = s.Text()
optionalFields string
)
/*
36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
(1) mount ID: unique identifier of the mount (may be reused after umount)
(2) parent ID: ID of parent (or of self for the top of the mount tree)
(3) major:minor: value of st_dev for files on filesystem
(4) root: root of the mount within the filesystem
(5) mount point: mount point relative to the process's root
(6) mount options: per mount options
(7) optional fields: zero or more fields of the form "tag[:value]"
(8) separator: marks the end of the optional fields
(9) filesystem type: name of filesystem of the form "type[.subtype]"
(10) mount source: filesystem specific information or "none"
(11) super options: per super block options
*/
if _, err := fmt.Sscanf(text, mountinfoFormat,
&p.ID, &p.Parent, &p.Major, &p.Minor,
&p.Root, &p.Mountpoint, &p.Options, &optionalFields); err != nil {
return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
text := s.Text()
fields := strings.Split(text, " ")
numFields := len(fields)
if numFields < 10 {
// should be at least 10 fields
return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
}
// Safe as mountinfo encodes mountpoints with spaces as \040.
index := strings.Index(text, " - ")
postSeparatorFields := strings.Fields(text[index+3:])
if len(postSeparatorFields) < 3 {
return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
p := Info{}
// ignore any numbers parsing errors, as there should not be any
p.ID, _ = strconv.Atoi(fields[0])
p.Parent, _ = strconv.Atoi(fields[1])
mm := strings.Split(fields[2], ":")
if len(mm) != 2 {
return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm)
}
p.Major, _ = strconv.Atoi(mm[0])
p.Minor, _ = strconv.Atoi(mm[1])
if optionalFields != "-" {
p.Optional = optionalFields
}
p.Root = fields[3]
p.Mountpoint = fields[4]
p.Options = fields[5]
// zero or more optional fields, followed by the separator (-)
i := 6
for ; i < numFields && fields[i] != "-"; i++ {
switch i {
case 6:
p.Optional = fields[6]
default:
/* NOTE there might be more optional fields before the separator
such as fields[7]...fields[N] (where N < separatorIndex),
although as of Linux kernel 4.15 the only known ones are
mount propagation flags in fields[6]. The correct
behavior is to ignore any unknown optional fields.
*/
}
}
if i == numFields {
return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text)
}
// There should be 3 fields after the separator...
if i+4 > numFields {
return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text)
}
// ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
// (like "//serv/My Documents") _may_ end up having a space in the last field
// of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs
// option unc= is ignored, so a space should not appear. In here we ignore
// those "extra" fields caused by extra spaces.
p.FSType = fields[i+1]
p.Source = fields[i+2]
p.VFSOptions = fields[i+3]
p.FSType = postSeparatorFields[0]
p.Source = postSeparatorFields[1]
p.VFSOptions = strings.Join(postSeparatorFields[2:], " ")
out = append(out, p)
}
return out, nil
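
A standalone walk-through of the new field-splitting logic (the vendored parser itself is unexported); the sample line comes from the format description in the comment above, and the snippet simply repeats the same steps: fixed fields, optional fields up to the "-" separator, then fstype, source and super options.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"
	fields := strings.Split(line, " ")

	id, _ := strconv.Atoi(fields[0])     // 36
	parent, _ := strconv.Atoi(fields[1]) // 35
	mm := strings.Split(fields[2], ":")  // ["98", "0"]

	// Skip zero or more optional fields until the "-" separator.
	i := 6
	for i < len(fields) && fields[i] != "-" {
		i++
	}

	fmt.Println("id:", id, "parent:", parent, "major:minor:", mm[0]+":"+mm[1])
	fmt.Println("root:", fields[3], "mountpoint:", fields[4], "options:", fields[5])
	fmt.Println("fstype:", fields[i+1], "source:", fields[i+2], "super options:", fields[i+3])
}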

View File

@ -25,7 +25,7 @@ import (
"github.com/pkg/errors"
)
var tempMountLocation = os.TempDir()
var tempMountLocation = getTempDir()
// WithTempMount mounts the provided mounts to a temp dir, and passes the temp dir to f.
// The mounts are valid during the call to f.
@ -64,3 +64,10 @@ func WithTempMount(ctx context.Context, mounts []Mount, f func(root string) erro
}
return errors.Wrapf(f(root), "mount callback failed on %s", root)
}
func getTempDir() string {
if xdg := os.Getenv("XDG_RUNTIME_DIR"); xdg != "" {
return xdg
}
return os.TempDir()
}

View File

@ -17,7 +17,8 @@
package namespaces
import (
"golang.org/x/net/context"
"context"
"google.golang.org/grpc/metadata"
)

View File

@ -23,9 +23,13 @@ import (
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// Spec is a type alias to the OCI runtime spec to allow third-party SpecOpts
// to be created without the "issues" with go vendoring and package imports
type Spec = specs.Spec
// GenerateSpec will generate a default spec from the provided image
// for use as a containerd container
func GenerateSpec(ctx context.Context, client Client, c *containers.Container, opts ...SpecOpts) (*specs.Spec, error) {
func GenerateSpec(ctx context.Context, client Client, c *containers.Container, opts ...SpecOpts) (*Spec, error) {
s, err := createDefaultSpec(ctx, c.ID)
if err != nil {
return nil, err
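
The alias is what lets a third-party package declare options directly against *oci.Spec instead of importing the runtime-spec types under its own vendor path. A hypothetical example (package and function names are illustrative, not from this change):

package myopts

import (
	"context"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
)

// WithFixedHostname is a third-party style option written purely against the
// oci.Spec alias; it never touches the runtime-spec package directly.
func WithFixedHostname(name string) oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
		s.Hostname = name
		return nil
	}
}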

View File

@ -25,11 +25,11 @@ import (
)
// SpecOpts sets spec specific information to a newly generated OCI spec
type SpecOpts func(context.Context, Client, *containers.Container, *specs.Spec) error
type SpecOpts func(context.Context, Client, *containers.Container, *Spec) error
// Compose converts a sequence of spec operations into a single operation
func Compose(opts ...SpecOpts) SpecOpts {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
for _, o := range opts {
if err := o(ctx, client, c, s); err != nil {
return err
@ -40,7 +40,7 @@ func Compose(opts ...SpecOpts) SpecOpts {
}
// setProcess sets Process to empty if unset
func setProcess(s *specs.Spec) {
func setProcess(s *Spec) {
if s.Process == nil {
s.Process = &specs.Process{}
}
@ -48,7 +48,7 @@ func setProcess(s *specs.Spec) {
// WithProcessArgs replaces the args on the generated spec
func WithProcessArgs(args ...string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setProcess(s)
s.Process.Args = args
return nil
@ -57,7 +57,7 @@ func WithProcessArgs(args ...string) SpecOpts {
// WithProcessCwd replaces the current working directory on the generated spec
func WithProcessCwd(cwd string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setProcess(s)
s.Process.Cwd = cwd
return nil
@ -66,7 +66,7 @@ func WithProcessCwd(cwd string) SpecOpts {
// WithHostname sets the container's hostname
func WithHostname(name string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
s.Hostname = name
return nil
}
@ -74,7 +74,7 @@ func WithHostname(name string) SpecOpts {
// WithEnv appends environment variables
func WithEnv(environmentVariables []string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
if len(environmentVariables) > 0 {
setProcess(s)
s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, environmentVariables)
@ -85,7 +85,7 @@ func WithEnv(environmentVariables []string) SpecOpts {
// WithMounts appends mounts
func WithMounts(mounts []specs.Mount) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
s.Mounts = append(s.Mounts, mounts...)
return nil
}
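
For reference, a small sketch of composing these options against an empty spec; the nil client and zero-value container are fine here because none of the composed options use them.

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
)

func main() {
	s := &oci.Spec{}
	opt := oci.Compose(
		oci.WithProcessArgs("/bin/sh", "-c", "echo hello"),
		oci.WithEnv([]string{"FOO=bar"}),
		oci.WithHostname("demo"),
	)
	// Apply the composed option to the spec.
	if err := opt(context.Background(), nil, &containers.Container{}, s); err != nil {
		panic(err)
	}
	fmt.Println(s.Process.Args, s.Process.Env, s.Hostname)
}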

View File

@ -42,7 +42,7 @@ import (
// WithTTY sets the information on the spec as well as the environment variables for
// using a TTY
func WithTTY(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
func WithTTY(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setProcess(s)
s.Process.Terminal = true
s.Process.Env = append(s.Process.Env, "TERM=xterm")
@ -50,21 +50,21 @@ func WithTTY(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec
}
// setRoot sets Root to empty if unset
func setRoot(s *specs.Spec) {
func setRoot(s *Spec) {
if s.Root == nil {
s.Root = &specs.Root{}
}
}
// setLinux sets Linux to empty if unset
func setLinux(s *specs.Spec) {
func setLinux(s *Spec) {
if s.Linux == nil {
s.Linux = &specs.Linux{}
}
}
// setCapabilities sets Linux Capabilities to empty if unset
func setCapabilities(s *specs.Spec) {
func setCapabilities(s *Spec) {
setProcess(s)
if s.Process.Capabilities == nil {
s.Process.Capabilities = &specs.LinuxCapabilities{}
@ -73,7 +73,7 @@ func setCapabilities(s *specs.Spec) {
// WithHostNamespace allows a task to run inside the host's linux namespace
func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setLinux(s)
for i, n := range s.Linux.Namespaces {
if n.Type == ns {
@ -88,7 +88,7 @@ func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts {
// WithLinuxNamespace uses the passed in namespace for the spec. If a namespace of the same type already exists in the
// spec, the existing namespace is replaced by the one provided.
func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setLinux(s)
for i, n := range s.Linux.Namespaces {
if n.Type == ns.Type {
@ -106,7 +106,7 @@ func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts {
// WithImageConfig configures the spec to from the configuration of an Image
func WithImageConfig(image Image) SpecOpts {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
ic, err := image.Config(ctx)
if err != nil {
return err
@ -148,7 +148,7 @@ func WithImageConfig(image Image) SpecOpts {
// WithRootFSPath specifies unmanaged rootfs path.
func WithRootFSPath(path string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setRoot(s)
s.Root.Path = path
// Entrypoint is not set here (it's up to caller)
@ -158,7 +158,7 @@ func WithRootFSPath(path string) SpecOpts {
// WithRootFSReadonly sets specs.Root.Readonly to true
func WithRootFSReadonly() SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setRoot(s)
s.Root.Readonly = true
return nil
@ -166,14 +166,14 @@ func WithRootFSReadonly() SpecOpts {
}
// WithNoNewPrivileges sets no_new_privileges on the process for the container
func WithNoNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
func WithNoNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setProcess(s)
s.Process.NoNewPrivileges = true
return nil
}
// WithHostHostsFile bind-mounts the host's /etc/hosts into the container as readonly
func WithHostHostsFile(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
func WithHostHostsFile(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
s.Mounts = append(s.Mounts, specs.Mount{
Destination: "/etc/hosts",
Type: "bind",
@ -184,7 +184,7 @@ func WithHostHostsFile(_ context.Context, _ Client, _ *containers.Container, s *
}
// WithHostResolvconf bind-mounts the host's /etc/resolv.conf into the container as readonly
func WithHostResolvconf(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
func WithHostResolvconf(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
s.Mounts = append(s.Mounts, specs.Mount{
Destination: "/etc/resolv.conf",
Type: "bind",
@ -195,7 +195,7 @@ func WithHostResolvconf(_ context.Context, _ Client, _ *containers.Container, s
}
// WithHostLocaltime bind-mounts the host's /etc/localtime into the container as readonly
func WithHostLocaltime(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
func WithHostLocaltime(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
s.Mounts = append(s.Mounts, specs.Mount{
Destination: "/etc/localtime",
Type: "bind",
@ -208,7 +208,7 @@ func WithHostLocaltime(_ context.Context, _ Client, _ *containers.Container, s *
// WithUserNamespace sets the uid and gid mappings for the task
// this can be called multiple times to add more mappings to the generated spec
func WithUserNamespace(container, host, size uint32) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
var hasUserns bool
setLinux(s)
for _, ns := range s.Linux.Namespaces {
@ -235,7 +235,7 @@ func WithUserNamespace(container, host, size uint32) SpecOpts {
// WithCgroup sets the container's cgroup path
func WithCgroup(path string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setLinux(s)
s.Linux.CgroupsPath = path
return nil
@ -245,7 +245,7 @@ func WithCgroup(path string) SpecOpts {
// WithNamespacedCgroup uses the namespace set on the context to create a
// root directory for containers in the cgroup with the id as the subcgroup
func WithNamespacedCgroup() SpecOpts {
return func(ctx context.Context, _ Client, c *containers.Container, s *specs.Spec) error {
return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return err
@ -260,7 +260,7 @@ func WithNamespacedCgroup() SpecOpts {
// It accepts a valid user string in OCI Image Spec v1.0.0:
// user, uid, user:group, uid:gid, uid:group, user:gid
func WithUser(userstr string) SpecOpts {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
setProcess(s)
parts := strings.Split(userstr, ":")
switch len(parts) {
@ -338,7 +338,7 @@ func WithUser(userstr string) SpecOpts {
// WithUIDGID allows the UID and GID for the Process to be set
func WithUIDGID(uid, gid uint32) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setProcess(s)
s.Process.User.UID = uid
s.Process.User.GID = gid
@ -351,7 +351,7 @@ func WithUIDGID(uid, gid uint32) SpecOpts {
// or uid is not found in /etc/passwd, it sets gid to be the same as
// uid, and does not return an error.
func WithUserID(uid uint32) SpecOpts {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) (err error) {
return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) {
setProcess(s)
if c.Snapshotter == "" && c.SnapshotKey == "" {
if !isRootfsAbs(s.Root.Path) {
@ -404,7 +404,7 @@ func WithUserID(uid uint32) SpecOpts {
// does not exist, or the username is not found in /etc/passwd,
// it returns an error.
func WithUsername(username string) SpecOpts {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) (err error) {
return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) {
setProcess(s)
if c.Snapshotter == "" && c.SnapshotKey == "" {
if !isRootfsAbs(s.Root.Path) {
@ -445,7 +445,7 @@ func WithUsername(username string) SpecOpts {
// WithCapabilities sets Linux capabilities on the process
func WithCapabilities(caps []string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setCapabilities(s)
s.Process.Capabilities.Bounding = caps
@ -518,7 +518,7 @@ func isRootfsAbs(root string) bool {
// WithMaskedPaths sets the masked paths option
func WithMaskedPaths(paths []string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setLinux(s)
s.Linux.MaskedPaths = paths
return nil
@ -527,7 +527,7 @@ func WithMaskedPaths(paths []string) SpecOpts {
// WithReadonlyPaths sets the read only paths option
func WithReadonlyPaths(paths []string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setLinux(s)
s.Linux.ReadonlyPaths = paths
return nil
@ -535,7 +535,7 @@ func WithReadonlyPaths(paths []string) SpecOpts {
}
// WithWriteableSysfs makes any sysfs mounts writeable
func WithWriteableSysfs(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
func WithWriteableSysfs(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
for i, m := range s.Mounts {
if m.Type == "sysfs" {
var options []string
@ -552,7 +552,7 @@ func WithWriteableSysfs(_ context.Context, _ Client, _ *containers.Container, s
}
// WithWriteableCgroupfs makes any cgroup mounts writeable
func WithWriteableCgroupfs(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
func WithWriteableCgroupfs(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
for i, m := range s.Mounts {
if m.Type == "cgroup" {
var options []string
@ -570,7 +570,7 @@ func WithWriteableCgroupfs(_ context.Context, _ Client, _ *containers.Container,
// WithSelinuxLabel sets the process SELinux label
func WithSelinuxLabel(label string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setProcess(s)
s.Process.SelinuxLabel = label
return nil
@ -579,7 +579,7 @@ func WithSelinuxLabel(label string) SpecOpts {
// WithApparmorProfile sets the Apparmor profile for the process
func WithApparmorProfile(profile string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setProcess(s)
s.Process.ApparmorProfile = profile
return nil
@ -587,7 +587,7 @@ func WithApparmorProfile(profile string) SpecOpts {
}
// WithSeccompUnconfined clears the seccomp profile
func WithSeccompUnconfined(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
func WithSeccompUnconfined(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setLinux(s)
s.Linux.Seccomp = nil
return nil

View File

@ -32,7 +32,7 @@ import (
// WithImageConfig configures the spec to from the configuration of an Image
func WithImageConfig(image Image) SpecOpts {
return func(ctx context.Context, client Client, _ *containers.Container, s *specs.Spec) error {
return func(ctx context.Context, client Client, _ *containers.Container, s *Spec) error {
setProcess(s)
ic, err := image.Config(ctx)
if err != nil {
@ -67,7 +67,7 @@ func WithImageConfig(image Image) SpecOpts {
// WithTTY sets the information on the spec as well as the environment variables for
// using a TTY
func WithTTY(width, height int) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setProcess(s)
s.Process.Terminal = true
if s.Process.ConsoleSize == nil {
@ -81,7 +81,7 @@ func WithTTY(width, height int) SpecOpts {
// WithUsername sets the username on the process
func WithUsername(username string) SpecOpts {
return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
setProcess(s)
s.Process.User.Username = username
return nil

View File

@ -76,12 +76,12 @@ func defaultNamespaces() []specs.LinuxNamespace {
}
}
func createDefaultSpec(ctx context.Context, id string) (*specs.Spec, error) {
func createDefaultSpec(ctx context.Context, id string) (*Spec, error) {
ns, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return nil, err
}
s := &specs.Spec{
s := &Spec{
Version: specs.Version,
Root: &specs.Root{
Path: defaultRootfsPath,

View File

@ -22,8 +22,8 @@ import (
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func createDefaultSpec(ctx context.Context, id string) (*specs.Spec, error) {
return &specs.Spec{
func createDefaultSpec(ctx context.Context, id string) (*Spec, error) {
return &Spec{
Version: specs.Version,
Root: &specs.Root{},
Process: &specs.Process{

View File

@ -54,8 +54,8 @@ type Type string
func (t Type) String() string { return string(t) }
const (
// AllPlugins declares that the plugin should be initialized after all others.
AllPlugins Type = "*"
// InternalPlugin implements an internal plugin to containerd
InternalPlugin Type = "io.containerd.internal.v1"
// RuntimePlugin implements a runtime
RuntimePlugin Type = "io.containerd.runtime.v1"
// ServicePlugin implements an internal service

View File

@ -17,6 +17,7 @@
package rootfs
import (
"context"
"crypto/rand"
"encoding/base64"
"fmt"
@ -25,12 +26,12 @@ import (
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/snapshots"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// Layer represents the descriptors for a layer diff. These descriptions
@ -47,16 +48,27 @@ type Layer struct {
// Layers are applied in order they are given, making the first layer the
// bottom-most layer in the layer chain.
func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) {
var chain []digest.Digest
for _, layer := range layers {
if _, err := ApplyLayer(ctx, layer, chain, sn, a); err != nil {
// TODO: possibly wait and retry if extraction of same chain id was in progress
return "", err
chain := make([]digest.Digest, len(layers))
for i, layer := range layers {
chain[i] = layer.Diff.Digest
}
chainID := identity.ChainID(chain)
// Just stat top layer, remaining layers will have their existence checked
// on prepare. Calling prepare on upper layers first guarantees that upper
// layers are not removed while calling stat on lower layers
_, err := sn.Stat(ctx, chainID.String())
if err != nil {
if !errdefs.IsNotFound(err) {
return "", errors.Wrapf(err, "failed to stat snapshot %s", chainID)
}
chain = append(chain, layer.Diff.Digest)
if err := applyLayers(ctx, layers, chain, sn, a); err != nil && !errdefs.IsAlreadyExists(err) {
return "", err
}
return identity.ChainID(chain), nil
}
return chainID, nil
}
// ApplyLayer applies a single layer on top of the given provided layer chain,
@ -64,59 +76,90 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter,
// is returned, if the layer already exists false is returned.
func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) (bool, error) {
var (
parent = identity.ChainID(chain)
chainID = identity.ChainID(append(chain, layer.Diff.Digest))
diff ocispec.Descriptor
chainID = identity.ChainID(append(chain, layer.Diff.Digest)).String()
applied bool
)
_, err := sn.Stat(ctx, chainID.String())
if err == nil {
log.G(ctx).Debugf("Extraction not needed, layer snapshot %s exists", chainID)
return false, nil
} else if !errdefs.IsNotFound(err) {
if _, err := sn.Stat(ctx, chainID); err != nil {
if !errdefs.IsNotFound(err) {
return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID)
}
key := fmt.Sprintf("extract-%s %s", uniquePart(), chainID)
if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts...); err != nil {
if !errdefs.IsAlreadyExists(err) {
return false, err
}
} else {
applied = true
}
}
return applied, nil
}
func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) error {
var (
parent = identity.ChainID(chain[:len(chain)-1])
chainID = identity.ChainID(chain)
layer = layers[len(layers)-1]
diff ocispec.Descriptor
key string
mounts []mount.Mount
err error
)
for {
key = fmt.Sprintf("extract-%s %s", uniquePart(), chainID)
// Prepare snapshot with from parent, label as root
mounts, err := sn.Prepare(ctx, key, parent.String(), opts...)
mounts, err = sn.Prepare(ctx, key, parent.String(), opts...)
if err != nil {
//TODO: If is snapshot exists error, retry
return false, errors.Wrapf(err, "failed to prepare extraction snapshot %q", key)
if errdefs.IsNotFound(err) && len(layers) > 1 {
if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a); err != nil {
if !errdefs.IsAlreadyExists(err) {
return err
}
}
// Do not try applying layers again
layers = nil
continue
} else if errdefs.IsAlreadyExists(err) {
// Try a different key
continue
}
// Already exists should have the caller retry
return errors.Wrapf(err, "failed to prepare extraction snapshot %q", key)
}
break
}
defer func() {
if err != nil {
log.G(ctx).WithError(err).WithField("key", key).Infof("Apply failure, attempting cleanup")
if !errdefs.IsAlreadyExists(err) {
log.G(ctx).WithError(err).WithField("key", key).Infof("apply failure, attempting cleanup")
}
if rerr := sn.Remove(ctx, key); rerr != nil {
log.G(ctx).WithError(rerr).Warnf("Extraction snapshot %q removal failed", key)
log.G(ctx).WithError(rerr).WithField("key", key).Warnf("extraction snapshot removal failed")
}
}
}()
diff, err = a.Apply(ctx, layer.Blob, mounts)
if err != nil {
return false, errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest)
err = errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest)
return err
}
if diff.Digest != layer.Diff.Digest {
err = errors.Errorf("wrong diff id calculated on extraction %q", diff.Digest)
return false, err
return err
}
if err = sn.Commit(ctx, chainID.String(), key, opts...); err != nil {
if !errdefs.IsAlreadyExists(err) {
return false, errors.Wrapf(err, "failed to commit snapshot %s", key)
err = errors.Wrapf(err, "failed to commit snapshot %s", key)
return err
}
// Destination already exists, cleanup key and return without error
err = nil
if err := sn.Remove(ctx, key); err != nil {
return false, errors.Wrapf(err, "failed to cleanup aborted apply %s", key)
}
return false, nil
}
return true, nil
return nil
}
func uniquePart() string {

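A hedged sketch of how a caller might drive the reworked ApplyLayers, assuming it already holds matching slices of uncompressed diff IDs and blob descriptors plus a snapshotter and applier; the unpack helper and its parameters are illustrative, not part of this change.

package example

import (
	"context"

	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/rootfs"
	"github.com/containerd/containerd/snapshots"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// unpack pairs diff IDs with their blob descriptors and applies them bottom-up.
// diffIDs[i] must describe the uncompressed contents of blobs[i].
func unpack(ctx context.Context, diffIDs []digest.Digest, blobs []ocispec.Descriptor,
	sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) {

	layers := make([]rootfs.Layer, len(diffIDs))
	for i := range diffIDs {
		layers[i] = rootfs.Layer{
			Diff: ocispec.Descriptor{
				MediaType: ocispec.MediaTypeImageLayer,
				Digest:    diffIDs[i],
			},
			Blob: blobs[i],
		}
	}
	// Returns the chain ID of the top layer once every snapshot exists;
	// missing parents are prepared and committed on the way up.
	return rootfs.ApplyLayers(ctx, layers, sn, a)
}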
View File

@ -17,13 +17,13 @@
package rootfs
import (
"context"
"fmt"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/snapshots"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/net/context"
)
// CreateDiff creates a layer diff for the given snapshot identifier from the

View File

@ -75,7 +75,7 @@ func createInitLayer(ctx context.Context, parent, initName string, initFn func(s
// TODO: ensure not exist error once added to snapshot package
// Create tempdir
td, err := ioutil.TempDir("", "create-init-")
td, err := ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "create-init-")
if err != nil {
return "", err
}

View File

@ -53,9 +53,9 @@ type Exit struct {
Timestamp time.Time
}
// Runtime is responsible for the creation of containers for a certain platform,
// arch, or custom usage.
type Runtime interface {
// PlatformRuntime is responsible for the creation and management of
// tasks and processes for a platform.
type PlatformRuntime interface {
// ID of the runtime
ID() string
// Create creates a task with the provided id and options.

View File

@ -17,6 +17,7 @@
package server
import (
"context"
"expvar"
"io"
"net"
@ -37,7 +38,6 @@ import (
metrics "github.com/docker/go-metrics"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
@ -146,7 +146,7 @@ func (s *Server) ServeGRPC(l net.Listener) error {
// enable grpc time histograms to measure rpc latencies
grpc_prometheus.EnableHandlingTimeHistogram()
}
// before we start serving the grpc API regster the grpc_prometheus metrics
// before we start serving the grpc API register the grpc_prometheus metrics
// handler. This needs to be the last service registered so that it can collect
// metrics for every other service
grpc_prometheus.Register(s.rpc)

View File

@ -17,6 +17,8 @@
package containers
import (
"context"
"github.com/boltdb/bolt"
eventstypes "github.com/containerd/containerd/api/events"
api "github.com/containerd/containerd/api/services/containers/v1"
@ -27,7 +29,6 @@ import (
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/services"
ptypes "github.com/gogo/protobuf/types"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"

View File

@ -17,12 +17,13 @@
package containers
import (
"context"
api "github.com/containerd/containerd/api/services/containers/v1"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/services"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)

View File

@ -17,6 +17,7 @@
package content
import (
"context"
"io"
"sync"
@ -30,7 +31,6 @@ import (
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"

View File

@ -17,6 +17,8 @@
package diff
import (
"context"
diffapi "github.com/containerd/containerd/api/services/diff/v1"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/diff"
@ -26,7 +28,6 @@ import (
"github.com/containerd/containerd/services"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)

View File

@ -17,11 +17,12 @@
package diff
import (
"context"
diffapi "github.com/containerd/containerd/api/services/diff/v1"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/services"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)

View File

@ -17,6 +17,8 @@
package events
import (
"context"
api "github.com/containerd/containerd/api/services/events/v1"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/events"
@ -24,7 +26,6 @@ import (
"github.com/containerd/containerd/plugin"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)

View File

@ -17,7 +17,7 @@
package images
import (
gocontext "context"
"context"
eventstypes "github.com/containerd/containerd/api/events"
imagesapi "github.com/containerd/containerd/api/services/images/v1"
@ -30,7 +30,6 @@ import (
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/services"
ptypes "github.com/gogo/protobuf/types"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -64,7 +63,7 @@ func init() {
}
type gcScheduler interface {
ScheduleAndWait(gocontext.Context) (gc.Stats, error)
ScheduleAndWait(context.Context) (gc.Stats, error)
}
type local struct {

View File

@ -17,12 +17,13 @@
package images
import (
"context"
imagesapi "github.com/containerd/containerd/api/services/images/v1"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/services"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)

View File

@ -17,6 +17,8 @@
package introspection
import (
context "context"
api "github.com/containerd/containerd/api/services/introspection/v1"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/errdefs"
@ -24,7 +26,6 @@ import (
"github.com/containerd/containerd/plugin"
"github.com/gogo/googleapis/google/rpc"
ptypes "github.com/gogo/protobuf/types"
context "golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
)

View File

@ -17,6 +17,7 @@
package leases
import (
"context"
"crypto/rand"
"encoding/base64"
"fmt"
@ -30,7 +31,6 @@ import (
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/services"
ptypes "github.com/gogo/protobuf/types"
"golang.org/x/net/context"
)
func init() {

View File

@ -17,6 +17,8 @@
package leases
import (
"context"
"google.golang.org/grpc"
api "github.com/containerd/containerd/api/services/leases/v1"
@ -24,7 +26,6 @@ import (
"github.com/containerd/containerd/services"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
func init() {

View File

@ -17,6 +17,7 @@
package namespaces
import (
"context"
"strings"
"github.com/boltdb/bolt"
@ -29,7 +30,6 @@ import (
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/services"
ptypes "github.com/gogo/protobuf/types"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"

View File

@ -17,12 +17,13 @@
package namespaces
import (
"context"
api "github.com/containerd/containerd/api/services/namespaces/v1"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/services"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)

View File

@ -17,7 +17,7 @@
package snapshots
import (
gocontext "context"
"context"
snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1"
"github.com/containerd/containerd/api/types"
@ -29,7 +29,6 @@ import (
"github.com/containerd/containerd/snapshots"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
@ -216,7 +215,7 @@ func (s *service) List(sr *snapshotsapi.ListSnapshotsRequest, ss snapshotsapi.Sn
})
}
)
err = sn.Walk(ss.Context(), func(ctx gocontext.Context, info snapshots.Info) error {
err = sn.Walk(ss.Context(), func(ctx context.Context, info snapshots.Info) error {
buffer = append(buffer, fromInfo(info))
if len(buffer) >= 100 {

View File

@ -18,6 +18,7 @@ package tasks
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
@ -45,7 +46,6 @@ import (
"github.com/containerd/typeurl"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -79,14 +79,14 @@ func initFunc(ic *plugin.InitContext) (interface{}, error) {
return nil, err
}
cs := m.(*metadata.DB).ContentStore()
runtimes := make(map[string]runtime.Runtime)
runtimes := make(map[string]runtime.PlatformRuntime)
for _, rr := range rt {
ri, err := rr.Instance()
if err != nil {
log.G(ic.Context).WithError(err).Warn("could not load runtime instance due to initialization error")
continue
}
r := ri.(runtime.Runtime)
r := ri.(runtime.PlatformRuntime)
runtimes[r.ID()] = r
}
@ -102,7 +102,7 @@ func initFunc(ic *plugin.InitContext) (interface{}, error) {
}
type local struct {
runtimes map[string]runtime.Runtime
runtimes map[string]runtime.PlatformRuntime
db *metadata.DB
store content.Store
publisher events.Publisher
@ -114,7 +114,7 @@ func (l *local) Create(ctx context.Context, r *api.CreateTaskRequest, _ ...grpc.
err error
)
if r.Checkpoint != nil {
checkpointPath, err = ioutil.TempDir("", "ctrd-checkpoint")
checkpointPath, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "ctrd-checkpoint")
if err != nil {
return nil, err
}
@ -450,7 +450,7 @@ func (l *local) Checkpoint(ctx context.Context, r *api.CheckpointTaskRequest, _
if err != nil {
return nil, err
}
image, err := ioutil.TempDir("", "ctd-checkpoint")
image, err := ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "ctd-checkpoint")
if err != nil {
return nil, errdefs.ToGRPC(err)
}
@ -625,7 +625,7 @@ func (l *local) getTaskFromContainer(ctx context.Context, container *containers.
return t, nil
}
func (l *local) getRuntime(name string) (runtime.Runtime, error) {
func (l *local) getRuntime(name string) (runtime.PlatformRuntime, error) {
runtime, ok := l.runtimes[name]
if !ok {
return nil, status.Errorf(codes.NotFound, "unknown runtime %q", name)

View File

@ -17,12 +17,13 @@
package tasks
import (
"context"
api "github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/services"
ptypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)

View File

@ -17,11 +17,12 @@
package version
import (
"context"
api "github.com/containerd/containerd/api/services/version/v1"
"github.com/containerd/containerd/plugin"
ctrdversion "github.com/containerd/containerd/version"
ptypes "github.com/gogo/protobuf/types"
"golang.org/x/net/context"
"google.golang.org/grpc"
)

vendor/github.com/containerd/containerd/sys/env.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sys
import "golang.org/x/sys/unix"
// RunningPrivileged returns true if the effective user ID of the
// calling process is 0
func RunningPrivileged() bool {
return unix.Geteuid() == 0
}
// RunningUnprivileged returns true if the effective user ID of the
// calling process is not 0
func RunningUnprivileged() bool {
return !RunningPrivileged()
}

View File

@ -38,7 +38,7 @@ func SetOOMScore(pid, score int) error {
}
defer f.Close()
if _, err = f.WriteString(strconv.Itoa(score)); err != nil {
if os.IsPermission(err) && system.RunningInUserNS() {
if os.IsPermission(err) && (system.RunningInUserNS() || RunningUnprivileged()) {
return nil
}
return err

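With the check above, a caller that adjusts its own OOM score no longer fails when containerd runs unprivileged or inside a user namespace; a minimal sketch (the score value is arbitrary):

package main

import (
	"log"
	"os"

	"github.com/containerd/containerd/sys"
)

func main() {
	// Permission errors are swallowed when the process is unprivileged or
	// running in a user namespace, so this is safe to call unconditionally.
	if err := sys.SetOOMScore(os.Getpid(), -500); err != nil {
		log.Fatalf("failed to set oom score: %v", err)
	}
	log.Println("oom score adjusted (or skipped without privileges)")
}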
View File

@ -1,4 +1,4 @@
github.com/containerd/go-runc bcb223a061a3dd7de1a89c0b402a60f4dd9bd307
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5
github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
@ -24,17 +24,17 @@ github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
github.com/sirupsen/logrus v1.0.0
github.com/pmezard/go-difflib v1.0.0
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
google.golang.org/grpc v1.10.1
github.com/pkg/errors v0.8.0
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys
github.com/opencontainers/image-spec v1.0.1
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
github.com/Microsoft/go-winio v0.4.5
github.com/Microsoft/hcsshim v0.6.7
github.com/Microsoft/hcsshim v0.6.10
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
@ -44,7 +44,7 @@ github.com/gotestyourself/gotestyourself 44dbf532bbf5767611f6f2a61bded572e337010
github.com/google/go-cmp v0.1.0
# cri dependencies
github.com/containerd/cri v1.0.0-rc.2
github.com/containerd/cri v1.0.0
github.com/containerd/go-cni f2d7272f12d045b16ed924f50e91f9f9cecc55a7
github.com/blang/semver v3.1.0
github.com/containernetworking/cni v0.6.0

View File

@ -21,7 +21,7 @@ var (
Package = "github.com/containerd/containerd"
// Version holds the complete version number. Filled in at linking time.
Version = "1.1.0-rc.2+unknown"
Version = "1.1.0+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.

View File

@ -516,6 +516,7 @@ type RestoreOpts struct {
PidFile string
NoSubreaper bool
NoPivot bool
ConsoleSocket ConsoleSocket
}
func (o *RestoreOpts) args() ([]string, error) {
@ -530,6 +531,9 @@ func (o *RestoreOpts) args() ([]string, error) {
}
out = append(out, "--pid-file", abs)
}
if o.ConsoleSocket != nil {
out = append(out, "--console-socket", o.ConsoleSocket.Path())
}
if o.NoPivot {
out = append(out, "--no-pivot")
}

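A condensed sketch of the calling pattern the new field enables (IDs and paths are placeholders, and only the field shown above is set): create a temporary console socket, hand it to Restore, then receive the master side once the container is restored.

package example

import (
	"context"
	"log"

	runc "github.com/containerd/go-runc"
)

func restoreWithTTY(ctx context.Context, r *runc.Runc, id, bundle string) error {
	socket, err := runc.NewTempConsoleSocket()
	if err != nil {
		return err
	}
	defer socket.Close()

	// ConsoleSocket is the field added above; runc receives --console-socket.
	opts := &runc.RestoreOpts{
		ConsoleSocket: socket,
	}
	if _, err := r.Restore(ctx, id, bundle, opts); err != nil {
		return err
	}
	console, err := socket.ReceiveMaster()
	if err != nil {
		return err
	}
	defer console.Close()
	log.Printf("restored %s with a console attached", id)
	return nil
}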
View File

@ -88,17 +88,17 @@ the alternatives you tried before submitting a PR.
# Reporting security issues
The maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!
Please DO NOT file a public issue, instead send your report privately to
security@opencontainers.org.
Please DO NOT file a public issue, instead send your report privately
to security@docker.com.
The maintainers take security seriously. If you discover a security issue,
please bring it to their attention right away!
Security reports are greatly appreciated and we will publicly thank you
for it. We also like to send gifts—if you're into Docker schwag, make
sure to let us know. We currently do not offer a paid security bounty
program, but are not ruling it out in the future.
If you are reporting a security issue, do not create an issue or file a pull
request on GitHub. Instead, disclose the issue responsibly by sending an email
to security@opencontainers.org (which is inhabited only by the maintainers of
the various OCI projects).
# Copyright and license
Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.

View File

@ -1,3 +1,17 @@
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digest
import (
@ -5,6 +19,7 @@ import (
"fmt"
"hash"
"io"
"regexp"
)
// Algorithm identifies an implementation of a digester by an identifier.
@ -14,9 +29,9 @@ type Algorithm string
// supported digest types
const (
SHA256 Algorithm = "sha256" // sha256 with hex encoding
SHA384 Algorithm = "sha384" // sha384 with hex encoding
SHA512 Algorithm = "sha512" // sha512 with hex encoding
SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
// Canonical is the primary digest algorithm used with the distribution
// project. Other digests may be used but this one is the primary storage
@ -36,6 +51,14 @@ var (
SHA384: crypto.SHA384,
SHA512: crypto.SHA512,
}
// anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
// Note that /A-F/ is disallowed.
anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
}
)
// Available returns true if the digest type is available for use. If this
@ -111,6 +134,14 @@ func (a Algorithm) Hash() hash.Hash {
return algorithms[a].New()
}
// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
// the encoded portion of the digest.
func (a Algorithm) Encode(d []byte) string {
// TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
// add support for back registration, we can modify this accordingly.
return fmt.Sprintf("%x", d)
}
// FromReader returns the digest of the reader using the algorithm.
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
digester := a.Digester()
@ -142,3 +173,20 @@ func (a Algorithm) FromBytes(p []byte) Digest {
func (a Algorithm) FromString(s string) Digest {
return a.FromBytes([]byte(s))
}
// Validate validates the encoded portion string
func (a Algorithm) Validate(encoded string) error {
r, ok := anchoredEncodedRegexps[a]
if !ok {
return ErrDigestUnsupported
}
// Digests must always be hex-encoded, ensuring that their hex portion will
// always be size*2
if a.Size()*2 != len(encoded) {
return ErrDigestInvalidLength
}
if r.MatchString(encoded) {
return nil
}
return ErrDigestInvalidFormat
}

View File

@ -1,3 +1,17 @@
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digest
import (
@ -31,16 +45,21 @@ func NewDigest(alg Algorithm, h hash.Hash) Digest {
// functions. This is also useful for rebuilding digests from binary
// serializations.
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
return Digest(fmt.Sprintf("%s:%x", alg, p))
return NewDigestFromEncoded(alg, alg.Encode(p))
}
// NewDigestFromHex returns a Digest from alg and a the hex encoded digest.
// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded.
func NewDigestFromHex(alg, hex string) Digest {
return Digest(fmt.Sprintf("%s:%s", alg, hex))
return NewDigestFromEncoded(Algorithm(alg), hex)
}
// NewDigestFromEncoded returns a Digest from alg and the encoded digest.
func NewDigestFromEncoded(alg Algorithm, encoded string) Digest {
return Digest(fmt.Sprintf("%s:%s", alg, encoded))
}
// DigestRegexp matches valid digest types.
var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`)
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
@ -82,26 +101,18 @@ func FromString(s string) Digest {
// error if not.
func (d Digest) Validate() error {
s := string(d)
i := strings.Index(s, ":")
// validate i then run through regexp
if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) {
if i <= 0 || i+1 == len(s) {
return ErrDigestInvalidFormat
}
algorithm := Algorithm(s[:i])
algorithm, encoded := Algorithm(s[:i]), s[i+1:]
if !algorithm.Available() {
if !DigestRegexpAnchored.MatchString(s) {
return ErrDigestInvalidFormat
}
return ErrDigestUnsupported
}
// Digests much always be hex-encoded, ensuring that their hex portion will
// always be size*2
if algorithm.Size()*2 != len(s[i+1:]) {
return ErrDigestInvalidLength
}
return nil
return algorithm.Validate(encoded)
}
// Algorithm returns the algorithm portion of the digest. This will panic if
@ -119,12 +130,17 @@ func (d Digest) Verifier() Verifier {
}
}
// Hex returns the hex digest portion of the digest. This will panic if the
// Encoded returns the encoded portion of the digest. This will panic if the
// underlying digest is not in a valid format.
func (d Digest) Hex() string {
func (d Digest) Encoded() string {
return string(d[d.sepIndex()+1:])
}
// Hex is deprecated. Please use Digest.Encoded.
func (d Digest) Hex() string {
return d.Encoded()
}
func (d Digest) String() string {
return string(d)
}
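
A small sketch exercising the added helpers (Encoded, NewDigestFromEncoded, Algorithm.Validate) alongside the existing constructors; nothing here goes beyond the API shown above.

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	d := digest.SHA256.FromBytes([]byte("hello containerd"))

	// Encoded replaces the deprecated Hex accessor.
	fmt.Println(d.Algorithm(), d.Encoded())

	// Round-trip through the new constructor.
	rebuilt := digest.NewDigestFromEncoded(d.Algorithm(), d.Encoded())
	fmt.Println(rebuilt == d) // true

	// Validate checks length and the lower-case hex pattern.
	if err := digest.SHA256.Validate(d.Encoded()); err != nil {
		panic(err)
	}
	if err := digest.SHA256.Validate("NOT-HEX"); err != nil {
		fmt.Println("rejected:", err)
	}
}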

View File

@ -1,3 +1,17 @@
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digest
import "hash"

View File

@ -1,3 +1,17 @@
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package digest provides a generalized type to opaquely represent message
// digests and their operations within the registry. The Digest type is
// designed to serve as a flexible identifier in a content-addressable system.

View File

@ -1,3 +1,17 @@
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digest
import (


@ -36,103 +36,6 @@
// Contexts.
package context // import "golang.org/x/net/context"
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stores using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
@ -149,8 +52,3 @@ func Background() Context {
func TODO() Context {
return todo
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

20 vendor/golang.org/x/net/context/go19.go generated vendored Normal file

@ -0,0 +1,20 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package context
import "context" // standard library's context, as of Go 1.7
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc
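
With the aliases above, code written against golang.org/x/net/context and the standard library's context package interoperate directly on Go 1.9+. A small sketch (the timeout value is arbitrary):

// +build go1.9

package main

import (
	"context"
	"fmt"
	"time"

	xcontext "golang.org/x/net/context"
)

// wait accepts the x/net type; on Go 1.9+ it is the same type as the
// standard library's context.Context, so no conversion is needed.
func wait(ctx xcontext.Context) {
	<-ctx.Done()
	fmt.Println("done:", ctx.Err())
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	wait(ctx)
}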

109 vendor/golang.org/x/net/context/pre_go19.go generated vendored Normal file

@ -0,0 +1,109 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.9
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stores using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()


@ -56,7 +56,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {
}
// registerHTTPSProtocol calls Transport.RegisterProtocol but
// convering panics into errors.
// converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
defer func() {
if e := recover(); e != nil {


@ -87,13 +87,16 @@ type goAwayFlowError struct{}
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
// connErrorReason wraps a ConnectionError with an informative error about why it occurs.
// connError represents an HTTP/2 ConnectionError error code, along
// with a string (for debugging) explaining why.
//
// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(ErrCodeProtocol).
// and converted into ConnectionError(Code), after stashing away
// the Reason into the Framer's errDetail field, accessible via
// the (*Framer).ErrorDetail method.
type connError struct {
Code ErrCode
Reason string
Code ErrCode // the ConnectionError error code
Reason string // additional reason
}
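
In practice a caller only sees the coarse ConnectionError from ReadFrame, while the stashed Reason stays available on the Framer. A hedged sketch of that usage (the wiring below is illustrative, not part of this patch):

package main

import (
	"io"
	"log"

	"golang.org/x/net/http2"
)

// readOneFrame reads a single frame and logs both the returned error and
// the more detailed reason exposed via (*Framer).ErrorDetail.
func readOneFrame(rw io.ReadWriter) {
	fr := http2.NewFramer(rw, rw)
	if _, err := fr.ReadFrame(); err != nil {
		log.Printf("frame error: %v (detail: %v)", err, fr.ErrorDetail())
	}
}

func main() {}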
func (e connError) Error() string {


@ -52,3 +52,5 @@ func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
func reqBodyIsNoBody(body io.ReadCloser) bool {
return body == http.NoBody
}
func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only


@ -376,12 +376,16 @@ func (s *sorter) SortStrings(ss []string) {
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// *) a non-empty string starting with '/', but not with with "//",
// *) a non-empty string starting with '/'
// *) the string '*', for OPTIONS requests.
//
// For now this is only used a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*"
return (len(v) > 0 && v[0] == '/') || v == "*"
}
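
A standalone reproduction of the relaxed rule, useful for seeing which paths are now accepted; the helper below simply mirrors the new validPseudoPath and is not exported by the package:

package main

import "fmt"

// validPseudoPath mirrors the relaxed check above: "//"-prefixed paths
// are now accepted, matching what GFE and Chrome do in practice.
func validPseudoPath(v string) bool {
	return (len(v) > 0 && v[0] == '/') || v == "*"
}

func main() {
	for _, p := range []string{"/", "/a/b", "//double", "*", ""} {
		fmt.Printf("%-10q %v\n", p, validPseudoPath(p))
	}
}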


@ -25,3 +25,5 @@ func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
}
func reqBodyIsNoBody(io.ReadCloser) bool { return false }
func go18httpNoBody() io.ReadCloser { return nil } // for tests only


@ -50,7 +50,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
if p.breakErr != nil {
return 0, p.breakErr
}
if p.b.Len() > 0 {
if p.b != nil && p.b.Len() > 0 {
return p.b.Read(d)
}
if p.err != nil {


@ -2252,6 +2252,7 @@ type responseWriterState struct {
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished
dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64
@ -2333,6 +2334,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
date: date,
})
if err != nil {
rws.dirty = true
return 0, err
}
if endStream {
@ -2354,6 +2356,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
rws.dirty = true
return 0, err
}
}
@ -2365,6 +2368,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
trailers: rws.trailers,
endStream: true,
})
if err != nil {
rws.dirty = true
}
return len(p), err
}
return len(p), nil
@ -2504,7 +2510,7 @@ func cloneHeader(h http.Header) http.Header {
//
// * Handler calls w.Write or w.WriteString ->
// * -> rws.bw (*bufio.Writer) ->
// * (Handler migth call Flush)
// * (Handler might call Flush)
// * -> chunkWriter{rws}
// * -> responseWriterState.writeChunk(p []byte)
// * -> responseWriterState.writeChunk (most of the magic; see comment there)
@ -2543,11 +2549,20 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
func (w *responseWriter) handlerDone() {
rws := w.rws
dirty := rws.dirty
rws.handlerDone = true
w.Flush()
w.rws = nil
if !dirty {
// Only recycle the pool if all prior Write calls to
// the serverConn goroutine completed successfully. If
// they returned earlier due to resets from the peer
// there might still be write goroutines outstanding
// from the serverConn referencing the rws memory. See
// issue 20704.
responseWriterStatePool.Put(rws)
}
}
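
The recycling rule above follows a general pattern: an object that might still be referenced by another goroutine after a failed write must not go back into the pool. A minimal, generic sketch of that pattern (not the http2 implementation itself):

package main

import (
	"bytes"
	"sync"
)

var bufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

type writerState struct {
	buf   *bytes.Buffer
	dirty bool // set when a write failed; the buffer may still be in flight elsewhere
}

func (w *writerState) finish() {
	if w.dirty {
		return // another goroutine may still hold w.buf; let the GC reclaim it
	}
	w.buf.Reset()
	bufPool.Put(w.buf) // clean path: safe to reuse
}

func main() {
	w := &writerState{buf: bufPool.Get().(*bytes.Buffer)}
	w.buf.WriteString("response body")
	w.finish()
}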
// Push errors.
var (


@ -694,7 +694,7 @@ func checkConnHeaders(req *http.Request) error {
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func actualContentLength(req *http.Request) int64 {
if req.Body == nil {
if req.Body == nil || reqBodyIsNoBody(req.Body) {
return 0
}
if req.ContentLength != 0 {
@ -725,8 +725,8 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
}
body := req.Body
hasBody := body != nil
contentLen := actualContentLength(req)
hasBody := contentLen != 0
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
var requestedGzip bool
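
The effect of the two hunks above is that a request built with http.NoBody is treated like one with a nil body: content length 0, hasBody false, and no DATA frames sent for the body. A small illustration (the URL is a placeholder):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// http.NoBody reads as immediately empty; ContentLength stays 0 for it,
	// and the transport change above keys hasBody off that length rather
	// than off Body != nil.
	req, err := http.NewRequest("POST", "https://example.com/", http.NoBody)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.ContentLength)        // 0
	fmt.Println(req.Body == http.NoBody)  // true, yet no body will be sent
}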
@ -1713,16 +1713,27 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
}
// Return any padded flow control now, since we won't
// refund it later on body reads.
if pad := int32(f.Length) - int32(len(data)); pad > 0 {
cs.inflow.add(pad)
cc.inflow.add(pad)
var refund int
if pad := int(f.Length) - len(data); pad > 0 {
refund += pad
}
// Return len(data) now if the stream is already closed,
// since data will never be read.
didReset := cs.didReset
if didReset {
refund += len(data)
}
if refund > 0 {
cc.inflow.add(int32(refund))
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(pad))
cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
cc.fr.WriteWindowUpdate(0, uint32(refund))
if !didReset {
cs.inflow.add(int32(refund))
cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
}
cc.bw.Flush()
cc.wmu.Unlock()
}
didReset := cs.didReset
cc.mu.Unlock()
if len(data) > 0 && !didReset {
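
A worked example of the refund arithmetic above, with purely illustrative numbers:

package main

import "fmt"

func main() {
	// A 1000-byte DATA frame carrying 900 bytes of data (100 bytes of
	// padding) arrives on a stream the client has already reset.
	frameLength, dataLen, streamReset := 1000, 900, true

	refund := 0
	if pad := frameLength - dataLen; pad > 0 {
		refund += pad // padding is never delivered to the body reader
	}
	if streamReset {
		refund += dataLen // the data will never be read either
	}
	// All 1000 bytes go back to the connection-level window; the
	// stream-level window is skipped because the stream is gone.
	fmt.Println(refund) // 1000
}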

26 vendor/golang.org/x/net/idna/idna.go generated vendored

@ -67,6 +67,15 @@ func VerifyDNSLength(verify bool) Option {
return func(o *options) { o.verifyDNSLength = verify }
}
// RemoveLeadingDots removes leading label separators. Leading runes that map to
// dots, such as U+3002, are removed as well.
//
// This is the behavior suggested by the UTS #46 and is adopted by some
// browsers.
func RemoveLeadingDots(remove bool) Option {
return func(o *options) { o.removeLeadingDots = remove }
}
// ValidateLabels sets whether to check the mandatory label validation criteria
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
// of hyphens ('-'), normalization, validity of runes, and the context rules.
@ -133,6 +142,7 @@ func MapForLookup() Option {
o.mapping = validateAndMap
StrictDomainName(true)(o)
ValidateLabels(true)(o)
RemoveLeadingDots(true)(o)
}
}
@ -141,6 +151,7 @@ type options struct {
useSTD3Rules bool
validateLabels bool
verifyDNSLength bool
removeLeadingDots bool
trie *idnaTrie
@ -243,6 +254,7 @@ var (
transitional: true,
useSTD3Rules: true,
validateLabels: true,
removeLeadingDots: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
@ -251,6 +263,7 @@ var (
display = &Profile{options{
useSTD3Rules: true,
validateLabels: true,
removeLeadingDots: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
@ -293,8 +306,10 @@ func (p *Profile) process(s string, toASCII bool) (string, error) {
s, err = p.mapping(p, s)
}
// Remove leading empty labels.
if p.removeLeadingDots {
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
}
}
// It seems like we should only create this error on ToASCII, but the
// UTS 46 conformance tests suggests we should always check this.
if err == nil && p.verifyDNSLength && s == "" {
@ -373,23 +388,20 @@ func validateRegistration(p *Profile, s string) (string, error) {
if !norm.NFC.IsNormalString(s) {
return s, &labelError{s, "V1"}
}
var err error
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
i += sz
// Copy bytes not copied so far.
switch p.simplify(info(v).category()) {
// TODO: handle the NV8 defined in the Unicode idna data set to allow
// for strict conformance to IDNA2008.
case valid, deviation:
case disallowed, mapped, unknown, ignored:
if err == nil {
r, _ := utf8.DecodeRuneInString(s[i:])
err = runeError(r)
return s, runeError(r)
}
i += sz
}
}
return s, err
return s, nil
}
func validateAndMap(p *Profile, s string) (string, error) {
@ -408,7 +420,7 @@ func validateAndMap(p *Profile, s string) (string, error) {
continue
case disallowed:
if err == nil {
r, _ := utf8.DecodeRuneInString(s[i:])
r, _ := utf8.DecodeRuneInString(s[start:])
err = runeError(r)
}
continue
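
A hedged sketch of how the new RemoveLeadingDots option composes with a lookup profile; the exact output is illustrative rather than asserted:

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	p := idna.New(
		idna.MapForLookup(),          // now enables RemoveLeadingDots as well
		idna.RemoveLeadingDots(true), // or set it explicitly
	)
	// U+3002 (ideographic full stop) maps to '.', so a leading one is dropped.
	a, err := p.ToASCII("\u3002example.com")
	fmt.Println(a, err)
}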


@ -39,9 +39,9 @@ var buckets = []bucket{
}
// RenderEvents renders the HTML page typically served at /debug/events.
// It does not do any auth checking; see AuthRequest for the default auth check
// used by the handler registered on http.DefaultServeMux.
// req may be nil.
// It does not do any auth checking. The request may be nil.
//
// Most users will use the Events handler.
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
now := time.Now()
data := &struct {


@ -110,7 +110,18 @@ var AuthRequest = func(req *http.Request) (any, sensitive bool) {
}
func init() {
http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
// TODO(jbd): Serve Traces from /debug/traces in the future?
// There is no requirement for a request to be present to have traces.
http.HandleFunc("/debug/requests", Traces)
http.HandleFunc("/debug/events", Events)
}
// Traces responds with traces from the program.
// The package initialization registers it in http.DefaultServeMux
// at /debug/requests.
//
// It performs authorization by running AuthRequest.
func Traces(w http.ResponseWriter, req *http.Request) {
any, sensitive := AuthRequest(req)
if !any {
http.Error(w, "not allowed", http.StatusUnauthorized)
@ -118,8 +129,14 @@ func init() {
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
Render(w, req, sensitive)
})
http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
}
// Events responds with a page of events collected by EventLogs.
// The package initialization registers it in http.DefaultServeMux
// at /debug/events.
//
// It performs authorization by running AuthRequest.
func Events(w http.ResponseWriter, req *http.Request) {
any, sensitive := AuthRequest(req)
if !any {
http.Error(w, "not allowed", http.StatusUnauthorized)
@ -127,13 +144,12 @@ func init() {
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
RenderEvents(w, req, sensitive)
})
}
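
Exporting Traces and Events makes it possible to mount the debug pages somewhere other than http.DefaultServeMux; hypothetical wiring (address and paths chosen for illustration):

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/trace"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/debug/requests", trace.Traces)
	mux.HandleFunc("/debug/events", trace.Events)
	// Serving on loopback only is one way to keep the debug pages private;
	// AuthRequest still runs inside each handler.
	log.Fatal(http.ListenAndServe("127.0.0.1:6060", mux))
}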
// Render renders the HTML page typically served at /debug/requests.
// It does not do any auth checking; see AuthRequest for the default auth check
// used by the handler registered on http.DefaultServeMux.
// req may be nil.
// It does not do any auth checking. The request may be nil.
//
// Most users will use the Traces handler.
func Render(w io.Writer, req *http.Request, sensitive bool) {
data := &struct {
Families []string