containerd/cmd/containerd-shim-runc-v2/manager/manager_linux.go

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package manager
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
goruntime "runtime"
"syscall"
"time"
"github.com/containerd/cgroups/v3"
"github.com/containerd/cgroups/v3/cgroup1"
cgroupsv2 "github.com/containerd/cgroups/v3/cgroup2"
"github.com/containerd/containerd/v2/cmd/containerd-shim-runc-v2/process"
"github.com/containerd/containerd/v2/cmd/containerd-shim-runc-v2/runc"
"github.com/containerd/containerd/v2/core/mount"
"github.com/containerd/containerd/v2/core/runtime/v2/runc/options"
"github.com/containerd/containerd/v2/core/runtime/v2/shim"
"github.com/containerd/containerd/v2/pkg/namespaces"
"github.com/containerd/containerd/v2/pkg/oci"
"github.com/containerd/containerd/v2/pkg/schedcore"
runcC "github.com/containerd/go-runc"
"github.com/containerd/log"
"golang.org/x/sys/unix"
)
// NewShimManager returns an implementation of the shim manager
// using runc
func NewShimManager(name string) shim.Manager {
return &manager{
name: name,
}
}
// groupLabels specifies how the shim groups services.
// It currently supports the runc.v2-specific .group label and the
// standard Kubernetes CRI sandbox-id label. Order matters in this list.
var groupLabels = []string{
"io.containerd.runc.v2.group",
"io.kubernetes.cri.sandbox-id",
}
// spec is a shallow version of [oci.Spec] containing only the
// fields we need here (the annotations). We use a shallow struct
// to reduce the overhead of unmarshaling.
type spec struct {
// Annotations contains arbitrary metadata for the container.
Annotations map[string]string `json:"annotations,omitempty"`
}
type manager struct {
name string
}
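// newCommand builds the exec.Cmd used to re-exec the current binary as the
// shim for the given container, passing the namespace, id and containerd
// address as flags and placing the new process in its own process group.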
func newCommand(ctx context.Context, id, containerdAddress, containerdTTRPCAddress string, debug bool) (*exec.Cmd, error) {
ns, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return nil, err
}
self, err := os.Executable()
if err != nil {
return nil, err
}
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
args := []string{
"-namespace", ns,
"-id", id,
"-address", containerdAddress,
}
if debug {
args = append(args, "-debug")
}
cmd := exec.Command(self, args...)
cmd.Dir = cwd
cmd.Env = append(os.Environ(), "GOMAXPROCS=4")
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
return cmd, nil
}
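// readSpec decodes the bundle's OCI spec (config.json) from the current
// working directory, keeping only the annotations used for shim grouping.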
func readSpec() (*spec, error) {
f, err := os.Open(oci.ConfigFilename)
if err != nil {
return nil, err
}
defer f.Close()
var s spec
if err := json.NewDecoder(f).Decode(&s); err != nil {
return nil, err
}
return &s, nil
}
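// Name returns the runtime name this shim manager was constructed with.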
func (m manager) Name() string {
return m.name
}
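// Start launches a new shim process for the container, or reuses the shim
// already serving the container's group, and returns the bootstrap
// parameters (protocol, version and socket address) used to connect to it.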
func (manager) Start(ctx context.Context, id string, opts shim.StartOpts) (_ shim.BootstrapParams, retErr error) {
var params shim.BootstrapParams
params.Version = 3
params.Protocol = "ttrpc"
cmd, err := newCommand(ctx, id, opts.Address, opts.TTRPCAddress, opts.Debug)
if err != nil {
return params, err
}
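// Determine the socket grouping: containers that share one of the group
// labels (for example a Kubernetes pod sandbox) are served by a single shim.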
grouping := id
spec, err := readSpec()
if err != nil {
return params, err
}
for _, group := range groupLabels {
if groupID, ok := spec.Annotations[group]; ok {
grouping = groupID
break
}
}
address, err := shim.SocketAddress(ctx, opts.Address, grouping)
if err != nil {
return params, err
}
socket, err := shim.NewSocket(address)
if err != nil {
// The only time this can happen is if there is a bug and the socket
// was not cleaned up in the shim's cleanup method, or if the grouping
// functionality is in use and the new process should be served by the
// same shim as an existing container.
if !shim.SocketEaddrinuse(err) {
return params, fmt.Errorf("create new shim socket: %w", err)
}
if shim.CanConnect(address) {
params.Address = address
return params, nil
}
if err := shim.RemoveSocket(address); err != nil {
return params, fmt.Errorf("remove pre-existing socket: %w", err)
}
if socket, err = shim.NewSocket(address); err != nil {
return params, fmt.Errorf("try create new shim socket 2x: %w", err)
}
}
defer func() {
if retErr != nil {
socket.Close()
_ = shim.RemoveSocket(address)
}
}()
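// Pass the listening socket to the shim process as an inherited file descriptor.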
f, err := socket.File()
if err != nil {
return params, err
}
cmd.ExtraFiles = append(cmd.ExtraFiles, f)
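// When SCHED_CORE is set, create a core scheduling cookie before forking so
// the shim process started below inherits it; the OS thread is locked so the
// fork happens from the thread the cookie was created on.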
goruntime.LockOSThread()
if os.Getenv("SCHED_CORE") != "" {
if err := schedcore.Create(schedcore.ProcessGroup); err != nil {
return params, fmt.Errorf("enable sched core support: %w", err)
}
}
if err := cmd.Start(); err != nil {
f.Close()
return params, err
}
goruntime.UnlockOSThread()
defer func() {
if retErr != nil {
cmd.Process.Kill()
}
}()
// make sure to wait after start
go cmd.Wait()
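// If the runtime options name a shim cgroup, move the new shim process into
// it, using the cgroup v2 or v1 API depending on the host's cgroup mode.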
if opts, err := shim.ReadRuntimeOptions[*options.Options](os.Stdin); err == nil {
if opts.ShimCgroup != "" {
if cgroups.Mode() == cgroups.Unified {
cg, err := cgroupsv2.Load(opts.ShimCgroup)
if err != nil {
return params, fmt.Errorf("failed to load cgroup %s: %w", opts.ShimCgroup, err)
}
if err := cg.AddProc(uint64(cmd.Process.Pid)); err != nil {
return params, fmt.Errorf("failed to join cgroup %s: %w", opts.ShimCgroup, err)
}
} else {
cg, err := cgroup1.Load(cgroup1.StaticPath(opts.ShimCgroup))
if err != nil {
return params, fmt.Errorf("failed to load cgroup %s: %w", opts.ShimCgroup, err)
}
if err := cg.AddProc(uint64(cmd.Process.Pid)); err != nil {
return params, fmt.Errorf("failed to join cgroup %s: %w", opts.ShimCgroup, err)
}
}
}
}
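// Adjust the shim's OOM score relative to its parent so that containerd
// itself is less likely to be killed under memory pressure than the shim.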
if err := shim.AdjustOOMScore(cmd.Process.Pid); err != nil {
return params, fmt.Errorf("failed to adjust OOM score for shim: %w", err)
}
params.Address = address
return params, nil
}
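// Stop force-deletes the runc container for the given id, unmounts its
// rootfs, and reports the exit status of a killed container along with the
// init pid read from the bundle.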
func (manager) Stop(ctx context.Context, id string) (shim.StopStatus, error) {
cwd, err := os.Getwd()
if err != nil {
return shim.StopStatus{}, err
}
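// The shim's working directory is a bundle directory; the bundle for the
// requested id is a sibling directory under the same parent.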
path := filepath.Join(filepath.Dir(cwd), id)
ns, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return shim.StopStatus{}, err
}
runtime, err := runc.ReadRuntime(path)
if err != nil {
return shim.StopStatus{}, err
}
opts, err := runc.ReadOptions(path)
if err != nil {
return shim.StopStatus{}, err
}
root := process.RuncRoot
if opts != nil && opts.Root != "" {
root = opts.Root
}
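// Force-delete the container with runc and unmount its rootfs; failures are
// logged but do not fail Stop, since the container may already be gone.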
r := process.NewRunc(root, path, ns, runtime, false)
if err := r.Delete(ctx, id, &runcC.DeleteOpts{
Force: true,
}); err != nil {
log.G(ctx).WithError(err).Warn("failed to remove runc container")
}
if err := mount.UnmountRecursive(filepath.Join(path, "rootfs"), 0); err != nil {
log.G(ctx).WithError(err).Warn("failed to cleanup rootfs mount")
}
pid, err := runcC.ReadPidFile(filepath.Join(path, process.InitPidFile))
if err != nil {
log.G(ctx).WithError(err).Warn("failed to read init pid file")
}
return shim.StopStatus{
ExitedAt: time.Now(),
ExitStatus: 128 + int(unix.SIGKILL),
Pid: pid,
}, nil
}
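// A minimal sketch of how this manager is typically wired into the shim
// binary's entry point (an assumption for illustration; the actual main
// package lives outside this file):
//
//	shim.Run(context.Background(), manager.NewShimManager("io.containerd.runc.v2"))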