
Closes #603 This adds logging facilities at the shim level to provide minimal I/O overhead and pluggable logging options. Log handling is done within the shim so that all I/O, cpu, and memory can be charged to the container. A sample logging driver setting up logging for a container the systemd journal looks like this: ```go package main import ( "bufio" "context" "fmt" "io" "sync" "github.com/containerd/containerd/runtime/v2/logging" "github.com/coreos/go-systemd/journal" ) func main() { logging.Run(log) } func log(ctx context.Context, config *logging.Config, ready func() error) error { // construct any log metadata for the container vars := map[string]string{ "SYSLOG_IDENTIFIER": fmt.Sprintf("%s:%s", config.Namespace, config.ID), } var wg sync.WaitGroup wg.Add(2) // forward both stdout and stderr to the journal go copy(&wg, config.Stdout, journal.PriInfo, vars) go copy(&wg, config.Stderr, journal.PriErr, vars) // signal that we are ready and setup for the container to be started if err := ready(); err != nil { return err } wg.Wait() return nil } func copy(wg *sync.WaitGroup, r io.Reader, pri journal.Priority, vars map[string]string) { defer wg.Done() s := bufio.NewScanner(r) for s.Scan() { if s.Err() != nil { return } journal.Send(s.Text(), pri, vars) } } ``` A `logging` package has been created to assist log developers create logging plugins for containerd. This uses a URI based approach for logging drivers that can be expanded in the future. Supported URI scheme's are: * binary * fifo * file You can pass the log url via ctr on the command line: ```bash > ctr run --rm --runtime io.containerd.runc.v2 --log-uri binary://shim-journald docker.io/library/redis:alpine redis ``` ```bash > journalctl -f -t default:redis -- Logs begin at Tue 2018-12-11 16:29:51 EST. -- Mar 08 16:08:22 deathstar default:redis[120760]: 1:C 08 Mar 2019 21:08:22.703 # Warning: no config file specified, using the default config. 
In order to specify a config file use redis-server /path/to/redis.conf Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.704 # You requested maxclients of 10000 requiring at least 10032 max file descriptors. Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.704 # Server can't set maximum open files to 10032 because of OS error: Operation not permitted. Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.704 # Current maximum open files is 1024. maxclients has been reduced to 992 to compensate for low ulimit. If you need higher maxclients increase 'ulimit -n'. Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 * Running mode=standalone, port=6379. Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 # Server initialized Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect. Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled. 
Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 * Ready to accept connections Mar 08 16:08:50 deathstar default:redis[120760]: 1:signal-handler (1552079330) Received SIGINT scheduling shutdown... Mar 08 16:08:50 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:50.405 # User requested shutdown... Mar 08 16:08:50 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:50.406 * Saving the final RDB snapshot before exiting. Mar 08 16:08:50 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:50.452 * DB saved on disk Mar 08 16:08:50 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:50.453 # Redis is now ready to exit, bye bye... ``` The following client side Opts are added: ```go // LogURI provides the raw logging URI func LogURI(uri *url.URL) Creator { } // BinaryIO forwards container STDOUT|STDERR directly to a logging binary func BinaryIO(binary string, args map[string]string) Creator {} ``` Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
268 lines
6.1 KiB
Go
268 lines
6.1 KiB
Go
// +build !windows
|
|
|
|
/*
|
|
Copyright The containerd Authors.
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
you may not use this file except in compliance with the License.
|
|
You may obtain a copy of the License at
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
See the License for the specific language governing permissions and
|
|
limitations under the License.
|
|
*/
|
|
|
|
package proc
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"io"
|
|
"os"
|
|
"path/filepath"
|
|
"sync"
|
|
"syscall"
|
|
"time"
|
|
|
|
"golang.org/x/sys/unix"
|
|
|
|
"github.com/containerd/console"
|
|
"github.com/containerd/containerd/runtime/proc"
|
|
"github.com/containerd/fifo"
|
|
runc "github.com/containerd/go-runc"
|
|
specs "github.com/opencontainers/runtime-spec/specs-go"
|
|
"github.com/pkg/errors"
|
|
)
|
|
|
|
// execProcess represents an additional ("exec") process started inside an
// already-running container. State-dependent operations (Start, Delete,
// Kill, Resize, SetExited) are delegated to the current execState.
type execProcess struct {
	// wg tracks the I/O copy goroutines (handed to CopyConsole/pio.Copy);
	// delete() waits on it before closing anything.
	wg sync.WaitGroup

	// execState is the state-machine implementation this process is
	// currently in; it guards which operations are legal.
	execState execState

	mu      sync.Mutex // protects the mutable fields below
	id      string
	console console.Console
	io      *processIO
	status  int       // exit status; meaningful once exited is set
	exited  time.Time // time the process exited
	pid     *safePid
	closers []io.Closer // closed during delete()
	stdin   io.Closer
	stdio   proc.Stdio
	path    string // directory holding the "<id>.pid" file
	spec    specs.Process

	parent    *Init         // the container's init process
	waitBlock chan struct{} // closed by setExited to release Wait()
}
|
|
|
|
// Wait blocks until the exec process has exited (waitBlock is closed by
// setExited, or by start when the runtime exec fails).
func (e *execProcess) Wait() {
	<-e.waitBlock
}
|
|
|
|
// ID returns the id of the exec process.
func (e *execProcess) ID() string {
	return e.id
}
|
|
|
|
// Pid returns the pid of the exec process (0 until start has recorded it).
func (e *execProcess) Pid() int {
	return e.pid.get()
}
|
|
|
|
func (e *execProcess) ExitStatus() int {
|
|
e.mu.Lock()
|
|
defer e.mu.Unlock()
|
|
return e.status
|
|
}
|
|
|
|
func (e *execProcess) ExitedAt() time.Time {
|
|
e.mu.Lock()
|
|
defer e.mu.Unlock()
|
|
return e.exited
|
|
}
|
|
|
|
// SetExited records the exit of the process, delegating to the current
// state so illegal transitions are handled by the state machine.
func (e *execProcess) SetExited(status int) {
	e.mu.Lock()
	defer e.mu.Unlock()

	e.execState.SetExited(status)
}
|
|
|
|
// setExited is the state-machine callback that records the exit status and
// time, shuts down the console (if any), and unblocks all Wait()ers.
// Callers are expected to hold e.mu (see SetExited).
func (e *execProcess) setExited(status int) {
	e.status = status
	e.exited = time.Now()
	// Return value of ShutdownConsole is intentionally not checked here —
	// NOTE(review): best-effort cleanup; confirm nothing needs the error.
	e.parent.Platform.ShutdownConsole(context.Background(), e.console)
	// Must happen last so Wait() observers see status/exited fully set.
	close(e.waitBlock)
}
|
|
|
|
// Delete removes the exec process' on-disk state, delegating to the
// current state so deletion is only performed when legal.
func (e *execProcess) Delete(ctx context.Context) error {
	e.mu.Lock()
	defer e.mu.Unlock()

	return e.execState.Delete(ctx)
}
|
|
|
|
// delete is the state-machine callback performing the actual cleanup:
// it waits for the I/O copy goroutines to finish, closes all registered
// closers and the process I/O, and removes the pid file.
func (e *execProcess) delete(ctx context.Context) error {
	// Wait for copy goroutines first so closing the fds underneath them
	// cannot race with in-flight reads/writes.
	e.wg.Wait()
	if e.io != nil {
		for _, c := range e.closers {
			c.Close()
		}
		e.io.Close()
	}
	pidfile := filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id))
	// silently ignore error: the pid file may already be gone
	os.Remove(pidfile)
	return nil
}
|
|
|
|
// Resize changes the terminal window size, delegating to the current
// state so it is only attempted when the process can accept it.
func (e *execProcess) Resize(ws console.WinSize) error {
	e.mu.Lock()
	defer e.mu.Unlock()

	return e.execState.Resize(ws)
}
|
|
|
|
func (e *execProcess) resize(ws console.WinSize) error {
|
|
if e.console == nil {
|
|
return nil
|
|
}
|
|
return e.console.Resize(ws)
|
|
}
|
|
|
|
// Kill sends sig to the exec process, delegating to the current state.
// The "all" flag is ignored for exec processes: false is always passed
// down, since only the single exec process itself is signaled.
func (e *execProcess) Kill(ctx context.Context, sig uint32, _ bool) error {
	e.mu.Lock()
	defer e.mu.Unlock()

	return e.execState.Kill(ctx, sig, false)
}
|
|
|
|
func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error {
|
|
pid := e.pid.get()
|
|
if pid != 0 {
|
|
if err := unix.Kill(pid, syscall.Signal(sig)); err != nil {
|
|
return errors.Wrapf(checkKillError(err), "exec kill error")
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// Stdin returns the closer for the process stdin fifo (nil if stdin was
// never opened — see openStdin).
func (e *execProcess) Stdin() io.Closer {
	return e.stdin
}
|
|
|
|
// Stdio returns the stdio paths/configuration of the exec process.
func (e *execProcess) Stdio() proc.Stdio {
	return e.stdio
}
|
|
|
|
// Start launches the exec process, delegating to the current state so a
// process cannot be started twice or from an invalid state.
func (e *execProcess) Start(ctx context.Context) error {
	e.mu.Lock()
	defer e.mu.Unlock()

	return e.execState.Start(ctx)
}
|
|
|
|
// start is the state-machine callback performing the actual "runc exec":
// it prepares I/O (a console socket for terminal execs, pipes otherwise),
// invokes the OCI runtime, opens stdin, wires up the copy goroutines, and
// finally records the new process pid read back from the pid file.
func (e *execProcess) start(ctx context.Context) (err error) {
	// The reaper may receive exit signal right after
	// the container is started, before the e.pid is updated.
	// In that case, we want to block the signal handler to
	// access e.pid until it is updated.
	e.pid.Lock()
	defer e.pid.Unlock()

	var (
		socket  *runc.Socket
		pio     *processIO
		pidFile = newExecPidFile(e.path, e.id)
	)
	if e.stdio.Terminal {
		// Terminal exec: runc will send the pty master back over this
		// temporary console socket.
		if socket, err = runc.NewTempConsoleSocket(); err != nil {
			return errors.Wrap(err, "failed to create runc console socket")
		}
		defer socket.Close()
	} else {
		// Non-terminal exec: create pipe-based process I/O.
		if pio, err = createIO(ctx, e.id, e.parent.IoUID, e.parent.IoGID, e.stdio); err != nil {
			return errors.Wrap(err, "failed to create init process I/O")
		}
		e.io = pio
	}
	opts := &runc.ExecOpts{
		PidFile: pidFile.Path(),
		Detach:  true,
	}
	if pio != nil {
		opts.IO = pio.IO()
	}
	if socket != nil {
		opts.ConsoleSocket = socket
	}
	if err := e.parent.runtime.Exec(ctx, e.parent.id, e.spec, opts); err != nil {
		// The process will never run: unblock any Wait()ers before
		// returning the runtime error.
		close(e.waitBlock)
		return e.parent.runtimeError(err, "OCI runtime exec failed")
	}
	if e.stdio.Stdin != "" {
		if err := e.openStdin(e.stdio.Stdin); err != nil {
			return err
		}
	}
	// Bound the console handshake / pid-file read; note ctx is shadowed
	// for the rest of the function.
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	if socket != nil {
		console, err := socket.ReceiveMaster()
		if err != nil {
			return errors.Wrap(err, "failed to retrieve console master")
		}
		if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg); err != nil {
			return errors.Wrap(err, "failed to start console copy")
		}
	} else {
		if err := pio.Copy(ctx, &e.wg); err != nil {
			return errors.Wrap(err, "failed to start io pipe copy")
		}
	}
	// runc wrote the exec'd process pid into the pid file; record it so
	// the reaper/signal handler can find it (we still hold e.pid.Lock).
	pid, err := pidFile.Read()
	if err != nil {
		return errors.Wrap(err, "failed to retrieve OCI runtime exec pid")
	}
	e.pid.pid = pid
	return nil
}
|
|
|
|
func (e *execProcess) openStdin(path string) error {
|
|
sc, err := fifo.OpenFifo(context.Background(), path, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
|
|
if err != nil {
|
|
return errors.Wrapf(err, "failed to open stdin fifo %s", path)
|
|
}
|
|
e.stdin = sc
|
|
e.closers = append(e.closers, sc)
|
|
return nil
|
|
}
|
|
|
|
// Status reports the state of the exec process as one of "pausing",
// "paused", "created", "running" or "stopped". The parent container's
// status is consulted first (outside e.mu, to keep lock ordering simple),
// then the pid is probed with signal 0 to distinguish running from
// stopped.
func (e *execProcess) Status(ctx context.Context) (string, error) {
	s, err := e.parent.Status(ctx)
	if err != nil {
		return "", err
	}
	// if the container as a whole is in the pausing/paused state, so are all
	// other processes inside the container, use container state here
	switch s {
	case "paused", "pausing":
		return s, nil
	}
	e.mu.Lock()
	defer e.mu.Unlock()
	// if we don't have a pid then the exec process has just been created
	if e.pid.get() == 0 {
		return "created", nil
	}
	// if we have a pid and it can be signaled (signal 0 = existence probe),
	// the process is running
	if err := unix.Kill(e.pid.get(), 0); err == nil {
		return "running", nil
	}
	// else if we have a pid but it can no longer be signaled, it has stopped
	return "stopped", nil
}
|