
Closes #603

This adds logging facilities at the shim level to provide minimal I/O overhead and pluggable logging options. Log handling is done within the shim so that all I/O, CPU, and memory can be charged to the container.

A sample logging driver that sets up logging for a container to the systemd journal looks like this:

```go
package main

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"sync"

	"github.com/containerd/containerd/runtime/v2/logging"
	"github.com/coreos/go-systemd/journal"
)

func main() {
	logging.Run(log)
}

func log(ctx context.Context, config *logging.Config, ready func() error) error {
	// construct any log metadata for the container
	vars := map[string]string{
		"SYSLOG_IDENTIFIER": fmt.Sprintf("%s:%s", config.Namespace, config.ID),
	}
	var wg sync.WaitGroup
	wg.Add(2)
	// forward both stdout and stderr to the journal
	go copy(&wg, config.Stdout, journal.PriInfo, vars)
	go copy(&wg, config.Stderr, journal.PriErr, vars)

	// signal that we are ready and set up for the container to be started
	if err := ready(); err != nil {
		return err
	}
	wg.Wait()
	return nil
}

func copy(wg *sync.WaitGroup, r io.Reader, pri journal.Priority, vars map[string]string) {
	defer wg.Done()
	s := bufio.NewScanner(r)
	for s.Scan() {
		if s.Err() != nil {
			return
		}
		journal.Send(s.Text(), pri, vars)
	}
}
```

A `logging` package has been created to help developers create logging plugins for containerd. It uses a URI-based approach for logging drivers that can be expanded in the future. Supported URI schemes are:

* binary
* fifo
* file

You can pass the log URI to ctr on the command line:

```bash
> ctr run --rm --runtime io.containerd.runc.v2 --log-uri binary://shim-journald docker.io/library/redis:alpine redis
```

```bash
> journalctl -f -t default:redis

-- Logs begin at Tue 2018-12-11 16:29:51 EST. --
Mar 08 16:08:22 deathstar default:redis[120760]: 1:C 08 Mar 2019 21:08:22.703 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.704 # You requested maxclients of 10000 requiring at least 10032 max file descriptors.
Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.704 # Server can't set maximum open files to 10032 because of OS error: Operation not permitted.
Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.704 # Current maximum open files is 1024. maxclients has been reduced to 992 to compensate for low ulimit. If you need higher maxclients increase 'ulimit -n'.
Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 * Running mode=standalone, port=6379.
Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 # Server initialized
Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.
Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.
Mar 08 16:08:22 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:22.705 * Ready to accept connections
Mar 08 16:08:50 deathstar default:redis[120760]: 1:signal-handler (1552079330) Received SIGINT scheduling shutdown...
Mar 08 16:08:50 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:50.405 # User requested shutdown...
Mar 08 16:08:50 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:50.406 * Saving the final RDB snapshot before exiting.
Mar 08 16:08:50 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:50.452 * DB saved on disk
Mar 08 16:08:50 deathstar default:redis[120760]: 1:M 08 Mar 2019 21:08:50.453 # Redis is now ready to exit, bye bye...
```

The following client-side Opts are added:

```go
// LogURI provides the raw logging URI
func LogURI(uri *url.URL) Creator {}

// BinaryIO forwards container STDOUT|STDERR directly to a logging binary
func BinaryIO(binary string, args map[string]string) Creator {}
```

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
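For context, below is a minimal client-side sketch of how these creators might be used: it starts a container whose stdout/stderr are handed to a logging binary via `BinaryIO` instead of the usual `cio.NewCreator(cio.WithStdio)`. The socket path, namespace, image, container/snapshot IDs, and the `/usr/local/bin/shim-journald` driver path are illustrative assumptions, not part of this change:

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/oci"
)

func main() {
	// connect to containerd and pick a namespace for the demo
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "default")

	// pull the image used in the ctr example above
	image, err := client.Pull(ctx, "docker.io/library/redis:alpine", containerd.WithPullUnpack)
	if err != nil {
		log.Fatal(err)
	}

	container, err := client.NewContainer(ctx, "redis",
		containerd.WithNewSnapshot("redis-snapshot", image),
		containerd.WithNewSpec(oci.WithImageConfig(image)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer container.Delete(ctx, containerd.WithSnapshotCleanup)

	// hand the task's stdout/stderr to a logging binary instead of
	// cio.NewCreator(cio.WithStdio); the driver path is a placeholder
	task, err := container.NewTask(ctx, cio.BinaryIO("/usr/local/bin/shim-journald", nil))
	if err != nil {
		log.Fatal(err)
	}
	defer task.Delete(ctx)

	if err := task.Start(ctx); err != nil {
		log.Fatal(err)
	}
}
```

The creator itself only records a `binary://` URI in the task's IO config; the shim resolves that URI and launches the binary when the container starts, so the log stream never passes through the client.
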
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cio

import (
	"context"
	"fmt"
	"io"
	"net/url"
	"os"
	"sync"

	"github.com/containerd/containerd/defaults"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 32<<10)
		return &buffer
	},
}

// Config holds the IO configurations.
type Config struct {
	// Terminal is true if one has been allocated
	Terminal bool
	// Stdin path
	Stdin string
	// Stdout path
	Stdout string
	// Stderr path
	Stderr string
}

// IO holds the io information for a task or process
type IO interface {
	// Config returns the IO configuration.
	Config() Config
	// Cancel aborts all current io operations.
	Cancel()
	// Wait blocks until all io copy operations have completed.
	Wait()
	// Close cleans up all open io resources. Cancel() is always called before
	// Close()
	Close() error
}

// Creator creates new IO sets for a task
type Creator func(id string) (IO, error)

// Attach allows callers to reattach to running tasks
//
// There should only be one reader for a task's IO set
// because fifos can only be read by one reader, or the output
// will be sent only to the first reader
type Attach func(*FIFOSet) (IO, error)

// FIFOSet is a set of file paths to FIFOs for a task's standard IO streams
type FIFOSet struct {
	Config
	close func() error
}

// Close the FIFOSet
func (f *FIFOSet) Close() error {
	if f.close != nil {
		return f.close()
	}
	return nil
}

// NewFIFOSet returns a new FIFOSet from a Config and a close function
func NewFIFOSet(config Config, close func() error) *FIFOSet {
	return &FIFOSet{Config: config, close: close}
}

// Streams used to configure a Creator or Attach
type Streams struct {
	Stdin    io.Reader
	Stdout   io.Writer
	Stderr   io.Writer
	Terminal bool
	FIFODir  string
}

// Opt customize options for creating a Creator or Attach
type Opt func(*Streams)

// WithStdio sets stream options to the standard input/output streams
func WithStdio(opt *Streams) {
	WithStreams(os.Stdin, os.Stdout, os.Stderr)(opt)
}

// WithTerminal sets the terminal option
func WithTerminal(opt *Streams) {
	opt.Terminal = true
}

// WithStreams sets the stream options to the specified Reader and Writers
func WithStreams(stdin io.Reader, stdout, stderr io.Writer) Opt {
	return func(opt *Streams) {
		opt.Stdin = stdin
		opt.Stdout = stdout
		opt.Stderr = stderr
	}
}

// WithFIFODir sets the fifo directory.
// e.g. "/run/containerd/fifo", "/run/users/1001/containerd/fifo"
func WithFIFODir(dir string) Opt {
	return func(opt *Streams) {
		opt.FIFODir = dir
	}
}

// NewCreator returns an IO creator from the options
func NewCreator(opts ...Opt) Creator {
	streams := &Streams{}
	for _, opt := range opts {
		opt(streams)
	}
	if streams.FIFODir == "" {
		streams.FIFODir = defaults.DefaultFIFODir
	}
	return func(id string) (IO, error) {
		fifos, err := NewFIFOSetInDir(streams.FIFODir, id, streams.Terminal)
		if err != nil {
			return nil, err
		}
		if streams.Stdin == nil {
			fifos.Stdin = ""
		}
		if streams.Stdout == nil {
			fifos.Stdout = ""
		}
		if streams.Stderr == nil {
			fifos.Stderr = ""
		}
		return copyIO(fifos, streams)
	}
}

// NewAttach attaches the existing io for a task to the provided io.Reader/Writers
func NewAttach(opts ...Opt) Attach {
	streams := &Streams{}
	for _, opt := range opts {
		opt(streams)
	}
	return func(fifos *FIFOSet) (IO, error) {
		if fifos == nil {
			return nil, fmt.Errorf("cannot attach, missing fifos")
		}
		return copyIO(fifos, streams)
	}
}

// NullIO redirects the container's IO into /dev/null
func NullIO(_ string) (IO, error) {
	return &cio{}, nil
}

// cio is a basic container IO implementation.
type cio struct {
	config  Config
	wg      *sync.WaitGroup
	closers []io.Closer
	cancel  context.CancelFunc
}

func (c *cio) Config() Config {
	return c.config
}

func (c *cio) Wait() {
	if c.wg != nil {
		c.wg.Wait()
	}
}

func (c *cio) Close() error {
	var lastErr error
	for _, closer := range c.closers {
		if closer == nil {
			continue
		}
		if err := closer.Close(); err != nil {
			lastErr = err
		}
	}
	return lastErr
}

func (c *cio) Cancel() {
	if c.cancel != nil {
		c.cancel()
	}
}

type pipes struct {
	Stdin  io.WriteCloser
	Stdout io.ReadCloser
	Stderr io.ReadCloser
}

// DirectIO allows task IO to be handled externally by the caller
type DirectIO struct {
	pipes
	cio
}

var (
	_ IO = &DirectIO{}
	_ IO = &logURI{}
)

// LogURI provides the raw logging URI
func LogURI(uri *url.URL) Creator {
	return func(_ string) (IO, error) {
		return &logURI{
			config: Config{
				Stdout: uri.String(),
				Stderr: uri.String(),
			},
		}, nil
	}
}

// BinaryIO forwards container STDOUT|STDERR directly to a logging binary
func BinaryIO(binary string, args map[string]string) Creator {
	return func(_ string) (IO, error) {
		uri := &url.URL{
			Scheme: "binary",
			Host:   binary,
		}
		// url.URL.Query returns a copy of the query values, so build the
		// arguments and assign them back to RawQuery to keep them in the URI
		q := uri.Query()
		for k, v := range args {
			q.Set(k, v)
		}
		uri.RawQuery = q.Encode()
		return &logURI{
			config: Config{
				Stdout: uri.String(),
				Stderr: uri.String(),
			},
		}, nil
	}
}

// LogFile creates a file on disk that logs the task's STDOUT,STDERR.
// If the log file already exists, the logs will be appended to the file.
func LogFile(path string) Creator {
	return func(_ string) (IO, error) {
		uri := &url.URL{
			Scheme: "file",
			Host:   path,
		}
		return &logURI{
			config: Config{
				Stdout: uri.String(),
				Stderr: uri.String(),
			},
		}, nil
	}
}

type logURI struct {
	config Config
}

func (l *logURI) Config() Config {
	return l.config
}

func (l *logURI) Cancel() {
}

func (l *logURI) Wait() {
}

func (l *logURI) Close() error {
	return nil
}

// Load the io for a container but do not attach
//
// Allows io to be loaded on the task for deletion without
// starting copy routines
func Load(set *FIFOSet) (IO, error) {
	return &cio{
		config:  set.Config,
		closers: []io.Closer{set},
	}, nil
}

func (p *pipes) closers() []io.Closer {
	return []io.Closer{p.Stdin, p.Stdout, p.Stderr}
}