
The rpc only reports one field, the cgroup driver, to the kubelet. Containerd determines the effective cgroup driver by looking at all runtime handlers, starting from the default runtime handler (the rest in alphabetical order), and returning the cgroup driver setting of the first runtime handler that supports one. If no runtime handler supports a cgroup driver (i.e. has a config option for it), containerd falls back to auto-detection, returning systemd if systemd is running and cgroupfs otherwise.

This patch implements the CRI server side of Kubernetes KEP-4033:
https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/4033-group-driver-detection-over-cri

Signed-off-by: Markus Lehtonen <markus.lehtonen@intel.com>
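For context, here is a minimal sketch of the handler-ordering logic described above. This is not containerd's actual code; the package, type, and function names below are illustrative assumptions.

package crisketch // illustrative package name, not part of containerd

import "sort"

// runtimeHandler is a stand-in for a configured runtime handler; cgroupDriver
// is empty when the handler has no cgroup driver config option.
type runtimeHandler struct {
	name         string
	cgroupDriver string
}

// effectiveCgroupDriver returns the cgroup driver to report over the CRI:
// the setting of the first handler that has one, checking the default
// handler first and the remaining handlers in alphabetical order, falling
// back to auto-detection if no handler exposes a setting.
func effectiveCgroupDriver(handlers map[string]runtimeHandler, defaultHandler string, systemdRunning bool) string {
	rest := make([]string, 0, len(handlers))
	for name := range handlers {
		if name != defaultHandler {
			rest = append(rest, name)
		}
	}
	sort.Strings(rest)

	for _, name := range append([]string{defaultHandler}, rest...) {
		if h, ok := handlers[name]; ok && h.cgroupDriver != "" {
			return h.cgroupDriver
		}
	}
	// No handler has a cgroup driver option: auto-detect.
	if systemdRunning {
		return "systemd"
	}
	return "cgroupfs"
}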
package systemd

import (
	"context"
	"errors"
	"fmt"
	"sync"

	systemdDbus "github.com/coreos/go-systemd/v22/dbus"
	dbus "github.com/godbus/dbus/v5"
)

var (
	dbusC        *systemdDbus.Conn
	dbusMu       sync.RWMutex
	dbusInited   bool
	dbusRootless bool
)

type dbusConnManager struct{}

// newDbusConnManager initializes systemd dbus connection manager.
func newDbusConnManager(rootless bool) *dbusConnManager {
	dbusMu.Lock()
	defer dbusMu.Unlock()
	if dbusInited && rootless != dbusRootless {
		panic("can't have both root and rootless dbus")
	}
	dbusInited = true
	dbusRootless = rootless
	return &dbusConnManager{}
}

// getConnection lazily initializes and returns systemd dbus connection.
func (d *dbusConnManager) getConnection() (*systemdDbus.Conn, error) {
	// Fast path: dbusC != nil. Take only the read lock so that concurrent
	// callers can acquire the existing connection at the same time.
	dbusMu.RLock()
	if conn := dbusC; conn != nil {
		dbusMu.RUnlock()
		return conn, nil
	}
	dbusMu.RUnlock()

	// Slow path: dbusC == nil. Take the write lock so that only one
	// connection is created, and re-check under the lock.
	dbusMu.Lock()
	defer dbusMu.Unlock()
	if conn := dbusC; conn != nil {
		return conn, nil
	}

	conn, err := d.newConnection()
	if err != nil {
		// When dbus-user-session is not installed, we can't detect whether
		// we should try to connect to user dbus or system dbus, so
		// dbusRootless is set to false.
		// This may fail with a cryptic error "read unix @->/run/systemd/private: read: connection reset by peer: unknown."
		// https://github.com/moby/moby/issues/42793
		return nil, fmt.Errorf("failed to connect to dbus (hint: for rootless containers, maybe you need to install dbus-user-session package, see https://github.com/opencontainers/runc/blob/master/docs/cgroup-v2.md): %w", err)
	}
	dbusC = conn
	return conn, nil
}

func (d *dbusConnManager) newConnection() (*systemdDbus.Conn, error) {
	if dbusRootless {
		return newUserSystemdDbus()
	}
	return systemdDbus.NewWithContext(context.TODO())
}

// resetConnection resets the connection to its initial state
// (so it can be reconnected if necessary).
func (d *dbusConnManager) resetConnection(conn *systemdDbus.Conn) {
	dbusMu.Lock()
	defer dbusMu.Unlock()
	if dbusC != nil && dbusC == conn {
		dbusC.Close()
		dbusC = nil
	}
}

// retryOnDisconnect calls op, and if the error it returns is about closed dbus
// connection, the connection is re-established and the op is retried. This helps
// with the situation when dbus is restarted and we have a stale connection.
func (d *dbusConnManager) retryOnDisconnect(op func(*systemdDbus.Conn) error) error {
	for {
		conn, err := d.getConnection()
		if err != nil {
			return err
		}
		err = op(conn)
		if err == nil {
			return nil
		}
		if !errors.Is(err, dbus.ErrClosed) {
			return err
		}
		d.resetConnection(conn)
	}
}
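As a usage note, retryOnDisconnect is meant to wrap individual systemd D-Bus operations so that a restarted dbus daemon only costs a reconnect and a retry. A minimal sketch of how a caller in the same package might use it; startUnit is a hypothetical helper, not part of the file above.

// startUnit is a hypothetical example: it starts a transient systemd unit,
// transparently reconnecting and retrying if the dbus connection has gone
// stale.
func startUnit(cm *dbusConnManager, unitName string, properties []systemdDbus.Property) error {
	statusChan := make(chan string, 1)
	return cm.retryOnDisconnect(func(c *systemdDbus.Conn) error {
		_, err := c.StartTransientUnitContext(context.TODO(), unitName, "replace", properties, statusChan)
		return err
	})
}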