
This changeset modifies the metadata store to allow one to set a
"content sharing policy" that defines how blobs are shared between
namespaces in the content store.
The default mode "shared" will make blobs available in all namespaces
once it is pulled into any namespace. The blob will be pulled into
the namespace if a writer is opened with the "Expected" digest that
is already present in the backend.
The alternative mode, "isolated", requires that clients prove they have
access to the content by providing all of the content to the ingest
before the blob is added to the namespace.
Both modes share backing data, while "shared" will reduce total
bandwidth across namespaces, at the cost of allowing access to any
blob just by knowing its digest.
Note: Most of the functional code and the changelog of this commit originate from
Stephen J Day <stephen.day@docker.com>, see
40455aade8
Fixes #1713 Fixes #2865
Signed-off-by: Eric Lin <linxiulei@gmail.com>
395 lines
11 KiB
Go
/*
|
|
Copyright The containerd Authors.
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
you may not use this file except in compliance with the License.
|
|
You may obtain a copy of the License at
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
See the License for the specific language governing permissions and
|
|
limitations under the License.
|
|
*/
|
|
|
|
package server
|
|
|
|
import (
|
|
"context"
|
|
"expvar"
|
|
"io"
|
|
"net"
|
|
"net/http"
|
|
"net/http/pprof"
|
|
"os"
|
|
"path/filepath"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
csapi "github.com/containerd/containerd/api/services/content/v1"
|
|
ssapi "github.com/containerd/containerd/api/services/snapshots/v1"
|
|
"github.com/containerd/containerd/content"
|
|
"github.com/containerd/containerd/content/local"
|
|
csproxy "github.com/containerd/containerd/content/proxy"
|
|
"github.com/containerd/containerd/defaults"
|
|
"github.com/containerd/containerd/events/exchange"
|
|
"github.com/containerd/containerd/log"
|
|
"github.com/containerd/containerd/metadata"
|
|
"github.com/containerd/containerd/pkg/dialer"
|
|
"github.com/containerd/containerd/plugin"
|
|
srvconfig "github.com/containerd/containerd/services/server/config"
|
|
"github.com/containerd/containerd/snapshots"
|
|
ssproxy "github.com/containerd/containerd/snapshots/proxy"
|
|
metrics "github.com/docker/go-metrics"
|
|
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
|
"github.com/pkg/errors"
|
|
bolt "go.etcd.io/bbolt"
|
|
"google.golang.org/grpc"
|
|
)
|
|
|
|
// CreateTopLevelDirectories creates the top-level root and state directories.
|
|
func CreateTopLevelDirectories(config *srvconfig.Config) error {
|
|
switch {
|
|
case config.Root == "":
|
|
return errors.New("root must be specified")
|
|
case config.State == "":
|
|
return errors.New("state must be specified")
|
|
case config.Root == config.State:
|
|
return errors.New("root and state must be different paths")
|
|
}
|
|
|
|
if err := os.MkdirAll(config.Root, 0711); err != nil {
|
|
return err
|
|
}
|
|
if err := os.MkdirAll(config.State, 0711); err != nil {
|
|
return err
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// New creates and initializes a new containerd server.
//
// It applies the configuration, loads the ordered plugin graph, builds the
// grpc server with prometheus interceptors, initializes every plugin in
// dependency order, and finally registers all plugin-provided grpc services.
// Service registration is deliberately deferred until every plugin has been
// initialized.
func New(ctx context.Context, config *srvconfig.Config) (*Server, error) {
	if err := apply(ctx, config); err != nil {
		return nil, err
	}
	plugins, err := LoadPlugins(ctx, config)
	if err != nil {
		return nil, err
	}

	// Interceptors must be set at server construction time; message size
	// limits are only overridden when explicitly configured (> 0).
	serverOpts := []grpc.ServerOption{
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	}
	if config.GRPC.MaxRecvMsgSize > 0 {
		serverOpts = append(serverOpts, grpc.MaxRecvMsgSize(config.GRPC.MaxRecvMsgSize))
	}
	if config.GRPC.MaxSendMsgSize > 0 {
		serverOpts = append(serverOpts, grpc.MaxSendMsgSize(config.GRPC.MaxSendMsgSize))
	}
	rpc := grpc.NewServer(serverOpts...)
	var (
		// services collects plugin instances that also implement
		// plugin.Service; they are registered on rpc after the loop.
		services []plugin.Service
		s        = &Server{
			rpc:    rpc,
			events: exchange.NewExchange(),
			config: config,
		}
		// initialized tracks every plugin init result so later plugins can
		// look up their dependencies through the init context.
		initialized = plugin.NewPluginSet()
	)
	for _, p := range plugins {
		id := p.URI()
		log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id)

		initContext := plugin.NewContext(
			ctx,
			p,
			initialized,
			config.Root,
			config.State,
		)
		initContext.Events = s.events
		initContext.Address = config.GRPC.Address

		// load the plugin specific configuration if it is provided
		if p.Config != nil {
			pluginConfig, err := config.Decode(p.ID, p.Config)
			if err != nil {
				return nil, err
			}
			initContext.Config = pluginConfig
		}
		result := p.Init(initContext)
		if err := initialized.Add(result); err != nil {
			return nil, errors.Wrapf(err, "could not add plugin result to plugin set")
		}

		instance, err := result.Instance()
		if err != nil {
			// A failed plugin is logged and skipped rather than aborting
			// startup; a deliberate skip (plugin.IsSkipPlugin) logs at
			// info, anything else warns.
			if plugin.IsSkipPlugin(err) {
				log.G(ctx).WithError(err).WithField("type", p.Type).Infof("skip loading plugin %q...", id)
			} else {
				log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id)
			}
			continue
		}
		// check for grpc services that should be registered with the server
		if service, ok := instance.(plugin.Service); ok {
			services = append(services, service)
		}
		s.plugins = append(s.plugins, result)
	}
	// register services after all plugins have been initialized
	for _, service := range services {
		if err := service.Register(rpc); err != nil {
			return nil, err
		}
	}
	return s, nil
}
|
// Server is the containerd main daemon
type Server struct {
	rpc     *grpc.Server       // grpc server that all plugin services register onto
	events  *exchange.Exchange // event exchange shared with plugins via their init context
	config  *srvconfig.Config  // daemon configuration used by the Serve* methods
	plugins []*plugin.Plugin   // successfully initialized plugins, in load order; closed in reverse by Stop
}
|
// ServeGRPC provides the containerd grpc APIs on the provided listener
|
|
func (s *Server) ServeGRPC(l net.Listener) error {
|
|
if s.config.Metrics.GRPCHistogram {
|
|
// enable grpc time histograms to measure rpc latencies
|
|
grpc_prometheus.EnableHandlingTimeHistogram()
|
|
}
|
|
// before we start serving the grpc API register the grpc_prometheus metrics
|
|
// handler. This needs to be the last service registered so that it can collect
|
|
// metrics for every other service
|
|
grpc_prometheus.Register(s.rpc)
|
|
return trapClosedConnErr(s.rpc.Serve(l))
|
|
}
|
|
|
|
// ServeMetrics provides a prometheus endpoint for exposing metrics
|
|
func (s *Server) ServeMetrics(l net.Listener) error {
|
|
m := http.NewServeMux()
|
|
m.Handle("/v1/metrics", metrics.Handler())
|
|
return trapClosedConnErr(http.Serve(l, m))
|
|
}
|
|
|
|
// ServeDebug provides a debug endpoint
|
|
func (s *Server) ServeDebug(l net.Listener) error {
|
|
// don't use the default http server mux to make sure nothing gets registered
|
|
// that we don't want to expose via containerd
|
|
m := http.NewServeMux()
|
|
m.Handle("/debug/vars", expvar.Handler())
|
|
m.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
|
|
m.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
|
|
m.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
|
|
m.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
|
|
m.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
|
|
return trapClosedConnErr(http.Serve(l, m))
|
|
}
|
|
|
|
// Stop the containerd server canceling any open connections
|
|
func (s *Server) Stop() {
|
|
s.rpc.Stop()
|
|
for i := len(s.plugins) - 1; i >= 0; i-- {
|
|
p := s.plugins[i]
|
|
instance, err := p.Instance()
|
|
if err != nil {
|
|
log.L.WithError(err).WithField("id", p.Registration.ID).
|
|
Errorf("could not get plugin instance")
|
|
continue
|
|
}
|
|
closer, ok := instance.(io.Closer)
|
|
if !ok {
|
|
continue
|
|
}
|
|
if err := closer.Close(); err != nil {
|
|
log.L.WithError(err).WithField("id", p.Registration.ID).
|
|
Errorf("failed to close plugin")
|
|
}
|
|
}
|
|
}
|
|
|
|
// LoadPlugins loads all plugins into containerd and generates an ordered graph
// of all plugins.
//
// Besides loading on-disk plugins from <root>/plugins, it registers the
// built-in content store and bolt metadata plugins, plus one proxy plugin per
// entry in config.ProxyPlugins. The returned slice is ordered by dependency
// via plugin.Graph.
func LoadPlugins(ctx context.Context, config *srvconfig.Config) ([]*plugin.Registration, error) {
	// load all plugins into containerd
	if err := plugin.Load(filepath.Join(config.Root, "plugins")); err != nil {
		return nil, err
	}
	// load additional plugins that don't automatically register themselves
	plugin.Register(&plugin.Registration{
		Type: plugin.ContentPlugin,
		ID:   "content",
		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
			ic.Meta.Exports["root"] = ic.Root
			return local.NewStore(ic.Root)
		},
	})
	plugin.Register(&plugin.Registration{
		Type: plugin.MetadataPlugin,
		ID:   "bolt",
		Requires: []plugin.Type{
			plugin.ContentPlugin,
			plugin.SnapshotPlugin,
		},
		// Default content sharing policy is "shared": blobs become visible in
		// all namespaces once pulled into any one of them.
		Config: &srvconfig.BoltConfig{
			ContentSharingPolicy: srvconfig.SharingPolicyShared,
		},
		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
			if err := os.MkdirAll(ic.Root, 0711); err != nil {
				return nil, err
			}
			cs, err := ic.Get(plugin.ContentPlugin)
			if err != nil {
				return nil, err
			}

			snapshottersRaw, err := ic.GetByType(plugin.SnapshotPlugin)
			if err != nil {
				return nil, err
			}

			// Collect usable snapshotters; a plugin that was skipped or
			// failed to instantiate is dropped (and warned about unless it
			// was a deliberate skip).
			snapshotters := make(map[string]snapshots.Snapshotter)
			for name, sn := range snapshottersRaw {
				sn, err := sn.Instance()
				if err != nil {
					if !plugin.IsSkipPlugin(err) {
						log.G(ic.Context).WithError(err).
							Warnf("could not use snapshotter %v in metadata plugin", name)
					}
					continue
				}
				snapshotters[name] = sn.(snapshots.Snapshotter)
			}

			// Resolve the content sharing policy; it stays "shared" unless
			// the bolt config validates and explicitly selects "isolated".
			shared := true
			ic.Meta.Exports["policy"] = srvconfig.SharingPolicyShared
			if cfg, ok := ic.Config.(*srvconfig.BoltConfig); ok {
				if cfg.ContentSharingPolicy != "" {
					if err := cfg.Validate(); err != nil {
						return nil, err
					}
					if cfg.ContentSharingPolicy == srvconfig.SharingPolicyIsolated {
						ic.Meta.Exports["policy"] = srvconfig.SharingPolicyIsolated
						shared = false
					}

					log.L.WithField("policy", cfg.ContentSharingPolicy).Info("metadata content store policy set")
				}
			}

			path := filepath.Join(ic.Root, "meta.db")
			ic.Meta.Exports["path"] = path

			db, err := bolt.Open(path, 0644, nil)
			if err != nil {
				return nil, err
			}

			var dbopts []metadata.DBOpt
			if !shared {
				dbopts = append(dbopts, metadata.WithPolicyIsolated)
			}
			mdb := metadata.NewDB(db, cs.(content.Store), snapshotters, dbopts...)
			if err := mdb.Init(ic.Context); err != nil {
				return nil, err
			}
			return mdb, nil
		},
	})

	// Register one proxy plugin per configured entry; clients caches grpc
	// connections so plugins sharing an address share one connection.
	clients := &proxyClients{}
	for name, pp := range config.ProxyPlugins {
		var (
			t plugin.Type
			f func(*grpc.ClientConn) interface{}

			// capture per-iteration values so the InitFn closures below do
			// not all observe the last loop iteration
			address = pp.Address
		)

		switch pp.Type {
		case string(plugin.SnapshotPlugin), "snapshot":
			t = plugin.SnapshotPlugin
			ssname := name
			f = func(conn *grpc.ClientConn) interface{} {
				return ssproxy.NewSnapshotter(ssapi.NewSnapshotsClient(conn), ssname)
			}

		case string(plugin.ContentPlugin), "content":
			t = plugin.ContentPlugin
			f = func(conn *grpc.ClientConn) interface{} {
				return csproxy.NewContentStore(csapi.NewContentClient(conn))
			}
		default:
			// NOTE(review): an unknown type is only warned about, yet the
			// Register call below still runs with a zero t and nil f —
			// presumably filtered out later; confirm against plugin.Register.
			log.G(ctx).WithField("type", pp.Type).Warn("unknown proxy plugin type")
		}

		plugin.Register(&plugin.Registration{
			Type: t,
			ID:   name,
			InitFn: func(ic *plugin.InitContext) (interface{}, error) {
				ic.Meta.Exports["address"] = address
				conn, err := clients.getClient(address)
				if err != nil {
					return nil, err
				}
				return f(conn), nil
			},
		})

	}

	// return the ordered graph for plugins
	return plugin.Graph(config.DisabledPlugins), nil
}
|
// proxyClients caches grpc client connections to proxy plugins, keyed by
// address, so multiple plugins at the same address share one connection.
type proxyClients struct {
	m       sync.Mutex                  // guards clients
	clients map[string]*grpc.ClientConn // lazily initialized by getClient
}
|
func (pc *proxyClients) getClient(address string) (*grpc.ClientConn, error) {
|
|
pc.m.Lock()
|
|
defer pc.m.Unlock()
|
|
if pc.clients == nil {
|
|
pc.clients = map[string]*grpc.ClientConn{}
|
|
} else if c, ok := pc.clients[address]; ok {
|
|
return c, nil
|
|
}
|
|
|
|
gopts := []grpc.DialOption{
|
|
grpc.WithInsecure(),
|
|
grpc.WithBackoffMaxDelay(3 * time.Second),
|
|
grpc.WithDialer(dialer.Dialer),
|
|
|
|
// TODO(stevvooe): We may need to allow configuration of this on the client.
|
|
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
|
|
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
|
|
}
|
|
|
|
conn, err := grpc.Dial(dialer.DialAddress(address), gopts...)
|
|
if err != nil {
|
|
return nil, errors.Wrapf(err, "failed to dial %q", address)
|
|
}
|
|
|
|
pc.clients[address] = conn
|
|
|
|
return conn, nil
|
|
}
|
|
|
|
// trapClosedConnErr suppresses the "use of closed network connection" error
// raised when a listener is shut down while Serve is still running; every
// other error (and nil) is returned unchanged.
func trapClosedConnErr(err error) error {
	if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
		return nil
	}
	return err
}