From 45b6e758039f3fea7ec34e2081c9ab047871646b Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Wed, 21 Feb 2018 01:53:04 +0000 Subject: [PATCH 1/2] Update containerd to 129167132c5e0dbd1b031badae201a432d1bd681. Signed-off-by: Lantao Liu --- vendor.conf | 18 +- vendor/github.com/beorn7/perks/LICENSE | 20 + vendor/github.com/beorn7/perks/README.md | 31 + .../beorn7/perks/quantile/stream.go | 292 ++ vendor/github.com/boltdb/bolt/LICENSE | 20 + vendor/github.com/boltdb/bolt/README.md | 915 ++++ vendor/github.com/boltdb/bolt/bolt_386.go | 10 + vendor/github.com/boltdb/bolt/bolt_amd64.go | 10 + vendor/github.com/boltdb/bolt/bolt_arm.go | 28 + vendor/github.com/boltdb/bolt/bolt_arm64.go | 12 + vendor/github.com/boltdb/bolt/bolt_linux.go | 10 + vendor/github.com/boltdb/bolt/bolt_openbsd.go | 27 + vendor/github.com/boltdb/bolt/bolt_ppc.go | 9 + vendor/github.com/boltdb/bolt/bolt_ppc64.go | 12 + vendor/github.com/boltdb/bolt/bolt_ppc64le.go | 12 + vendor/github.com/boltdb/bolt/bolt_s390x.go | 12 + vendor/github.com/boltdb/bolt/bolt_unix.go | 89 + .../boltdb/bolt/bolt_unix_solaris.go | 90 + vendor/github.com/boltdb/bolt/bolt_windows.go | 144 + .../github.com/boltdb/bolt/boltsync_unix.go | 8 + vendor/github.com/boltdb/bolt/bucket.go | 777 +++ vendor/github.com/boltdb/bolt/cursor.go | 400 ++ vendor/github.com/boltdb/bolt/db.go | 1039 ++++ vendor/github.com/boltdb/bolt/doc.go | 44 + vendor/github.com/boltdb/bolt/errors.go | 71 + vendor/github.com/boltdb/bolt/freelist.go | 252 + vendor/github.com/boltdb/bolt/node.go | 604 +++ vendor/github.com/boltdb/bolt/page.go | 197 + vendor/github.com/boltdb/bolt/tx.go | 684 +++ vendor/github.com/containerd/console/LICENSE | 201 + .../github.com/containerd/console/README.md | 17 + .../github.com/containerd/console/console.go | 62 + .../containerd/console/console_linux.go | 255 + .../containerd/console/console_unix.go | 142 + .../containerd/console/console_windows.go | 200 + .../containerd/console/tc_darwin.go | 37 + .../containerd/console/tc_freebsd.go | 29 + .../github.com/containerd/console/tc_linux.go | 37 + .../containerd/console/tc_solaris_cgo.go | 35 + .../containerd/console/tc_solaris_nocgo.go | 31 + .../github.com/containerd/console/tc_unix.go | 75 + .../containerd/containerd/api/events/doc.go | 16 + .../containerd/api/services/events/v1/doc.go | 16 + .../containerd/api/services/images/v1/docs.go | 16 + .../api/services/introspection/v1/doc.go | 16 + .../containerd/api/services/leases/v1/doc.go | 16 + .../containerd/containerd/api/types/doc.go | 16 + .../archive/compression/compression.go | 141 + .../containerd/containerd/archive/strconv.go | 68 + .../containerd/containerd/archive/tar.go | 647 +++ .../containerd/containerd/archive/tar_opts.go | 20 + .../containerd/archive/tar_opts_unix.go | 23 + .../containerd/archive/tar_opts_windows.go | 44 + .../containerd/containerd/archive/tar_unix.go | 156 + .../containerd/archive/tar_windows.go | 446 ++ .../containerd/containerd/archive/time.go | 54 + .../containerd/archive/time_darwin.go | 30 + .../containerd/archive/time_unix.go | 39 + .../containerd/archive/time_windows.go | 42 + .../containerd/containerd/cio/io.go | 16 + .../containerd/containerd/cio/io_unix.go | 16 + .../containerd/containerd/cio/io_windows.go | 16 + .../containerd/containerd/client.go | 35 +- .../containerd/containerd/client_opts.go | 16 + .../cmd/containerd/command/config.go | 69 + .../cmd/containerd/command/config_linux.go | 32 + .../containerd/command/config_unsupported.go | 38 + .../cmd/containerd/command/config_windows.go | 32 + 
.../containerd/cmd/containerd/command/main.go | 216 + .../cmd/containerd/command/main_unix.go | 83 + .../cmd/containerd/command/main_windows.go | 58 + .../cmd/containerd/command/publish.go | 108 + .../containerd/containerd/container.go | 16 + .../containerd/containerd/container_opts.go | 16 + .../containerd/container_opts_unix.go | 18 +- .../containerd/containers/containers.go | 16 + .../containerd/containerd/containerstore.go | 16 + .../containerd/containerd/content/content.go | 16 + .../containerd/containerd/content/helpers.go | 16 + .../containerd/content/local/locks.go | 53 + .../containerd/content/local/readerat.go | 40 + .../containerd/content/local/store.go | 609 +++ .../containerd/content/local/store_unix.go | 35 + .../containerd/content/local/store_windows.go | 26 + .../containerd/content/local/writer.go | 191 + .../containerd/containerd/content_reader.go | 16 + .../containerd/containerd/content_store.go | 16 + .../containerd/containerd/content_writer.go | 16 + .../containerd/contrib/apparmor/apparmor.go | 16 + .../containerd/contrib/apparmor/template.go | 16 + .../containerd/contrib/seccomp/seccomp.go | 16 + .../contrib/seccomp/seccomp_default.go | 16 + .../containerd/defaults/defaults_unix.go | 16 + .../containerd/defaults/defaults_windows.go | 16 + .../containerd/containerd/defaults/doc.go | 16 + .../containerd/containerd/dialer/dialer.go | 16 + .../containerd/dialer/dialer_unix.go | 16 + .../containerd/dialer/dialer_windows.go | 16 + .../github.com/containerd/containerd/diff.go | 16 + .../containerd/containerd/diff/apply/apply.go | 128 + .../containerd/containerd/diff/diff.go | 16 + .../containerd/diff/walking/differ.go | 166 + .../containerd/diff/walking/plugin/plugin.go | 55 + .../containerd/containerd/errdefs/errors.go | 16 + .../containerd/containerd/errdefs/grpc.go | 16 + .../containerd/containerd/events/events.go | 16 + .../containerd/events/exchange/exchange.go | 16 + .../containerd/containerd/filters/adaptor.go | 16 + .../containerd/containerd/filters/filter.go | 16 + .../containerd/containerd/filters/parser.go | 16 + .../containerd/containerd/filters/quote.go | 16 + .../containerd/containerd/filters/scanner.go | 16 + .../github.com/containerd/containerd/gc/gc.go | 182 + .../containerd/gc/scheduler/scheduler.go | 352 ++ .../github.com/containerd/containerd/grpc.go | 16 + .../containerd/identifiers/validate.go | 16 + .../github.com/containerd/containerd/image.go | 16 + .../containerd/containerd/image_store.go | 16 + .../containerd/containerd/images/handlers.go | 91 +- .../containerd/containerd/images/image.go | 38 +- .../containerd/images/importexport.go | 18 +- .../containerd/images/mediatypes.go | 16 + .../containerd/containerd/labels/validate.go | 37 + .../github.com/containerd/containerd/lease.go | 16 + .../containerd/containerd/leases/context.go | 16 + .../containerd/containerd/leases/grpc.go | 16 + .../containerd/containerd/linux/bundle.go | 152 + .../containerd/linux/proc/deleted_state.go | 70 + .../containerd/containerd/linux/proc/exec.go | 219 + .../containerd/linux/proc/exec_state.go | 188 + .../containerd/containerd/linux/proc/init.go | 451 ++ .../containerd/linux/proc/init_state.go | 522 ++ .../containerd/containerd/linux/proc/io.go | 98 + .../containerd/linux/proc/process.go | 105 + .../containerd/containerd/linux/proc/types.go | 60 + .../containerd/containerd/linux/proc/utils.go | 104 + .../containerd/containerd/linux/process.go | 151 + .../containerd/containerd/linux/runtime.go | 545 ++ .../containerd/linux/shim/client/client.go | 278 ++ 
.../linux/shim/client/client_linux.go | 46 + .../linux/shim/client/client_unix.go | 34 + .../containerd/containerd/linux/shim/local.go | 107 + .../containerd/linux/shim/service.go | 547 ++ .../containerd/linux/shim/service_linux.go | 111 + .../containerd/linux/shim/service_unix.go | 84 + .../containerd/linux/shim/v1/shim.pb.go | 4431 +++++++++++++++++ .../containerd/linux/shim/v1/shim.proto | 165 + .../containerd/containerd/linux/task.go | 346 ++ .../containerd/containerd/log/context.go | 16 + .../containerd/metadata/adaptors.go | 129 + .../containerd/containerd/metadata/bolt.go | 61 + .../containerd/metadata/boltutil/helpers.go | 127 + .../containerd/containerd/metadata/buckets.go | 191 + .../containerd/metadata/containers.go | 430 ++ .../containerd/containerd/metadata/content.go | 618 +++ .../containerd/containerd/metadata/db.go | 410 ++ .../containerd/containerd/metadata/gc.go | 405 ++ .../containerd/containerd/metadata/images.go | 369 ++ .../containerd/containerd/metadata/leases.go | 257 + .../containerd/metadata/migrations.go | 101 + .../containerd/metadata/namespaces.go | 186 + .../containerd/metadata/snapshot.go | 754 +++ .../containerd/metrics/cgroups/blkio.go | 132 + .../containerd/metrics/cgroups/cgroups.go | 121 + .../containerd/metrics/cgroups/cpu.go | 146 + .../containerd/metrics/cgroups/hugetlb.go | 88 + .../containerd/metrics/cgroups/memory.go | 796 +++ .../containerd/metrics/cgroups/metric.go | 61 + .../containerd/metrics/cgroups/metrics.go | 153 + .../containerd/metrics/cgroups/oom.go | 157 + .../containerd/metrics/cgroups/pids.go | 60 + .../containerd/mount/lookup_unix.go | 16 + .../containerd/mount/lookup_unsupported.go | 16 + .../containerd/containerd/mount/mount.go | 16 + .../containerd/mount/mount_linux.go | 16 + .../containerd/containerd/mount/mount_unix.go | 16 + .../containerd/mount/mount_windows.go | 16 + .../containerd/containerd/mount/mountinfo.go | 16 + .../containerd/mount/mountinfo_freebsd.go | 16 + .../containerd/mount/mountinfo_linux.go | 16 + .../containerd/mount/mountinfo_unsupported.go | 16 + .../containerd/containerd/namespaces.go | 16 + .../containerd/namespaces/context.go | 16 + .../containerd/containerd/namespaces/grpc.go | 16 + .../containerd/containerd/namespaces/store.go | 16 + .../containerd/namespaces/validate.go | 16 + .../containerd/containerd/oci/client.go | 16 + .../containerd/containerd/oci/spec.go | 16 + .../containerd/containerd/oci/spec_opts.go | 22 +- .../containerd/oci/spec_opts_unix.go | 16 + .../containerd/oci/spec_opts_windows.go | 16 + .../containerd/containerd/oci/spec_unix.go | 16 + .../containerd/containerd/oci/spec_windows.go | 16 + .../containerd/platforms/cpuinfo.go | 16 + .../containerd/platforms/database.go | 16 + .../containerd/platforms/defaults.go | 16 + .../containerd/platforms/platforms.go | 16 + .../containerd/containerd/plugin/context.go | 16 + .../containerd/containerd/plugin/plugin.go | 16 + .../containerd/plugin/plugin_go18.go | 16 + .../containerd/plugin/plugin_other.go | 16 + .../containerd/containerd/process.go | 16 + .../containerd/protobuf/google/rpc/doc.go | 16 + .../containerd/containerd/reaper/reaper.go | 110 + .../containerd/reference/reference.go | 16 + .../containerd/remotes/docker/auth.go | 16 + .../containerd/remotes/docker/fetcher.go | 16 + .../remotes/docker/httpreadseeker.go | 16 + .../containerd/remotes/docker/pusher.go | 16 + .../containerd/remotes/docker/resolver.go | 16 + .../remotes/docker/schema1/converter.go | 16 + .../containerd/remotes/docker/scope.go | 16 + 
.../containerd/remotes/docker/status.go | 16 + .../containerd/containerd/remotes/handlers.go | 105 +- .../containerd/containerd/remotes/resolver.go | 16 + .../containerd/containerd/rootfs/apply.go | 16 + .../containerd/containerd/rootfs/diff.go | 16 + .../containerd/containerd/rootfs/init.go | 16 + .../containerd/rootfs/init_linux.go | 16 + .../containerd/rootfs/init_other.go | 16 + .../containerd/containerd/runtime/events.go | 42 + .../containerd/containerd/runtime/monitor.go | 70 + .../containerd/containerd/runtime/runtime.go | 70 + .../containerd/containerd/runtime/task.go | 132 + .../containerd/runtime/task_list.go | 122 + .../containerd/containerd/runtime/typeurl.go | 34 + .../containerd/containerd/server/config.go | 99 + .../containerd/containerd/server/server.go | 272 + .../containerd/server/server_linux.go | 54 + .../containerd/server/server_solaris.go | 23 + .../containerd/server/server_unsupported.go | 25 + .../containerd/server/server_windows.go | 27 + .../containerd/services/containers/helpers.go | 70 + .../containerd/services/containers/service.go | 193 + .../containerd/services/content/service.go | 490 ++ .../containerd/services/diff/service.go | 180 + .../containerd/services/diff/service_unix.go | 23 + .../services/diff/service_windows.go | 23 + .../containerd/services/events/service.go | 108 + .../services/healthcheck/service.go | 50 + .../containerd/services/images/helpers.go | 70 + .../containerd/services/images/service.go | 190 + .../services/introspection/service.go | 163 + .../containerd/services/leases/service.go | 131 + .../containerd/services/namespaces/service.go | 229 + .../containerd/services/snapshots/service.go | 332 ++ .../containerd/services/tasks/service.go | 638 +++ .../containerd/services/version/service.go | 55 + .../containerd/containerd/snapshot.go | 16 + .../containerd/snapshots/overlay/overlay.go | 413 ++ .../containerd/snapshots/snapshotter.go | 16 + .../containerd/snapshots/storage/bolt.go | 586 +++ .../containerd/snapshots/storage/metastore.go | 115 + .../containerd/snapshotter_default_linux.go | 16 + .../containerd/snapshotter_default_unix.go | 16 + .../containerd/snapshotter_default_windows.go | 16 + .../containerd/containerd/sys/epoll.go | 16 + .../containerd/containerd/sys/fds.go | 16 + .../containerd/containerd/sys/filesys_unix.go | 16 + .../containerd/sys/filesys_windows.go | 16 + .../containerd/containerd/sys/oom_unix.go | 16 + .../containerd/containerd/sys/oom_windows.go | 16 + .../containerd/containerd/sys/proc.go | 16 + .../containerd/containerd/sys/reaper.go | 16 + .../containerd/containerd/sys/socket_unix.go | 16 + .../containerd/sys/socket_windows.go | 16 + .../containerd/containerd/sys/stat_bsd.go | 16 + .../containerd/containerd/sys/stat_unix.go | 16 + .../github.com/containerd/containerd/task.go | 18 +- .../containerd/containerd/task_opts.go | 16 + .../containerd/containerd/task_opts_linux.go | 16 + .../containerd/task_opts_windows.go | 16 + .../containerd/containerd/vendor.conf | 6 +- .../containerd/containerd/version/version.go | 29 + .../containerd/continuity/fs/diff.go | 2 +- .../containerd/continuity/fs/stat_linux.go | 3 +- .../continuity/sysx/xattr_openbsd.go | 7 + .../continuity/sysx/xattr_unsupported.go | 2 +- vendor/github.com/containerd/go-runc/LICENSE | 201 + .../github.com/containerd/go-runc/README.md | 14 + .../containerd/go-runc/command_linux.go | 23 + .../containerd/go-runc/command_other.go | 16 + .../github.com/containerd/go-runc/console.go | 141 + .../containerd/go-runc/container.go | 14 + 
.../github.com/containerd/go-runc/events.go | 84 + vendor/github.com/containerd/go-runc/io.go | 213 + .../github.com/containerd/go-runc/monitor.go | 60 + vendor/github.com/containerd/go-runc/runc.go | 660 +++ vendor/github.com/containerd/go-runc/utils.go | 45 + vendor/github.com/dmcgowan/go-tar/LICENSE | 27 + vendor/github.com/dmcgowan/go-tar/common.go | 796 +++ vendor/github.com/dmcgowan/go-tar/format.go | 301 ++ vendor/github.com/dmcgowan/go-tar/reader.go | 852 ++++ .../github.com/dmcgowan/go-tar/sparse_unix.go | 77 + .../dmcgowan/go-tar/sparse_windows.go | 129 + .../dmcgowan/go-tar/stat_actime1.go | 20 + .../dmcgowan/go-tar/stat_actime2.go | 20 + .../github.com/dmcgowan/go-tar/stat_unix.go | 96 + vendor/github.com/dmcgowan/go-tar/strconv.go | 326 ++ vendor/github.com/dmcgowan/go-tar/writer.go | 629 +++ .../github.com/docker/go-metrics/LICENSE.code | 191 + .../github.com/docker/go-metrics/LICENSE.docs | 425 ++ vendor/github.com/docker/go-metrics/NOTICE | 16 + vendor/github.com/docker/go-metrics/README.md | 79 + .../github.com/docker/go-metrics/counter.go | 52 + vendor/github.com/docker/go-metrics/docs.go | 3 + vendor/github.com/docker/go-metrics/gauge.go | 72 + .../github.com/docker/go-metrics/handler.go | 13 + .../github.com/docker/go-metrics/helpers.go | 10 + .../github.com/docker/go-metrics/namespace.go | 181 + .../github.com/docker/go-metrics/register.go | 15 + vendor/github.com/docker/go-metrics/timer.go | 85 + vendor/github.com/docker/go-metrics/unit.go | 12 + .../grpc-ecosystem/go-grpc-prometheus/LICENSE | 201 + .../go-grpc-prometheus/README.md | 245 + .../go-grpc-prometheus/client.go | 72 + .../go-grpc-prometheus/client_reporter.go | 111 + .../go-grpc-prometheus/server.go | 74 + .../go-grpc-prometheus/server_reporter.go | 157 + .../grpc-ecosystem/go-grpc-prometheus/util.go | 27 + .../golang_protobuf_extensions/LICENSE | 201 + .../golang_protobuf_extensions/NOTICE | 1 + .../golang_protobuf_extensions/README.md | 20 + .../pbutil/decode.go | 75 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + .../prometheus/client_golang/LICENSE | 201 + .../prometheus/client_golang/NOTICE | 23 + .../prometheus/client_golang/README.md | 49 + .../client_golang/prometheus/README.md | 1 + .../client_golang/prometheus/collector.go | 75 + .../client_golang/prometheus/counter.go | 277 ++ .../client_golang/prometheus/desc.go | 188 + .../client_golang/prometheus/doc.go | 186 + .../prometheus/expvar_collector.go | 119 + .../client_golang/prometheus/fnv.go | 29 + .../client_golang/prometheus/gauge.go | 286 ++ .../client_golang/prometheus/go_collector.go | 284 ++ .../client_golang/prometheus/histogram.go | 505 ++ .../client_golang/prometheus/http.go | 524 ++ .../client_golang/prometheus/labels.go | 57 + .../client_golang/prometheus/metric.go | 158 + .../client_golang/prometheus/observer.go | 52 + .../prometheus/process_collector.go | 140 + .../client_golang/prometheus/registry.go | 807 +++ .../client_golang/prometheus/summary.go | 609 +++ .../client_golang/prometheus/timer.go | 51 + .../client_golang/prometheus/untyped.go | 42 + .../client_golang/prometheus/value.go | 160 + .../client_golang/prometheus/vec.go | 469 ++ .../prometheus/client_model/LICENSE | 201 + .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/README.md | 26 + .../prometheus/client_model/go/metrics.pb.go | 364 ++ vendor/github.com/prometheus/common/LICENSE | 201 + vendor/github.com/prometheus/common/NOTICE | 5 + vendor/github.com/prometheus/common/README.md | 12 + 
.../prometheus/common/expfmt/decode.go | 429 ++ .../prometheus/common/expfmt/encode.go | 88 + .../prometheus/common/expfmt/expfmt.go | 38 + .../prometheus/common/expfmt/fuzz.go | 36 + .../prometheus/common/expfmt/text_create.go | 303 ++ .../prometheus/common/expfmt/text_parse.go | 757 +++ .../bitbucket.org/ww/goautoneg/README.txt | 67 + .../bitbucket.org/ww/goautoneg/autoneg.go | 162 + .../prometheus/common/model/alert.go | 136 + .../prometheus/common/model/fingerprinting.go | 105 + .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 210 + .../prometheus/common/model/labelset.go | 169 + .../prometheus/common/model/metric.go | 103 + .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 + .../prometheus/common/model/silence.go | 106 + .../prometheus/common/model/time.go | 264 + .../prometheus/common/model/value.go | 416 ++ vendor/github.com/prometheus/procfs/LICENSE | 201 + vendor/github.com/prometheus/procfs/NOTICE | 7 + vendor/github.com/prometheus/procfs/README.md | 11 + .../github.com/prometheus/procfs/buddyinfo.go | 95 + vendor/github.com/prometheus/procfs/doc.go | 45 + vendor/github.com/prometheus/procfs/fs.go | 69 + .../prometheus/procfs/internal/util/parse.go | 46 + vendor/github.com/prometheus/procfs/ipvs.go | 246 + vendor/github.com/prometheus/procfs/mdstat.go | 138 + .../prometheus/procfs/mountstats.go | 556 +++ .../github.com/prometheus/procfs/net_dev.go | 203 + .../github.com/prometheus/procfs/nfs/nfs.go | 263 + .../github.com/prometheus/procfs/nfs/parse.go | 308 ++ .../prometheus/procfs/nfs/parse_nfs.go | 67 + .../prometheus/procfs/nfs/parse_nfsd.go | 89 + vendor/github.com/prometheus/procfs/proc.go | 224 + .../github.com/prometheus/procfs/proc_io.go | 52 + .../prometheus/procfs/proc_limits.go | 137 + .../github.com/prometheus/procfs/proc_ns.go | 55 + .../github.com/prometheus/procfs/proc_stat.go | 175 + vendor/github.com/prometheus/procfs/stat.go | 219 + vendor/github.com/prometheus/procfs/xfrm.go | 187 + .../github.com/prometheus/procfs/xfs/parse.go | 330 ++ .../github.com/prometheus/procfs/xfs/xfs.go | 163 + vendor/github.com/stevvooe/ttrpc/LICENSE | 201 + vendor/github.com/stevvooe/ttrpc/README.md | 52 + vendor/github.com/stevvooe/ttrpc/channel.go | 138 + vendor/github.com/stevvooe/ttrpc/client.go | 264 + vendor/github.com/stevvooe/ttrpc/codec.go | 26 + vendor/github.com/stevvooe/ttrpc/config.go | 23 + vendor/github.com/stevvooe/ttrpc/handshake.go | 34 + vendor/github.com/stevvooe/ttrpc/server.go | 441 ++ vendor/github.com/stevvooe/ttrpc/services.go | 134 + vendor/github.com/stevvooe/ttrpc/types.go | 26 + .../stevvooe/ttrpc/unixcreds_linux.go | 92 + vendor/github.com/urfave/cli/LICENSE | 21 + vendor/github.com/urfave/cli/README.md | 1391 ++++++ vendor/github.com/urfave/cli/app.go | 509 ++ vendor/github.com/urfave/cli/category.go | 44 + vendor/github.com/urfave/cli/cli.go | 22 + vendor/github.com/urfave/cli/command.go | 304 ++ vendor/github.com/urfave/cli/context.go | 278 ++ vendor/github.com/urfave/cli/errors.go | 115 + vendor/github.com/urfave/cli/flag.go | 807 +++ .../github.com/urfave/cli/flag_generated.go | 627 +++ vendor/github.com/urfave/cli/funcs.go | 41 + vendor/github.com/urfave/cli/help.go | 339 ++ .../google.golang.org/grpc/health/health.go | 72 + 424 files changed, 59732 insertions(+), 113 deletions(-) create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/README.md create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go 
create mode 100644 vendor/github.com/boltdb/bolt/LICENSE create mode 100644 vendor/github.com/boltdb/bolt/README.md create mode 100644 vendor/github.com/boltdb/bolt/bolt_386.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_amd64.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_arm.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_arm64.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_linux.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_openbsd.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64le.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_s390x.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_unix.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_unix_solaris.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_windows.go create mode 100644 vendor/github.com/boltdb/bolt/boltsync_unix.go create mode 100644 vendor/github.com/boltdb/bolt/bucket.go create mode 100644 vendor/github.com/boltdb/bolt/cursor.go create mode 100644 vendor/github.com/boltdb/bolt/db.go create mode 100644 vendor/github.com/boltdb/bolt/doc.go create mode 100644 vendor/github.com/boltdb/bolt/errors.go create mode 100644 vendor/github.com/boltdb/bolt/freelist.go create mode 100644 vendor/github.com/boltdb/bolt/node.go create mode 100644 vendor/github.com/boltdb/bolt/page.go create mode 100644 vendor/github.com/boltdb/bolt/tx.go create mode 100644 vendor/github.com/containerd/console/LICENSE create mode 100644 vendor/github.com/containerd/console/README.md create mode 100644 vendor/github.com/containerd/console/console.go create mode 100644 vendor/github.com/containerd/console/console_linux.go create mode 100644 vendor/github.com/containerd/console/console_unix.go create mode 100644 vendor/github.com/containerd/console/console_windows.go create mode 100644 vendor/github.com/containerd/console/tc_darwin.go create mode 100644 vendor/github.com/containerd/console/tc_freebsd.go create mode 100644 vendor/github.com/containerd/console/tc_linux.go create mode 100644 vendor/github.com/containerd/console/tc_solaris_cgo.go create mode 100644 vendor/github.com/containerd/console/tc_solaris_nocgo.go create mode 100644 vendor/github.com/containerd/console/tc_unix.go create mode 100644 vendor/github.com/containerd/containerd/archive/compression/compression.go create mode 100644 vendor/github.com/containerd/containerd/archive/strconv.go create mode 100644 vendor/github.com/containerd/containerd/archive/tar.go create mode 100644 vendor/github.com/containerd/containerd/archive/tar_opts.go create mode 100644 vendor/github.com/containerd/containerd/archive/tar_opts_unix.go create mode 100644 vendor/github.com/containerd/containerd/archive/tar_opts_windows.go create mode 100644 vendor/github.com/containerd/containerd/archive/tar_unix.go create mode 100644 vendor/github.com/containerd/containerd/archive/tar_windows.go create mode 100644 vendor/github.com/containerd/containerd/archive/time.go create mode 100644 vendor/github.com/containerd/containerd/archive/time_darwin.go create mode 100644 vendor/github.com/containerd/containerd/archive/time_unix.go create mode 100644 vendor/github.com/containerd/containerd/archive/time_windows.go create mode 100644 vendor/github.com/containerd/containerd/cmd/containerd/command/config.go create mode 100644 vendor/github.com/containerd/containerd/cmd/containerd/command/config_linux.go create mode 100644 
vendor/github.com/containerd/containerd/cmd/containerd/command/config_unsupported.go create mode 100644 vendor/github.com/containerd/containerd/cmd/containerd/command/config_windows.go create mode 100644 vendor/github.com/containerd/containerd/cmd/containerd/command/main.go create mode 100644 vendor/github.com/containerd/containerd/cmd/containerd/command/main_unix.go create mode 100644 vendor/github.com/containerd/containerd/cmd/containerd/command/main_windows.go create mode 100644 vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go create mode 100644 vendor/github.com/containerd/containerd/content/local/locks.go create mode 100644 vendor/github.com/containerd/containerd/content/local/readerat.go create mode 100644 vendor/github.com/containerd/containerd/content/local/store.go create mode 100644 vendor/github.com/containerd/containerd/content/local/store_unix.go create mode 100644 vendor/github.com/containerd/containerd/content/local/store_windows.go create mode 100644 vendor/github.com/containerd/containerd/content/local/writer.go create mode 100644 vendor/github.com/containerd/containerd/diff/apply/apply.go create mode 100644 vendor/github.com/containerd/containerd/diff/walking/differ.go create mode 100644 vendor/github.com/containerd/containerd/diff/walking/plugin/plugin.go create mode 100644 vendor/github.com/containerd/containerd/gc/gc.go create mode 100644 vendor/github.com/containerd/containerd/gc/scheduler/scheduler.go create mode 100644 vendor/github.com/containerd/containerd/labels/validate.go create mode 100644 vendor/github.com/containerd/containerd/linux/bundle.go create mode 100644 vendor/github.com/containerd/containerd/linux/proc/deleted_state.go create mode 100644 vendor/github.com/containerd/containerd/linux/proc/exec.go create mode 100644 vendor/github.com/containerd/containerd/linux/proc/exec_state.go create mode 100644 vendor/github.com/containerd/containerd/linux/proc/init.go create mode 100644 vendor/github.com/containerd/containerd/linux/proc/init_state.go create mode 100644 vendor/github.com/containerd/containerd/linux/proc/io.go create mode 100644 vendor/github.com/containerd/containerd/linux/proc/process.go create mode 100644 vendor/github.com/containerd/containerd/linux/proc/types.go create mode 100644 vendor/github.com/containerd/containerd/linux/proc/utils.go create mode 100644 vendor/github.com/containerd/containerd/linux/process.go create mode 100644 vendor/github.com/containerd/containerd/linux/runtime.go create mode 100644 vendor/github.com/containerd/containerd/linux/shim/client/client.go create mode 100644 vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go create mode 100644 vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go create mode 100644 vendor/github.com/containerd/containerd/linux/shim/local.go create mode 100644 vendor/github.com/containerd/containerd/linux/shim/service.go create mode 100644 vendor/github.com/containerd/containerd/linux/shim/service_linux.go create mode 100644 vendor/github.com/containerd/containerd/linux/shim/service_unix.go create mode 100644 vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go create mode 100644 vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto create mode 100644 vendor/github.com/containerd/containerd/linux/task.go create mode 100644 vendor/github.com/containerd/containerd/metadata/adaptors.go create mode 100644 vendor/github.com/containerd/containerd/metadata/bolt.go create mode 100644 
vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go create mode 100644 vendor/github.com/containerd/containerd/metadata/buckets.go create mode 100644 vendor/github.com/containerd/containerd/metadata/containers.go create mode 100644 vendor/github.com/containerd/containerd/metadata/content.go create mode 100644 vendor/github.com/containerd/containerd/metadata/db.go create mode 100644 vendor/github.com/containerd/containerd/metadata/gc.go create mode 100644 vendor/github.com/containerd/containerd/metadata/images.go create mode 100644 vendor/github.com/containerd/containerd/metadata/leases.go create mode 100644 vendor/github.com/containerd/containerd/metadata/migrations.go create mode 100644 vendor/github.com/containerd/containerd/metadata/namespaces.go create mode 100644 vendor/github.com/containerd/containerd/metadata/snapshot.go create mode 100644 vendor/github.com/containerd/containerd/metrics/cgroups/blkio.go create mode 100644 vendor/github.com/containerd/containerd/metrics/cgroups/cgroups.go create mode 100644 vendor/github.com/containerd/containerd/metrics/cgroups/cpu.go create mode 100644 vendor/github.com/containerd/containerd/metrics/cgroups/hugetlb.go create mode 100644 vendor/github.com/containerd/containerd/metrics/cgroups/memory.go create mode 100644 vendor/github.com/containerd/containerd/metrics/cgroups/metric.go create mode 100644 vendor/github.com/containerd/containerd/metrics/cgroups/metrics.go create mode 100644 vendor/github.com/containerd/containerd/metrics/cgroups/oom.go create mode 100644 vendor/github.com/containerd/containerd/metrics/cgroups/pids.go create mode 100644 vendor/github.com/containerd/containerd/reaper/reaper.go create mode 100644 vendor/github.com/containerd/containerd/runtime/events.go create mode 100644 vendor/github.com/containerd/containerd/runtime/monitor.go create mode 100644 vendor/github.com/containerd/containerd/runtime/runtime.go create mode 100644 vendor/github.com/containerd/containerd/runtime/task.go create mode 100644 vendor/github.com/containerd/containerd/runtime/task_list.go create mode 100644 vendor/github.com/containerd/containerd/runtime/typeurl.go create mode 100644 vendor/github.com/containerd/containerd/server/config.go create mode 100644 vendor/github.com/containerd/containerd/server/server.go create mode 100644 vendor/github.com/containerd/containerd/server/server_linux.go create mode 100644 vendor/github.com/containerd/containerd/server/server_solaris.go create mode 100644 vendor/github.com/containerd/containerd/server/server_unsupported.go create mode 100644 vendor/github.com/containerd/containerd/server/server_windows.go create mode 100644 vendor/github.com/containerd/containerd/services/containers/helpers.go create mode 100644 vendor/github.com/containerd/containerd/services/containers/service.go create mode 100644 vendor/github.com/containerd/containerd/services/content/service.go create mode 100644 vendor/github.com/containerd/containerd/services/diff/service.go create mode 100644 vendor/github.com/containerd/containerd/services/diff/service_unix.go create mode 100644 vendor/github.com/containerd/containerd/services/diff/service_windows.go create mode 100644 vendor/github.com/containerd/containerd/services/events/service.go create mode 100644 vendor/github.com/containerd/containerd/services/healthcheck/service.go create mode 100644 vendor/github.com/containerd/containerd/services/images/helpers.go create mode 100644 vendor/github.com/containerd/containerd/services/images/service.go create mode 100644 
vendor/github.com/containerd/containerd/services/introspection/service.go create mode 100644 vendor/github.com/containerd/containerd/services/leases/service.go create mode 100644 vendor/github.com/containerd/containerd/services/namespaces/service.go create mode 100644 vendor/github.com/containerd/containerd/services/snapshots/service.go create mode 100644 vendor/github.com/containerd/containerd/services/tasks/service.go create mode 100644 vendor/github.com/containerd/containerd/services/version/service.go create mode 100644 vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go create mode 100644 vendor/github.com/containerd/containerd/snapshots/storage/bolt.go create mode 100644 vendor/github.com/containerd/containerd/snapshots/storage/metastore.go create mode 100644 vendor/github.com/containerd/containerd/version/version.go create mode 100644 vendor/github.com/containerd/continuity/sysx/xattr_openbsd.go create mode 100644 vendor/github.com/containerd/go-runc/LICENSE create mode 100644 vendor/github.com/containerd/go-runc/README.md create mode 100644 vendor/github.com/containerd/go-runc/command_linux.go create mode 100644 vendor/github.com/containerd/go-runc/command_other.go create mode 100644 vendor/github.com/containerd/go-runc/console.go create mode 100644 vendor/github.com/containerd/go-runc/container.go create mode 100644 vendor/github.com/containerd/go-runc/events.go create mode 100644 vendor/github.com/containerd/go-runc/io.go create mode 100644 vendor/github.com/containerd/go-runc/monitor.go create mode 100644 vendor/github.com/containerd/go-runc/runc.go create mode 100644 vendor/github.com/containerd/go-runc/utils.go create mode 100644 vendor/github.com/dmcgowan/go-tar/LICENSE create mode 100644 vendor/github.com/dmcgowan/go-tar/common.go create mode 100644 vendor/github.com/dmcgowan/go-tar/format.go create mode 100644 vendor/github.com/dmcgowan/go-tar/reader.go create mode 100644 vendor/github.com/dmcgowan/go-tar/sparse_unix.go create mode 100644 vendor/github.com/dmcgowan/go-tar/sparse_windows.go create mode 100644 vendor/github.com/dmcgowan/go-tar/stat_actime1.go create mode 100644 vendor/github.com/dmcgowan/go-tar/stat_actime2.go create mode 100644 vendor/github.com/dmcgowan/go-tar/stat_unix.go create mode 100644 vendor/github.com/dmcgowan/go-tar/strconv.go create mode 100644 vendor/github.com/dmcgowan/go-tar/writer.go create mode 100644 vendor/github.com/docker/go-metrics/LICENSE.code create mode 100644 vendor/github.com/docker/go-metrics/LICENSE.docs create mode 100644 vendor/github.com/docker/go-metrics/NOTICE create mode 100644 vendor/github.com/docker/go-metrics/README.md create mode 100644 vendor/github.com/docker/go-metrics/counter.go create mode 100644 vendor/github.com/docker/go-metrics/docs.go create mode 100644 vendor/github.com/docker/go-metrics/gauge.go create mode 100644 vendor/github.com/docker/go-metrics/handler.go create mode 100644 vendor/github.com/docker/go-metrics/helpers.go create mode 100644 vendor/github.com/docker/go-metrics/namespace.go create mode 100644 vendor/github.com/docker/go-metrics/register.go create mode 100644 vendor/github.com/docker/go-metrics/timer.go create mode 100644 vendor/github.com/docker/go-metrics/unit.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go create mode 100644 
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/README.md create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/README.md create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/README.md create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 
vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/README.md create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/nfs.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/parse.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/xfs.go create mode 100644 vendor/github.com/stevvooe/ttrpc/LICENSE create mode 100644 vendor/github.com/stevvooe/ttrpc/README.md create mode 100644 vendor/github.com/stevvooe/ttrpc/channel.go create mode 100644 vendor/github.com/stevvooe/ttrpc/client.go create mode 100644 vendor/github.com/stevvooe/ttrpc/codec.go create mode 100644 vendor/github.com/stevvooe/ttrpc/config.go create mode 100644 vendor/github.com/stevvooe/ttrpc/handshake.go create mode 100644 vendor/github.com/stevvooe/ttrpc/server.go create mode 100644 vendor/github.com/stevvooe/ttrpc/services.go create mode 100644 vendor/github.com/stevvooe/ttrpc/types.go create mode 100644 vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go create mode 100644 vendor/github.com/urfave/cli/LICENSE create mode 
100644 vendor/github.com/urfave/cli/README.md create mode 100644 vendor/github.com/urfave/cli/app.go create mode 100644 vendor/github.com/urfave/cli/category.go create mode 100644 vendor/github.com/urfave/cli/cli.go create mode 100644 vendor/github.com/urfave/cli/command.go create mode 100644 vendor/github.com/urfave/cli/context.go create mode 100644 vendor/github.com/urfave/cli/errors.go create mode 100644 vendor/github.com/urfave/cli/flag.go create mode 100644 vendor/github.com/urfave/cli/flag_generated.go create mode 100644 vendor/github.com/urfave/cli/funcs.go create mode 100644 vendor/github.com/urfave/cli/help.go create mode 100644 vendor/google.golang.org/grpc/health/health.go diff --git a/vendor.conf b/vendor.conf index db92e52bf..2c5175318 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,18 +1,24 @@ +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/blang/semver v3.1.0 +github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 github.com/containerd/cgroups c0710c92e8b3a44681d1321dcfd1360fc5c6c089 -github.com/containerd/containerd ee6ffdd91e8fd9ec2b9684fd385e81a16c40e99c -github.com/containerd/continuity 1a794c0014a7b786879312d1dab309ba96ead8bc +github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e +github.com/containerd/containerd 129167132c5e0dbd1b031badae201a432d1bd681 +github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 +github.com/containerd/go-runc 4f6e87ae043f859a38255247b49c9abc262d002f github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 github.com/containernetworking/cni v0.6.0 github.com/containernetworking/plugins v0.6.0 github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6 github.com/cri-o/ocicni 9b451e26eb7c694d564991fbf44f77d0afb9b03c github.com/davecgh/go-spew v1.1.0 +github.com/dmcgowan/go-tar go1.10 github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 +github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098 github.com/docker/go-units v0.3.1 github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528 github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46 @@ -23,10 +29,12 @@ github.com/gogo/protobuf v0.5 github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed github.com/golang/protobuf 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c +github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 github.com/json-iterator/go 1.0.4 +github.com/matttproud/golang_protobuf_extensions v1.0.0 github.com/Microsoft/go-winio v0.4.5 github.com/Microsoft/hcsshim v0.6.7 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 @@ -37,14 +45,20 @@ github.com/opencontainers/runtime-tools 6073aff4ac61897f75895123f7e24135204a404d github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206 github.com/pkg/errors v0.8.0 github.com/pmezard/go-difflib v1.0.0 +github.com/prometheus/client_golang 
f4fb1b73fb099f396a7f0036bf86aa8def4ed823 +github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c +github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 +github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd github.com/renstrom/dedent 020d11c3b9c0c7a3c2efcc8e5cf5b9ef7bcea21f github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 github.com/sirupsen/logrus v1.0.0 github.com/spf13/cobra v0.0.1 github.com/spf13/pflag v1.0.0 +github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 github.com/stretchr/testify v1.1.4 github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc +github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 000000000..339177be6 --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/README.md b/vendor/github.com/beorn7/perks/README.md new file mode 100644 index 000000000..fc0577770 --- /dev/null +++ b/vendor/github.com/beorn7/perks/README.md @@ -0,0 +1,31 @@ +# Perks for Go (golang.org) + +Perks contains the Go package quantile that computes approximate quantiles over +an unbounded data stream within low memory and CPU bounds. + +For more information and examples, see: +http://godoc.org/github.com/bmizerany/perks + +A very special thank you and shout out to Graham Cormode (Rutgers University), +Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and +Divesh Srivastava (AT&T Labs–Research) for their research and publication of +[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf) + +Thank you, also: +* Armon Dadgar (@armon) +* Andrew Gerrand (@nf) +* Brad Fitzpatrick (@bradfitz) +* Keith Rarick (@kr) + +FAQ: + +Q: Why not move the quantile package into the project root? +A: I want to add more packages to perks later. 
+ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 000000000..f4cabd669 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,292 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. 
the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targets map[float64]float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for quantile, epsilon := range targets { + if quantile*s.n <= r { + f = (2 * epsilon * r) / quantile + } else { + f = (2 * epsilon * (s.n - r)) / (1 - quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. 
+func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE new file mode 100644 index 000000000..004e77fe5 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md new file mode 100644 index 000000000..d7f80e977 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/README.md @@ -0,0 +1,915 @@ +Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) +==== + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. + +## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get github.com/boltdb/bolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/boltdb/bolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. 
+ db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating transaction from the `DB` is thread safe. + +Read-only transactions and read-write transactions should not depend on one +another and generally shouldn't be opened simultaneously in the same goroutine. +This can cause a deadlock as the read-write transaction needs to periodically +re-map the data file but it cannot do so while a read-only transaction is open. + + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also rollback the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times, if parts of the transaction fail. The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function, instead +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ... + id = newValue + return nil +}) +if err != nil { + return ... 
+}
+fmt.Printf("Allocated ID %d\n", id)
+```
+
+
+#### Managing transactions manually
+
+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
+function. These helper functions will start the transaction, execute a function,
+and then safely close your transaction if an error is returned. This is the
+recommended way to use Bolt transactions.
+
+However, sometimes you may want to manually start and end your transactions.
+You can use the `DB.Begin()` function directly but **please** be sure to close
+the transaction.
+
+```go
+// Start a writable transaction.
+tx, err := db.Begin(true)
+if err != nil {
+    return err
+}
+defer tx.Rollback()
+
+// Use the transaction...
+_, err = tx.CreateBucket([]byte("MyBucket"))
+if err != nil {
+    return err
+}
+
+// Commit the transaction and check for error.
+if err := tx.Commit(); err != nil {
+    return err
+}
+```
+
+The first argument to `DB.Begin()` is a boolean stating if the transaction
+should be writable.
+
+
+### Using buckets
+
+Buckets are collections of key/value pairs within the database. All keys in a
+bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
+function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+    _, err := tx.CreateBucket([]byte("MyBucket"))
+    if err != nil {
+        return fmt.Errorf("create bucket: %s", err)
+    }
+    return nil
+})
+```
+
+You can also create a bucket only if it doesn't exist by using the
+`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
+
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+    b := tx.Bucket([]byte("MyBucket"))
+    err := b.Put([]byte("answer"), []byte("42"))
+    return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+    b := tx.Bucket([]byte("MyBucket"))
+    v := b.Get([]byte("answer"))
+    fmt.Printf("The answer is: %s\n", v)
+    return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. It's important to note that you can have a zero-length value
+set to a key, which is different from the key not existing.
+
+Use the `Bucket.Delete()` function to delete a key from the bucket.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
+
+
+### Autoincrementing integer for the bucket
+By using the `NextSequence()` function, you can let Bolt determine a sequence
+which can be used as the unique identifier for your key/value pairs. See the
+example below.
+
+```go
+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
+func (s *Store) CreateUser(u *User) error {
+    return s.db.Update(func(tx *bolt.Tx) error {
+        // Retrieve the users bucket.
+        // This should be created when the DB is first opened.
+        b := tx.Bucket([]byte("users"))
+
+        // Generate ID for the user.
+ // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. + id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. +func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + +### Iterating over keys + +Bolt stores its keys in byte-sorted order within a bucket. This makes sequential +iteration over these keys extremely fast. To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + +Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. 
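+
+To make the caveat above concrete, here is a small sketch of two key encodings
+that do sort correctly. This is editorial illustration, not README or Bolt API:
+both helper names are hypothetical, and the binary form assumes times after the
+Unix epoch.
+
+```go
+package main
+
+import (
+    "encoding/binary"
+    "fmt"
+    "time"
+)
+
+// timeKeyRFC3339 formats t with a fixed number of fractional digits
+// (".000000000" pads with zeros), unlike time.RFC3339Nano, so the
+// lexicographic order of keys matches chronological order.
+func timeKeyRFC3339(t time.Time) []byte {
+    return []byte(t.UTC().Format("2006-01-02T15:04:05.000000000Z07:00"))
+}
+
+// timeKeyBinary encodes t as an 8-byte big-endian UnixNano value, which
+// also sorts chronologically and is more compact. Assumes t is not
+// before the Unix epoch, so the uint64 conversion is order-preserving.
+func timeKeyBinary(t time.Time) []byte {
+    b := make([]byte, 8)
+    binary.BigEndian.PutUint64(b, uint64(t.UnixNano()))
+    return b
+}
+
+func main() {
+    t := time.Date(1995, 6, 1, 12, 0, 0, 42, time.UTC)
+    fmt.Printf("%s\n%x\n", timeKeyRFC3339(t), timeKeyBinary(t))
+}
+```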
+
+
+#### ForEach()
+
+You can also use the function `ForEach()` if you know you'll be iterating over
+all the keys in a bucket:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+    // Assume bucket exists and has keys
+    b := tx.Bucket([]byte("MyBucket"))
+
+    b.ForEach(func(k, v []byte) error {
+        fmt.Printf("key=%s, value=%s\n", k, v)
+        return nil
+    })
+    return nil
+})
+```
+
+Please note that keys and values in `ForEach()` are only valid while
+the transaction is open. If you need to use a key or value outside of
+the transaction, you must use `copy()` to copy it to another byte
+slice.
+
+### Nested buckets
+
+You can also store a bucket in a key to create nested buckets. The API is the
+same as the bucket management API on the `DB` object:
+
+```go
+func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
+func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
+func (*Bucket) DeleteBucket(key []byte) error
+```
+
+Say you have a multi-tenant application where the root-level bucket is the
+account bucket. Inside this bucket is a bucket per account, and inside each
+account bucket you can have many buckets pertaining to the account itself
+(Users, Notes, etc.), isolating the information into logical groupings.
+
+```go
+
+// createUser creates a new user in the given account.
+func createUser(accountID int, u *User) error {
+    // Start the transaction.
+    tx, err := db.Begin(true)
+    if err != nil {
+        return err
+    }
+    defer tx.Rollback()
+
+    // Retrieve the root bucket for the account.
+    // Assume this has already been created when the account was set up.
+    root := tx.Bucket([]byte(strconv.Itoa(accountID)))
+
+    // Setup the users bucket.
+    bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
+    if err != nil {
+        return err
+    }
+
+    // Generate an ID for the new user.
+    userID, err := bkt.NextSequence()
+    if err != nil {
+        return err
+    }
+    u.ID = userID
+
+    // Marshal and save the encoded user.
+    if buf, err := json.Marshal(u); err != nil {
+        return err
+    } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
+        return err
+    }
+
+    // Commit the transaction.
+    if err := tx.Commit(); err != nil {
+        return err
+    }
+
+    return nil
+}
+
+```
+
+
+
+
+### Database backups
+
+Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()`
+function to write a consistent view of the database to a writer. If you call
+this from a read-only transaction, it will perform a hot backup and not block
+your other database reads and writes.
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
+
+One common use case is to back up over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+    err := db.View(func(tx *bolt.Tx) error {
+        w.Header().Set("Content-Type", "application/octet-stream")
+        w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+        w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+        _, err := tx.WriteTo(w)
+        return err
+    })
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+    }
+}
+```
+
+Then you can back up using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
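+
+For completeness, a hedged sketch of wiring the handler above into a server.
+It assumes the `BackupHandleFunc` from the previous snippet is in the same
+package and that `db` is the package-level handle it reads; neither the route
+nor the port is prescribed by Bolt.
+
+```go
+package main
+
+import (
+    "log"
+    "net/http"
+
+    "github.com/boltdb/bolt"
+)
+
+var db *bolt.DB
+
+func main() {
+    var err error
+    if db, err = bolt.Open("my.db", 0600, nil); err != nil {
+        log.Fatal(err)
+    }
+    defer db.Close()
+
+    // Register the hot-backup handler defined above.
+    http.HandleFunc("/backup", BackupHandleFunc)
+    log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```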
+ +If you want to backup to another file you can use the `Tx.CopyFile()` helper +function. + + +### Statistics + +The database keeps a running count of many of the internal operations it +performs so you can better understand what's going on. By grabbing a snapshot +of these stats at two points in time we can see what operations were performed +in that time range. + +For example, we could start a goroutine to log stats every 10 seconds: + +```go +go func() { + // Grab the initial stats. + prev := db.Stats() + + for { + // Wait for 10s. + time.Sleep(10 * time.Second) + + // Grab the current stats and diff them. + stats := db.Stats() + diff := stats.Sub(&prev) + + // Encode stats to JSON and print to STDERR. + json.NewEncoder(os.Stderr).Encode(diff) + + // Save stats for the next loop. + prev = stats + } +}() +``` + +It's also useful to pipe these stats to a service such as statsd for monitoring +or to provide an HTTP endpoint that will perform a fixed-length sample. + + +### Read-Only Mode + +Sometimes it is useful to create a shared, read-only Bolt database. To this, +set the `Options.ReadOnly` flag when opening your database. Read-only mode +uses a shared lock to allow multiple processes to read from the database but +it will block any processes from opening the database in read-write mode. + +```go +db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +if err != nil { + log.Fatal(err) +} +``` + +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with a initializing +constructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS require extra permissions or cleanup from using this method. + +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud. 
These snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` + +## Resources + +For more information on getting started with Bolt, check out the following articles: + +* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). +* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville + + +## Comparison with other databases + +### Postgres, MySQL, & other relational databases + +Relational databases structure data into rows and are only accessible through +the use of SQL. This approach provides flexibility in how you store and query +your data but also incurs overhead in parsing and planning SQL statements. Bolt +accesses all data by a byte slice key. This makes Bolt fast to read and write +data by key but provides no built-in support for joining values together. + +Most relational databases (with the exception of SQLite) are standalone servers +that run separately from your application. This gives your systems +flexibility to connect multiple application servers to a single database +server but also adds overhead in serializing and transporting data over the +network. Bolt runs as a library included in your application so all data access +has to go through your application's process. This brings data closer to your +application but limits multi-process access to the data. + + +### LevelDB, RocksDB + +LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that +they are libraries bundled into the application, however, their underlying +structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes +random writes by using a write ahead log and multi-tiered, sorted files called +SSTables. Bolt uses a B+tree internally and only a single file. Both approaches +have trade-offs. + +If you require a high random write throughput (>10,000 w/sec) or you need to use +spinning disks then LevelDB could be a good choice. If your application is +read-heavy or does a lot of range scans then Bolt could be a good choice. + +One other important consideration is that LevelDB does not have transactions. +It supports batch writing of key/values pairs and it supports read snapshots +but it will not give you the ability to do a compare-and-swap operation safely. +Bolt supports fully serializable ACID transactions. 
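+
+Since the comparison above turns on compare-and-swap, a short sketch may help
+show how CAS falls out of Bolt's serializable read-write transactions. `casPut`
+is an illustrative helper, not a Bolt API, and for simplicity it treats a
+missing key and a zero-length value as equal (via `bytes.Equal`), which a real
+implementation may want to distinguish.
+
+```go
+package store
+
+import (
+    "bytes"
+    "errors"
+
+    "github.com/boltdb/bolt"
+)
+
+// casPut writes newVal under key only if the current value still equals
+// oldVal. Because Bolt runs at most one read-write transaction at a time,
+// the read and the write below cannot interleave with another writer.
+func casPut(db *bolt.DB, bucket, key, oldVal, newVal []byte) error {
+    return db.Update(func(tx *bolt.Tx) error {
+        b := tx.Bucket(bucket)
+        if b == nil {
+            return bolt.ErrBucketNotFound
+        }
+        if !bytes.Equal(b.Get(key), oldVal) {
+            return errors.New("value changed concurrently")
+        }
+        return b.Put(key, newVal)
+    })
+}
+```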
+ + +### LMDB + +Bolt was originally a port of LMDB so it is architecturally similar. Both use +a B+tree, have ACID semantics with fully serializable transactions, and support +lock-free MVCC using a single writer and multiple readers. + +The two projects have somewhat diverged. LMDB heavily focuses on raw performance +while Bolt has focused on simplicity and ease of use. For example, LMDB allows +several unsafe actions such as direct writes for the sake of performance. Bolt +opts to disallow actions which can leave the database in a corrupted state. The +only exception to this in Bolt is `DB.NoSync`. + +There are also a few differences in API. LMDB requires a maximum mmap size when +opening an `mdb_env` whereas Bolt will handle incremental mmap resizing +automatically. LMDB overloads the getter and setter functions with multiple +flags whereas Bolt splits these specialized cases into their own functions. + + +## Caveats & Limitations + +It's important to pick the right tool for the job and Bolt is no exception. +Here are a few things to note when evaluating and using Bolt: + +* Bolt is good for read intensive workloads. Sequential write performance is + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. + +* Bolt uses a B+tree internally so there can be a lot of random page access. + SSDs provide a significant performance boost over spinning disks. + +* Try to avoid long running read transactions. Bolt uses copy-on-write so + old pages cannot be reclaimed while an old transaction is using them. + +* Byte slices returned from Bolt are only valid during a transaction. Once the + transaction has been committed or rolled back then the memory they point to + can be reused by a new page or can be unmapped from virtual memory and you'll + see an `unexpected fault address` panic when accessing it. + +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + +* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for + buckets that have random inserts will cause your database to have very poor + page utilization. + +* Use larger buckets in general. Smaller buckets causes poor page utilization + once they become larger than the page size (typically 4KB). + +* Bulk loading a lot of random writes into a new bucket can be slow as the + page will not split until the transaction is committed. Randomly inserting + more than 100,000 key/value pairs into a single new bucket in a single + transaction is not advised. + +* Bolt uses a memory-mapped file so the underlying operating system handles the + caching of the data. Typically, the OS will cache as much of the file as it + can in memory and will release memory as needed to other processes. This means + that Bolt can show very high memory usage when working with large databases. + However, this is expected and the OS will release memory as needed. Bolt can + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bits systems. + +* The data structures in the Bolt database are memory mapped so the data file + will be endian specific. This means that you cannot copy a Bolt file from a + little endian machine to a big endian machine and have it work. For most + users this is not a concern since most modern CPUs are little endian. 
+ +* Because of the way pages are laid out on disk, Bolt cannot truncate data files + and return free pages back to the disk. Instead, Bolt maintains a free list + of unused pages within its data file. These free pages can be reused by later + transactions. This works well for many use cases as databases generally tend + to grow. However, it's important to note that deleting large chunks of data + will not allow you to reclaim that space on disk. + + For more information on page allocation, [see this comment][page-allocation]. + +[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 + + +## Reading the Source + +Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. + +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where they key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair. During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request. + + +## Other Projects Using Bolt + +Below is a list of public, open source projects that use Bolt: + +* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. 
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. +* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. +* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. +* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. +* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. 
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. +* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. +* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. +* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. +* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. +* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. +* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. +* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. +* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB + +If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go new file mode 100644 index 000000000..820d533c1 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_386.go @@ -0,0 +1,10 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go new file mode 100644 index 000000000..98fafdb47 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_amd64.go @@ -0,0 +1,10 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go new file mode 100644 index 000000000..7e5cb4b94 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 000000000..b26d84f91 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,12 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go new file mode 100644 index 000000000..2b6766614 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go new file mode 100644 index 000000000..7058c3d73 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go new file mode 100644 index 000000000..645ddc3ed --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go new file mode 100644 index 000000000..9331d9771 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go @@ -0,0 +1,12 @@ +// +build ppc64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go new file mode 100644 index 000000000..8c143bc5d --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go new file mode 100644 index 000000000..d7c39af92 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go new file mode 100644 index 000000000..cad62dda1 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix.go @@ -0,0 +1,89 @@ +// +build !windows,!plan9,!solaris + +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + flag := syscall.LOCK_SH + if exclusive { + flag = syscall.LOCK_EX + } + + // Otherwise attempt to obtain an exclusive lock. + err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. 
+ db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go new file mode 100644 index 000000000..307bf2b3e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -0,0 +1,90 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Whence = 0 + lock.Pid = 0 + if exclusive { + lock.Type = syscall.F_WRLCK + } else { + lock.Type = syscall.F_RDLCK + } + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. 
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go new file mode 100644 index 000000000..b00fb0720 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_windows.go @@ -0,0 +1,144 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path + lockExt) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. 
+ sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go new file mode 100644 index 000000000..f50442523 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go new file mode 100644 index 000000000..0c5bf2746 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bucket.go @@ -0,0 +1,777 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. 
+func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. + unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable && !unaligned { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } + return nil, ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. 
+ key = cloneBytes(key)
+ c.node().put(key, key, value, 0, bucketLeafFlag)
+
+ // Since subbuckets are not allowed on inline buckets, we need to
+ // dereference the inline page, if it exists. This will cause the bucket
+ // to be treated as a regular, non-inline bucket for the rest of the tx.
+ b.page = nil
+
+ return b.Bucket(key), nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+ child, err := b.CreateBucket(key)
+ if err == ErrBucketExists {
+ return b.Bucket(key), nil
+ } else if err != nil {
+ return nil, err
+ }
+ return child, nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if bucket doesn't exist or is not a bucket.
+ if !bytes.Equal(key, k) {
+ return ErrBucketNotFound
+ } else if (flags & bucketLeafFlag) == 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Recursively delete all child buckets.
+ child := b.Bucket(key)
+ err := child.ForEach(func(k, v []byte) error {
+ if v == nil {
+ if err := child.DeleteBucket(k); err != nil {
+ return fmt.Errorf("delete bucket: %s", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Remove cached copy.
+ delete(b.buckets, string(key))
+
+ // Release all bucket pages to freelist.
+ child.nodes = nil
+ child.rootNode = nil
+ child.free()
+
+ // Delete the node if we have a matching key.
+ c.node().del(key)
+
+ return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (b *Bucket) Get(key []byte) []byte {
+ k, v, flags := b.Cursor().seek(key)
+
+ // Return nil if this is a bucket.
+ if (flags & bucketLeafFlag) != 0 {
+ return nil
+ }
+
+ // If our target node isn't the same key as what's passed in then return nil.
+ if !bytes.Equal(key, k) {
+ return nil
+ }
+ return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exists then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ } else if len(key) == 0 {
+ return ErrKeyRequired
+ } else if len(key) > MaxKeySize {
+ return ErrKeyTooLarge
+ } else if int64(len(value)) > MaxValueSize {
+ return ErrValueTooLarge
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if there is an existing key with a bucket value.
+ if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Insert into node.
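+ // (Editorial note: only the key is cloned, so the bucket never depends
+ // on the caller's key slice after Put returns; the value is stored by
+ // reference, which is why the doc comment above requires it to remain
+ // valid for the life of the transaction.)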
+ key = cloneBytes(key)
+ c.node().put(key, key, value, 0, 0)
+
+ return nil
+}
+
+// Delete removes a key from the bucket.
+// If the key does not exist then nothing is done and a nil error is returned.
+// Returns an error if the bucket was created from a read-only transaction.
+func (b *Bucket) Delete(key []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ _, _, flags := c.seek(key)
+
+ // Return an error if the existing value is a bucket.
+ if (flags & bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Delete the node if we have a matching key.
+ c.node().del(key)
+
+ return nil
+}
+
+// Sequence returns the current integer for the bucket without incrementing it.
+func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
+
+// SetSequence updates the sequence number for the bucket.
+func (b *Bucket) SetSequence(v uint64) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Materialize the root node if it hasn't been already so that the
+ // bucket will be saved during commit.
+ if b.rootNode == nil {
+ _ = b.node(b.root, nil)
+ }
+
+ // Set the sequence.
+ b.bucket.sequence = v
+ return nil
+}
+
+// NextSequence returns an autoincrementing integer for the bucket.
+func (b *Bucket) NextSequence() (uint64, error) {
+ if b.tx.db == nil {
+ return 0, ErrTxClosed
+ } else if !b.Writable() {
+ return 0, ErrTxNotWritable
+ }
+
+ // Materialize the root node if it hasn't been already so that the
+ // bucket will be saved during commit.
+ if b.rootNode == nil {
+ _ = b.node(b.root, nil)
+ }
+
+ // Increment and return the sequence.
+ b.bucket.sequence++
+ return b.bucket.sequence, nil
+}
+
+// ForEach executes a function for each key/value pair in a bucket.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller. The provided function must not modify
+// the bucket; this will result in undefined behavior.
+func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ }
+ c := b.Cursor()
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ if err := fn(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Stats returns stats on a bucket.
+func (b *Bucket) Stats() BucketStats {
+ var s, subStats BucketStats
+ pageSize := b.tx.db.pageSize
+ s.BucketN += 1
+ if b.root == 0 {
+ s.InlineBucketN += 1
+ }
+ b.forEachPage(func(p *page, depth int) {
+ if (p.flags & leafPageFlag) != 0 {
+ s.KeyN += int(p.count)
+
+ // used totals the used bytes for the page
+ used := pageHeaderSize
+
+ if p.count != 0 {
+ // If page has any elements, add all element headers.
+ used += leafPageElementSize * int(p.count-1)
+
+ // Add all element key, value sizes.
+ // The computation takes advantage of the fact that the position
+ // of the last element's key/value equals the total of the sizes
+ // of all previous elements' keys and values.
+ // It also includes the last element's header.
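+ //
+ // (Editorial sketch of the arithmetic: the last element's pos is
+ // measured from its own header, so pos equals one element header plus
+ // the key/value bytes of every earlier element; adding its ksize and
+ // vsize then counts each header and each key/value byte exactly once.)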
+ lastElement := p.leafPageElement(p.count - 1) + used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += used + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += used + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. + subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += int(lastElement.pos + lastElement.ksize) + s.BranchInuse += used + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. 
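+ // (Editorial note: a spilled child is recorded in its parent as just
+ // the bucket header -- root pgid plus sequence -- with no inline page.)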
+ value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. 
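+// Pages that are backed by the mmap are returned to the freelist under
+// the current transaction id, while pages that exist only as materialized
+// nodes are released through node.free.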
+func (b *Bucket) free() {
+ if b.root == 0 {
+ return
+ }
+
+ var tx = b.tx
+ b.forEachPageNode(func(p *page, n *node, _ int) {
+ if p != nil {
+ tx.db.freelist.free(tx.meta.txid, p)
+ } else {
+ n.free()
+ }
+ })
+ b.root = 0
+}
+
+// dereference removes all references to the old mmap.
+func (b *Bucket) dereference() {
+ if b.rootNode != nil {
+ b.rootNode.root().dereference()
+ }
+
+ for _, child := range b.buckets {
+ child.dereference()
+ }
+}
+
+// pageNode returns the in-memory node, if it exists.
+// Otherwise returns the underlying page.
+func (b *Bucket) pageNode(id pgid) (*page, *node) {
+ // Inline buckets have a fake page embedded in their value so treat them
+ // differently. We'll return the rootNode (if available) or the fake page.
+ if b.root == 0 {
+ if id != 0 {
+ panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
+ }
+ if b.rootNode != nil {
+ return nil, b.rootNode
+ }
+ return b.page, nil
+ }
+
+ // Check the node cache for non-inline buckets.
+ if b.nodes != nil {
+ if n := b.nodes[id]; n != nil {
+ return nil, n
+ }
+ }
+
+ // Finally lookup the page from the transaction if no node is materialized.
+ return b.tx.page(id), nil
+}
+
+// BucketStats records statistics about resources used by a bucket.
+type BucketStats struct {
+ // Page count statistics.
+ BranchPageN int // number of logical branch pages
+ BranchOverflowN int // number of physical branch overflow pages
+ LeafPageN int // number of logical leaf pages
+ LeafOverflowN int // number of physical leaf overflow pages
+
+ // Tree statistics.
+ KeyN int // number of key/value pairs
+ Depth int // number of levels in B+tree
+
+ // Page size utilization.
+ BranchAlloc int // bytes allocated for physical branch pages
+ BranchInuse int // bytes actually used for branch data
+ LeafAlloc int // bytes allocated for physical leaf pages
+ LeafInuse int // bytes actually used for leaf data
+
+ // Bucket statistics
+ BucketN int // total number of buckets including the top bucket
+ InlineBucketN int // total number of inlined buckets
+ InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
+}
+
+func (s *BucketStats) Add(other BucketStats) {
+ s.BranchPageN += other.BranchPageN
+ s.BranchOverflowN += other.BranchOverflowN
+ s.LeafPageN += other.LeafPageN
+ s.LeafOverflowN += other.LeafOverflowN
+ s.KeyN += other.KeyN
+ if s.Depth < other.Depth {
+ s.Depth = other.Depth
+ }
+ s.BranchAlloc += other.BranchAlloc
+ s.BranchInuse += other.BranchInuse
+ s.LeafAlloc += other.LeafAlloc
+ s.LeafInuse += other.LeafInuse
+
+ s.BucketN += other.BucketN
+ s.InlineBucketN += other.InlineBucketN
+ s.InlineBucketInuse += other.InlineBucketInuse
+}
+
+// cloneBytes returns a copy of a given slice.
+func cloneBytes(v []byte) []byte {
+ var clone = make([]byte, len(v))
+ copy(clone, v)
+ return clone
+}
diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go
new file mode 100644
index 000000000..1be9f35e3
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/cursor.go
@@ -0,0 +1,400 @@
+package bolt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+)
+
+// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
+// Cursors see nested buckets with value == nil.
+// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
+//
+// Keys and values returned from the cursor are only valid for the life of the transaction.
+// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. 
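+//
+// Illustrative sketch (editorial, not part of upstream Bolt): Seek is the
+// building block for prefix scans; the bucket name and prefix here are
+// hypothetical, and bytes/fmt are assumed to be imported by the caller.
+//
+//	db.View(func(tx *bolt.Tx) error {
+//		c := tx.Bucket([]byte("events")).Cursor()
+//		prefix := []byte("2018-02-")
+//		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
+//			fmt.Printf("%s: %s\n", k, v)
+//		}
+//		return nil
+//	})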
+func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. 
+ c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. 
+ if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
+ return ref.node
+ }
+
+ // Start from root and traverse down the hierarchy.
+ var n = c.stack[0].node
+ if n == nil {
+ n = c.bucket.node(c.stack[0].page.id, nil)
+ }
+ for _, ref := range c.stack[:len(c.stack)-1] {
+ _assert(!n.isLeaf, "expected branch node")
+ n = n.childAt(int(ref.index))
+ }
+ _assert(n.isLeaf, "expected leaf node")
+ return n
+}
+
+// elemRef represents a reference to an element on a given page/node.
+type elemRef struct {
+ page *page
+ node *node
+ index int
+}
+
+// isLeaf returns whether the ref is pointing at a leaf page/node.
+func (r *elemRef) isLeaf() bool {
+ if r.node != nil {
+ return r.node.isLeaf
+ }
+ return (r.page.flags & leafPageFlag) != 0
+}
+
+// count returns the number of inodes or page elements.
+func (r *elemRef) count() int {
+ if r.node != nil {
+ return len(r.node.inodes)
+ }
+ return int(r.page.count)
+}
diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go
new file mode 100644
index 000000000..f352ff14f
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/db.go
@@ -0,0 +1,1039 @@
+package bolt
+
+import (
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "log"
+ "os"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+// The largest step that can be taken when remapping the mmap.
+const maxMmapStep = 1 << 30 // 1GB
+
+// The data file format version.
+const version = 2
+
+// Represents a marker value to indicate that a file is a Bolt DB.
+const magic uint32 = 0xED0CDAED
+
+// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
+// syncing changes to a file. This is required as some operating systems,
+// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
+// must be synchronized using the msync(2) syscall.
+const IgnoreNoSync = runtime.GOOS == "openbsd"
+
+// Default values if not set in a DB instance.
+const (
+ DefaultMaxBatchSize int = 1000
+ DefaultMaxBatchDelay = 10 * time.Millisecond
+ DefaultAllocSize = 16 * 1024 * 1024
+)
+
+// default page size for db is set to the OS page size.
+var defaultPageSize = os.Getpagesize()
+
+// DB represents a collection of buckets persisted to a file on disk.
+// All data access is performed through transactions which can be obtained through the DB.
+// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
+type DB struct {
+ // When enabled, the database will perform a Check() after every commit.
+ // A panic is issued if the database is in an inconsistent state. This
+ // flag has a large performance impact so it should only be used for
+ // debugging purposes.
+ StrictMode bool
+
+ // Setting the NoSync flag will cause the database to skip fsync()
+ // calls after each commit. This can be useful when bulk loading data
+ // into a database and you can restart the bulk load in the event of
+ // a system failure or database corruption. Do not set this flag for
+ // normal use.
+ //
+ // If the package global IgnoreNoSync constant is true, this value is
+ // ignored. See the comment on that constant for more details.
+ //
+ // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
+ NoSync bool
+
+ // When true, skips the truncate call when growing the database.
+ // Setting this to true is only safe on non-ext3/ext4 systems.
+ // Skipping truncation avoids preallocation of hard drive space and
+ // bypasses a truncate() and fsync() syscall on remapping.
+ //
+ // https://github.com/boltdb/bolt/issues/284
+ NoGrowSync bool
+
+ // If you want to read the entire database fast, you can set MmapFlags to
+ // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
+ MmapFlags int
+
+ // MaxBatchSize is the maximum size of a batch. Default value is
+ // copied from DefaultMaxBatchSize in Open.
+ //
+ // If <=0, disables batching.
+ //
+ // Do not change concurrently with calls to Batch.
+ MaxBatchSize int
+
+ // MaxBatchDelay is the maximum delay before a batch starts.
+ // Default value is copied from DefaultMaxBatchDelay in Open.
+ //
+ // If <=0, effectively disables batching.
+ //
+ // Do not change concurrently with calls to Batch.
+ MaxBatchDelay time.Duration
+
+ // AllocSize is the amount of space allocated when the database
+ // needs to create new pages. This is done to amortize the cost
+ // of truncate() and fsync() when growing the data file.
+ AllocSize int
+
+ path string
+ file *os.File
+ lockfile *os.File // windows only
+ dataref []byte // mmap'ed readonly, write throws SEGV
+ data *[maxMapSize]byte
+ datasz int
+ filesz int // current on disk file size
+ meta0 *meta
+ meta1 *meta
+ pageSize int
+ opened bool
+ rwtx *Tx
+ txs []*Tx
+ freelist *freelist
+ stats Stats
+
+ pagePool sync.Pool
+
+ batchMu sync.Mutex
+ batch *batch
+
+ rwlock sync.Mutex // Allows only one writer at a time.
+ metalock sync.Mutex // Protects meta page access.
+ mmaplock sync.RWMutex // Protects mmap access during remapping.
+ statlock sync.RWMutex // Protects stats access.
+
+ ops struct {
+ writeAt func(b []byte, off int64) (n int, err error)
+ }
+
+ // Read only mode.
+ // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
+ readOnly bool
+}
+
+// Path returns the path to the currently open database file.
+func (db *DB) Path() string {
+ return db.path
+}
+
+// GoString returns the Go string representation of the database.
+func (db *DB) GoString() string {
+ return fmt.Sprintf("bolt.DB{path:%q}", db.path)
+}
+
+// String returns the string representation of the database.
+func (db *DB) String() string {
+ return fmt.Sprintf("DB<%q>", db.path)
+}
+
+// Open creates and opens a database at the given path.
+// If the file does not exist then it will be created automatically.
+// Passing in nil options will cause Bolt to open the database with the default options.
+func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
+ var db = &DB{opened: true}
+
+ // Set default options if no options are provided.
+ if options == nil {
+ options = DefaultOptions
+ }
+ db.NoGrowSync = options.NoGrowSync
+ db.MmapFlags = options.MmapFlags
+
+ // Set default values for later DB operations.
+ db.MaxBatchSize = DefaultMaxBatchSize
+ db.MaxBatchDelay = DefaultMaxBatchDelay
+ db.AllocSize = DefaultAllocSize
+
+ flag := os.O_RDWR
+ if options.ReadOnly {
+ flag = os.O_RDONLY
+ db.readOnly = true
+ }
+
+ // Open data file and separate sync handler for metadata writes.
+ db.path = path
+ var err error
+ if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+
+ // Lock file so that other processes using Bolt in read-write mode cannot
+ // use the database at the same time. This would cause corruption since
+ // the two processes would write meta pages and free pages separately.
+ // The database file is locked exclusively (only one process can grab the lock)
+ // if !options.ReadOnly.
+ // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + if _, err := db.file.ReadAt(buf[:], 0); err == nil { + m := db.pageInBuffer(buf[:], 0).meta() + if err := m.validate(); err != nil { + // If we can't read the page size, we can assume it's the same + // as the OS -- since that's how the page size was chosen in the + // first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + db.pageSize = os.Getpagesize() + } else { + db.pageSize = int(m.pageSize) + } + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + // Read in the freelist. + db.freelist = newFreelist() + db.freelist.read(db.page(db.meta().freelist)) + + // Mark the database as opened and return. + return db, nil +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. +func (db *DB) mmap(minsz int) error { + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + info, err := db.file.Stat() + if err != nil { + return fmt.Errorf("mmap stat error: %s", err) + } else if int(info.Size()) < db.pageSize*2 { + return fmt.Errorf("file size too small") + } + + // Ensure the size is at least the minimum size. + var size = int(info.Size()) + if size < minsz { + size = minsz + } + size, err = db.mmapSize(size) + if err != nil { + return err + } + + // Dereference all mmap references before unmapping. + if db.rwtx != nil { + db.rwtx.root.dereference() + } + + // Unmap existing data before continuing. + if err := db.munmap(); err != nil { + return err + } + + // Memory-map the data file as a byte slice. + if err := mmap(db, size); err != nil { + return err + } + + // Save references to the meta pages. + db.meta0 = db.page(0).meta() + db.meta1 = db.page(1).meta() + + // Validate the meta pages. We only return an error if both meta pages fail + // validation, since meta0 failing validation means that it wasn't saved + // properly -- but we can recover using meta1. And vice-versa. + err0 := db.meta0.validate() + err1 := db.meta1.validate() + if err0 != nil && err1 != nil { + return err0 + } + + return nil +} + +// munmap unmaps the data file from memory. +func (db *DB) munmap() error { + if err := munmap(db); err != nil { + return fmt.Errorf("unmap error: " + err.Error()) + } + return nil +} + +// mmapSize determines the appropriate size for the mmap given the current size +// of the database. The minimum size is 32KB and doubles until it reaches 1GB. +// Returns an error if the new mmap size is greater than the max allowed. 
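+// For example (editorial): a request of 100000 bytes returns 131072
+// (1<<17), while a request above 1GB is rounded up in 1GB steps and then
+// aligned to a multiple of the page size.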
+func (db *DB) mmapSize(size int) (int, error) {
+ // Double the size from 32KB until 1GB.
+ for i := uint(15); i <= 30; i++ {
+ if size <= 1<<i {
+ return 1 << i, nil
+ }
+ }
+
+ // Verify the requested size is not above the maximum allowed.
+ if size > maxMapSize {
+ return 0, fmt.Errorf("mmap too large")
+ }
+
+ // If larger than 1GB then grow by 1GB at a time.
+ sz := int64(size)
+ if remainder := sz % int64(maxMmapStep); remainder > 0 {
+ sz += int64(maxMmapStep) - remainder
+ }
+
+ // Ensure that the mmap size is a multiple of the page size.
+ // This should always be true since we're incrementing in GBs.
+ pageSize := int64(db.pageSize)
+ if (sz % pageSize) != 0 {
+ sz = ((sz / pageSize) + 1) * pageSize
+ }
+
+ // If we've exceeded the max size then only grow up to the max size.
+ if sz > maxMapSize {
+ sz = maxMapSize
+ }
+
+ return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+ // Set the page size to the OS page size.
+ db.pageSize = os.Getpagesize()
+
+ // Create two meta pages on a buffer.
+ buf := make([]byte, db.pageSize*4)
+ for i := 0; i < 2; i++ {
+ p := db.pageInBuffer(buf[:], pgid(i))
+ p.id = pgid(i)
+ p.flags = metaPageFlag
+
+ // Initialize the meta page.
+ m := p.meta()
+ m.magic = magic
+ m.version = version
+ m.pageSize = uint32(db.pageSize)
+ m.freelist = 2
+ m.root = bucket{root: 3}
+ m.pgid = 4
+ m.txid = txid(i)
+ m.checksum = m.sum64()
+ }
+
+ // Write an empty freelist at page 3.
+ p := db.pageInBuffer(buf[:], pgid(2))
+ p.id = pgid(2)
+ p.flags = freelistPageFlag
+ p.count = 0
+
+ // Write an empty leaf page at page 4.
+ p = db.pageInBuffer(buf[:], pgid(3))
+ p.id = pgid(3)
+ p.flags = leafPageFlag
+ p.count = 0
+
+ // Write the buffer to our data file.
+ if _, err := db.ops.writeAt(buf, 0); err != nil {
+ return err
+ }
+ if err := fdatasync(db); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Close releases all database resources.
+// All transactions must be closed before closing the database.
+func (db *DB) Close() error {
+ db.rwlock.Lock()
+ defer db.rwlock.Unlock()
+
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ db.mmaplock.RLock()
+ defer db.mmaplock.RUnlock()
+
+ return db.close()
+}
+
+func (db *DB) close() error {
+ if !db.opened {
+ return nil
+ }
+
+ db.opened = false
+
+ db.freelist = nil
+
+ // Clear ops.
+ db.ops.writeAt = nil
+
+ // Close the mmap.
+ if err := db.munmap(); err != nil {
+ return err
+ }
+
+ // Close file handles.
+ if db.file != nil {
+ // No need to unlock read-only file.
+ if !db.readOnly {
+ // Unlock the file.
+ if err := funlock(db); err != nil {
+ log.Printf("bolt.Close(): funlock error: %s", err)
+ }
+ }
+
+ // Close the file descriptor.
+ if err := db.file.Close(); err != nil {
+ return fmt.Errorf("db file close: %s", err)
+ }
+ db.file = nil
+ }
+
+ db.path = ""
+ return nil
+}
+
+// Begin starts a new transaction.
+// Multiple read-only transactions can be used concurrently but only one
+// write transaction can be used at a time. Starting multiple write transactions
+// will cause the calls to block and be serialized until the current write
+// transaction finishes.
+//
+// Transactions should not be dependent on one another. Opening a read
+// transaction and a write transaction in the same goroutine can cause the
+// writer to deadlock because the database periodically needs to re-mmap itself
+// as it grows and it cannot do that while a read transaction is open.
+//
+// If a long running read transaction (for example, a snapshot transaction) is
+// needed, you might want to set DB.InitialMmapSize to a large enough value
+// to avoid potential blocking of the write transaction.
+//
+// IMPORTANT: You must close read-only transactions after you are finished or
+// else the database will not reclaim old pages.
+func (db *DB) Begin(writable bool) (*Tx, error) {
+ if writable {
+ return db.beginRWTx()
+ }
+ return db.beginTx()
+}
+
+func (db *DB) beginTx() (*Tx, error) {
+ // Lock the meta pages while we initialize the transaction. We obtain
+ // the meta lock before the mmap lock because that's the order that the
+ // write transaction will obtain them.
+ db.metalock.Lock()
+
+ // Obtain a read-only lock on the mmap. When the mmap is remapped it will
+ // obtain a write lock so all transactions must finish before it can be
+ // remapped.
+ db.mmaplock.RLock()
+
+ // Exit if the database is not open yet.
+ if !db.opened {
+ db.mmaplock.RUnlock()
+ db.metalock.Unlock()
+ return nil, ErrDatabaseNotOpen
+ }
+
+ // Create a transaction associated with the database.
+ t := &Tx{}
+ t.init(db)
+
+ // Keep track of the transaction until it closes.
+ db.txs = append(db.txs, t)
+ n := len(db.txs)
+
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
+ // Update the transaction stats.
+ db.statlock.Lock()
+ db.stats.TxN++
+ db.stats.OpenTxN = n
+ db.statlock.Unlock()
+
+ return t, nil
+}
+
+func (db *DB) beginRWTx() (*Tx, error) {
+ // If the database was opened with Options.ReadOnly, return an error.
+ if db.readOnly {
+ return nil, ErrDatabaseReadOnly
+ }
+
+ // Obtain writer lock. This is released by the transaction when it closes.
+ // This enforces only one writer transaction at a time.
+ db.rwlock.Lock()
+
+ // Once we have the writer lock then we can lock the meta pages so that
+ // we can set up the transaction.
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ // Exit if the database is not open yet.
+ if !db.opened {
+ db.rwlock.Unlock()
+ return nil, ErrDatabaseNotOpen
+ }
+
+ // Create a transaction associated with the database.
+ t := &Tx{writable: true}
+ t.init(db)
+ db.rwtx = t
+
+ // Free any pages associated with closed read-only transactions.
+ var minid txid = 0xFFFFFFFFFFFFFFFF
+ for _, t := range db.txs {
+ if t.meta.txid < minid {
+ minid = t.meta.txid
+ }
+ }
+ if minid > 0 {
+ db.freelist.release(minid - 1)
+ }
+
+ return t, nil
+}
+
+// removeTx removes a transaction from the database.
+func (db *DB) removeTx(tx *Tx) {
+ // Release the read lock on the mmap.
+ db.mmaplock.RUnlock()
+
+ // Use the meta lock to restrict access to the DB object.
+ db.metalock.Lock()
+
+ // Remove the transaction.
+ for i, t := range db.txs {
+ if t == tx {
+ last := len(db.txs) - 1
+ db.txs[i] = db.txs[last]
+ db.txs[last] = nil
+ db.txs = db.txs[:last]
+ break
+ }
+ }
+ n := len(db.txs)
+
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
+ // Merge statistics.
+ db.statlock.Lock()
+ db.stats.OpenTxN = n
+ db.stats.TxStats.add(&tx.stats)
+ db.statlock.Unlock()
+}
+
+// Update executes a function within the context of a read-write managed transaction.
+// If no error is returned from the function then the transaction is committed.
+// If an error is returned then the entire transaction is rolled back.
+// Any error that is returned from the function or returned from the commit is
+// returned from the Update() method.
+//
+// Attempting to manually commit or rollback within the function will cause a panic.
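+//
+// A minimal sketch (editorial, not from the upstream docs): the bucket and
+// key names are hypothetical.
+//
+//	err := db.Update(func(tx *bolt.Tx) error {
+//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+//		if err != nil {
+//			return err
+//		}
+//		return b.Put([]byte("foo"), []byte("bar"))
+//	})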
+func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. 
it's
+ // safe to shorten b.calls here because db.batch no longer
+ // points to us, and we hold the mutex anyway.
+ c := b.calls[failIdx]
+ b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
+ // tell the submitter to re-run it solo and continue with the rest of the batch
+ c.err <- trySolo
+ continue retry
+ }
+
+ // pass success, or bolt internal errors, to all callers
+ for _, c := range b.calls {
+ if c.err != nil {
+ c.err <- err
+ }
+ }
+ break retry
+ }
+}
+
+// trySolo is a special sentinel error value used for signaling that a
+// transaction function should be re-run. It should never be seen by
+// callers.
+var trySolo = errors.New("batch function returned an error and should be re-run solo")
+
+type panicked struct {
+ reason interface{}
+}
+
+func (p panicked) Error() string {
+ if err, ok := p.reason.(error); ok {
+ return err.Error()
+ }
+ return fmt.Sprintf("panic: %v", p.reason)
+}
+
+func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
+ defer func() {
+ if p := recover(); p != nil {
+ err = panicked{p}
+ }
+ }()
+ return fn(tx)
+}
+
+// Sync executes fdatasync() against the database file handle.
+//
+// This is not necessary under normal operation, however, if you use NoSync
+// then it allows you to force the database file to sync against the disk.
+func (db *DB) Sync() error { return fdatasync(db) }
+
+// Stats retrieves ongoing performance stats for the database.
+// This is only updated when a transaction closes.
+func (db *DB) Stats() Stats {
+ db.statlock.RLock()
+ defer db.statlock.RUnlock()
+ return db.stats
+}
+
+// This is for internal access to the raw data bytes from the C cursor, use
+// carefully, or not at all.
+func (db *DB) Info() *Info {
+ return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
+}
+
+// page retrieves a page reference from the mmap based on the current page size.
+func (db *DB) page(id pgid) *page {
+ pos := id * pgid(db.pageSize)
+ return (*page)(unsafe.Pointer(&db.data[pos]))
+}
+
+// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
+func (db *DB) pageInBuffer(b []byte, id pgid) *page {
+ return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
+}
+
+// meta retrieves the current meta page reference.
+func (db *DB) meta() *meta {
+ // We have to return the meta with the highest txid which doesn't fail
+ // validation. Otherwise, we can cause errors when in fact the database is
+ // in a consistent state. metaA is the one with the higher txid.
+ metaA := db.meta0
+ metaB := db.meta1
+ if db.meta1.txid > db.meta0.txid {
+ metaA = db.meta1
+ metaB = db.meta0
+ }
+
+ // Use higher meta page if valid. Otherwise fallback to previous, if valid.
+ if err := metaA.validate(); err == nil {
+ return metaA
+ } else if err := metaB.validate(); err == nil {
+ return metaB
+ }
+
+ // This should never be reached, because both meta1 and meta0 were validated
+ // on mmap() and we do fsync() on every write.
+ panic("bolt.DB.meta(): invalid meta pages")
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (db *DB) allocate(count int) (*page, error) {
+ // Allocate a temporary buffer for the page.
+ var buf []byte
+ if count == 1 {
+ buf = db.pagePool.Get().([]byte)
+ } else {
+ buf = make([]byte, count*db.pageSize)
+ }
+ p := (*page)(unsafe.Pointer(&buf[0]))
+ p.overflow = uint32(count - 1)
+
+ // Use pages from the freelist if they are available.
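+ // (freelist.allocate returns 0 when no contiguous run of count pages is
+ // free, in which case we fall through and grow the mmap below.)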
+ if p.id = db.freelist.allocate(count); p.id != 0 {
+ return p, nil
+ }
+
+ // Resize mmap() if we're at the end.
+ p.id = db.rwtx.meta.pgid
+ var minsz = int((p.id+pgid(count))+1) * db.pageSize
+ if minsz >= db.datasz {
+ if err := db.mmap(minsz); err != nil {
+ return nil, fmt.Errorf("mmap allocate error: %s", err)
+ }
+ }
+
+ // Move the page id high water mark.
+ db.rwtx.meta.pgid += pgid(count)
+
+ return p, nil
+}
+
+// grow grows the size of the database to the given sz.
+func (db *DB) grow(sz int) error {
+ // Ignore if the new size is less than available file size.
+ if sz <= db.filesz {
+ return nil
+ }
+
+ // If the data is smaller than the alloc size then only allocate what's needed.
+ // Once it goes over the allocation size then allocate in chunks.
+ if db.datasz < db.AllocSize {
+ sz = db.datasz
+ } else {
+ sz += db.AllocSize
+ }
+
+ // Truncate and fsync to ensure file size metadata is flushed.
+ // https://github.com/boltdb/bolt/issues/284
+ if !db.NoGrowSync && !db.readOnly {
+ if runtime.GOOS != "windows" {
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("file resize error: %s", err)
+ }
+ }
+ if err := db.file.Sync(); err != nil {
+ return fmt.Errorf("file sync error: %s", err)
+ }
+ }
+
+ db.filesz = sz
+ return nil
+}
+
+func (db *DB) IsReadOnly() bool {
+ return db.readOnly
+}
+
+// Options represents the options that can be set when opening a database.
+type Options struct {
+ // Timeout is the amount of time to wait to obtain a file lock.
+ // When set to zero it will wait indefinitely. This option is only
+ // available on Darwin and Linux.
+ Timeout time.Duration
+
+ // Sets the DB.NoGrowSync flag before memory mapping the file.
+ NoGrowSync bool
+
+ // Open database in read-only mode. Uses flock(..., LOCK_SH|LOCK_NB) to
+ // grab a shared lock (UNIX).
+ ReadOnly bool
+
+ // Sets the DB.MmapFlags flag before memory mapping the file.
+ MmapFlags int
+
+ // InitialMmapSize is the initial mmap size of the database
+ // in bytes. Read transactions won't block the write transaction
+ // if the InitialMmapSize is large enough to hold the database mmap
+ // size. (See DB.Begin for more information)
+ //
+ // If <=0, the initial map size is 0.
+ // If InitialMmapSize is smaller than the previous database size,
+ // it has no effect.
+ InitialMmapSize int
+}
+
+// DefaultOptions represents the options used if nil options are passed into Open().
+// No timeout is used which will cause Bolt to wait indefinitely for a lock.
+var DefaultOptions = &Options{
+ Timeout: 0,
+ NoGrowSync: false,
+}
+
+// Stats represents statistics about the database.
+type Stats struct {
+ // Freelist stats
+ FreePageN int // total number of free pages on the freelist
+ PendingPageN int // total number of pending pages on the freelist
+ FreeAlloc int // total bytes allocated in free pages
+ FreelistInuse int // total bytes used by the freelist
+
+ // Transaction stats
+ TxN int // total number of started read transactions
+ OpenTxN int // number of currently open read transactions
+
+ TxStats TxStats // global, ongoing stats.
+}
+
+// Sub calculates and returns the difference between two sets of database stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
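+//
+// Illustrative sketch (editorial): sampling the read-transaction counter
+// over a ten-second window; the interval is arbitrary.
+//
+//	prev := db.Stats()
+//	time.Sleep(10 * time.Second)
+//	cur := db.Stats()
+//	diff := cur.Sub(&prev)
+//	fmt.Printf("%d read transactions started in the window\n", diff.TxN)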
+func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = s.TxN - other.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +func (s *Stats) add(other *Stats) { + s.TxStats.add(&other.TxStats) +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } else if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid { + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } + +func printstack() { + stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") + fmt.Fprintln(os.Stderr, stack) +} diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go new file mode 100644 index 000000000..cc937845d --- /dev/null +++ b/vendor/github.com/boltdb/bolt/doc.go @@ -0,0 +1,44 @@ +/* +Package bolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. + + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. 
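+
+A minimal read path looks like this (an editorial sketch, not part of the
+upstream documentation; the file path and bucket name are hypothetical):
+
+	db, err := bolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	err = db.View(func(tx *bolt.Tx) error {
+		if b := tx.Bucket([]byte("widgets")); b != nil {
+			fmt.Printf("foo=%s\n", b.Get([]byte("foo")))
+		}
+		return nil
+	})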
+ +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bolt diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go new file mode 100644 index 000000000..a3620a3eb --- /dev/null +++ b/vendor/github.com/boltdb/bolt/errors.go @@ -0,0 +1,71 @@ +package bolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. 
+	ErrKeyTooLarge = errors.New("key too large")
+
+	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+	ErrValueTooLarge = errors.New("value too large")
+
+	// ErrIncompatibleValue is returned when trying to create or delete a bucket
+	// on an existing non-bucket key or when trying to create or delete a
+	// non-bucket key on an existing bucket key.
+	ErrIncompatibleValue = errors.New("incompatible value")
+)
diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go
new file mode 100644
index 000000000..aba48f58c
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/freelist.go
@@ -0,0 +1,252 @@
+package bolt
+
+import (
+	"fmt"
+	"sort"
+	"unsafe"
+)
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+	ids     []pgid          // all free and available free page ids.
+	pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
+	cache   map[pgid]bool   // fast lookup of all free and pending page ids.
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist() *freelist {
+	return &freelist{
+		pending: make(map[txid][]pgid),
+		cache:   make(map[pgid]bool),
+	}
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+	n := f.count()
+	if n >= 0xFFFF {
+		// The first element will be used to store the count. See freelist.write.
+		n++
+	}
+	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
+}
+
+// count returns the count of pages on the freelist.
+func (f *freelist) count() int {
+	return f.free_count() + f.pending_count()
+}
+
+// free_count returns the count of free pages.
+func (f *freelist) free_count() int {
+	return len(f.ids)
+}
+
+// pending_count returns the count of pending pages.
+func (f *freelist) pending_count() int {
+	var count int
+	for _, list := range f.pending {
+		count += len(list)
+	}
+	return count
+}
+
+// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// f.count returns the minimum length required for dst.
+func (f *freelist) copyall(dst []pgid) {
+	m := make(pgids, 0, f.pending_count())
+	for _, list := range f.pending {
+		m = append(m, list...)
+	}
+	sort.Sort(m)
+	mergepgids(dst, f.ids, m)
+}
+
+// allocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
+func (f *freelist) allocate(n int) pgid {
+	if len(f.ids) == 0 {
+		return 0
+	}
+
+	var initial, previd pgid
+	for i, id := range f.ids {
+		if id <= 1 {
+			panic(fmt.Sprintf("invalid page allocation: %d", id))
+		}
+
+		// Reset initial page if this is not contiguous.
+		if previd == 0 || id-previd != 1 {
+			initial = id
+		}
+
+		// If we found a contiguous block then remove it and return it.
+		if (id-initial)+1 == pgid(n) {
+			// If we're allocating off the beginning then take the fast path
+			// and just adjust the existing slice. This will use extra memory
+			// temporarily but the append() in free() will realloc the slice
+			// as is necessary.
+			if (i + 1) == n {
+				f.ids = f.ids[i+1:]
+			} else {
+				copy(f.ids[i-n+1:], f.ids[i+1:])
+				f.ids = f.ids[:len(f.ids)-n]
+			}
+
+			// Remove from the free cache.
+			for i := pgid(0); i < pgid(n); i++ {
+				delete(f.cache, initial+i)
+			}
+
+			return initial
+		}
+
+		previd = id
+	}
+	return 0
+}
+
+// free releases a page and its overflow for a given transaction id.
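+// Freed pages are only staged on f.pending for the given transaction id; they
+// become allocatable again once release() moves them into f.ids.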
+// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + var ids = f.pending[txid] + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + + // Add to the freelist and cache. + ids = append(ids, id) + f.cache[id] = true + } + f.pending[txid] = ids +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, ids := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + for _, id := range f.pending[txid] { + delete(f.cache, id) + } + + // Remove pages from pending list. + delete(f.pending, txid) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + if count == 0 { + f.ids = nil + } else { + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + f.ids = make([]pgid, len(ids)) + copy(f.ids, ids) + + // Make sure they're sorted. + sort.Sort(pgids(f.ids)) + } + + // Rebuild the page cache. + f.reindex() +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + lenids := f.count() + if lenids == 0 { + p.count = uint16(lenids) + } else if lenids < 0xFFFF { + p.count = uint16(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. 
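+	// (Illustrative example: if ids = [3 4 7] was read from the page and tx 12
+	// still has [4] pending, the rebuilt available list is [3 7]; page 4
+	// re-enters the cache below via reindex().)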
+ var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool, len(f.ids)) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go new file mode 100644 index 000000000..159318b22 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/node.go @@ -0,0 +1,604 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. 
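+// If oldKey already exists, its inode is overwritten in place; otherwise a
+// new entry is inserted at oldKey's sorted position.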
+func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. + if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. 
+	//
+	// See: https://github.com/boltdb/bolt/pull/335
+	klen, vlen := len(item.key), len(item.value)
+	if len(b) < klen+vlen {
+		b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
+	}
+
+	// Write data for the element to the end of the page.
+	copy(b[0:], item.key)
+	b = b[klen:]
+	copy(b[0:], item.value)
+	b = b[vlen:]
+	}
+
+	// DEBUG ONLY: n.dump()
+}
+
+// split breaks up a node into multiple smaller nodes, if appropriate.
+// This should only be called from the spill() function.
+func (n *node) split(pageSize int) []*node {
+	var nodes []*node
+
+	node := n
+	for {
+		// Split node into two.
+		a, b := node.splitTwo(pageSize)
+		nodes = append(nodes, a)
+
+		// If we can't split then exit the loop.
+		if b == nil {
+			break
+		}
+
+		// Set node to b so it gets split on the next iteration.
+		node = b
+	}
+
+	return nodes
+}
+
+// splitTwo breaks up a node into two smaller nodes, if appropriate.
+// This should only be called from the split() function.
+func (n *node) splitTwo(pageSize int) (*node, *node) {
+	// Ignore the split if the page doesn't have at least enough nodes for
+	// two pages or if the nodes can fit in a single page.
+	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
+		return n, nil
+	}
+
+	// Determine the threshold before starting a new node.
+	var fillPercent = n.bucket.FillPercent
+	if fillPercent < minFillPercent {
+		fillPercent = minFillPercent
+	} else if fillPercent > maxFillPercent {
+		fillPercent = maxFillPercent
+	}
+	threshold := int(float64(pageSize) * fillPercent)
+
+	// Determine split position and sizes of the two pages.
+	splitIndex, _ := n.splitIndex(threshold)
+
+	// Split node into two separate nodes.
+	// If there's no parent then we'll need to create one.
+	if n.parent == nil {
+		n.parent = &node{bucket: n.bucket, children: []*node{n}}
+	}
+
+	// Create a new node and add it to the parent.
+	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+	n.parent.children = append(n.parent.children, next)
+
+	// Split inodes across two nodes.
+	next.inodes = n.inodes[splitIndex:]
+	n.inodes = n.inodes[:splitIndex]
+
+	// Update the statistics.
+	n.bucket.tx.stats.Split++
+
+	return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This is only called from split().
+func (n *node) splitIndex(threshold int) (index, sz int) {
+	sz = pageHeaderSize
+
+	// Loop until we only have the minimum number of keys required for the second page.
+	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
+		index = i
+		inode := n.inodes[i]
+		elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
+
+		// If we have at least the minimum number of keys and adding another
+		// node would put us over the threshold then exit and return.
+		if i >= minKeysPerPage && sz+elsize > threshold {
+			break
+		}
+
+		// Add the element size to the total size.
+		sz += elsize
+	}
+
+	return
+}
+
+// spill writes the nodes to dirty pages and splits nodes as it goes.
+// Returns an error if dirty pages cannot be allocated.
+func (n *node) spill() error {
+	var tx = n.bucket.tx
+	if n.spilled {
+		return nil
+	}
+
+	// Spill child nodes first. Child nodes can materialize sibling nodes in
+	// the case of split-merge so we cannot use a range loop. We have to check
+	// the children size on every loop iteration.
+ sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) 
+ n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. + for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. 
+type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go new file mode 100644 index 000000000..cde403ae8 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/page.go @@ -0,0 +1,197 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] +} + +// value returns a byte slice of the node value. 
+func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go new file mode 100644 index 000000000..6700308a2 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -0,0 +1,684 @@ +package bolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. +type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. 
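+	// (Only the bucket header, i.e. the root page id and sequence, is copied;
+	// page contents are still read from the shared mmap on demand.)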
+ tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. +func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. 
+ var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + opgid := tx.meta.pgid + + // Free the freelist and allocate new pages for it. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. 
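+// The copy is taken under this transaction's consistent snapshot, so other
+// transactions can keep using the database while it runs (see CopyFile).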
+// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. 
+ b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount++ + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + // Clear out page cache early. + tx.pages = make(map[pgid]*page) + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) + } + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. 
+	tx.stats.Write++
+
+	return nil
+}
+
+// page returns a reference to the page with a given id.
+// If page has been written to then a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+	// Check the dirty pages first.
+	if tx.pages != nil {
+		if p, ok := tx.pages[id]; ok {
+			return p
+		}
+	}
+
+	// Otherwise return directly from the mmap.
+	return tx.db.page(id)
+}
+
+// forEachPage iterates over every page within a given page and executes a function.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+	p := tx.page(pgid)
+
+	// Execute function.
+	fn(p, depth)
+
+	// Recursively loop over children.
+	if (p.flags & branchPageFlag) != 0 {
+		for i := 0; i < int(p.count); i++ {
+			elem := p.branchPageElement(uint16(i))
+			tx.forEachPage(elem.pgid, depth+1, fn)
+		}
+	}
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+	if tx.db == nil {
+		return nil, ErrTxClosed
+	} else if pgid(id) >= tx.meta.pgid {
+		return nil, nil
+	}
+
+	// Build the page info.
+	p := tx.db.page(pgid(id))
+	info := &PageInfo{
+		ID:            id,
+		Count:         int(p.count),
+		OverflowCount: int(p.overflow),
+	}
+
+	// Determine the type (or if it's free).
+	if tx.db.freelist.freed(pgid(id)) {
+		info.Type = "free"
+	} else {
+		info.Type = p.typ()
+	}
+
+	return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+	// Page statistics.
+	PageCount int // number of page allocations
+	PageAlloc int // total bytes allocated
+
+	// Cursor statistics.
+	CursorCount int // number of cursors created
+
+	// Node statistics
+	NodeCount int // number of node allocations
+	NodeDeref int // number of node dereferences
+
+	// Rebalance statistics.
+	Rebalance     int           // number of node rebalances
+	RebalanceTime time.Duration // total time spent rebalancing
+
+	// Split/Spill statistics.
+	Split     int           // number of nodes split
+	Spill     int           // number of nodes spilled
+	SpillTime time.Duration // total time spent spilling
+
+	// Write statistics.
+	Write     int           // number of writes performed
+	WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+	s.PageCount += other.PageCount
+	s.PageAlloc += other.PageAlloc
+	s.CursorCount += other.CursorCount
+	s.NodeCount += other.NodeCount
+	s.NodeDeref += other.NodeDeref
+	s.Rebalance += other.Rebalance
+	s.RebalanceTime += other.RebalanceTime
+	s.Split += other.Split
+	s.Spill += other.Spill
+	s.SpillTime += other.SpillTime
+	s.Write += other.Write
+	s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/containerd/console/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md
new file mode 100644
index 000000000..4c56d9d13
--- /dev/null
+++ b/vendor/github.com/containerd/console/README.md
@@ -0,0 +1,17 @@
+# console
+
+[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console)
+
+Golang package for dealing with consoles. Light on dependencies, with a simple API.
+
+## Modifying the current process
+
+```go
+current := console.Current()
+defer current.Reset()
+
+if err := current.SetRaw(); err != nil {
+}
+ws, err := current.Size()
+current.Resize(ws)
+```
diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go
new file mode 100644
index 000000000..bf2798fda
--- /dev/null
+++ b/vendor/github.com/containerd/console/console.go
@@ -0,0 +1,62 @@
+package console
+
+import (
+	"errors"
+	"io"
+	"os"
+)
+
+var ErrNotAConsole = errors.New("provided file is not a console")
+
+type Console interface {
+	io.Reader
+	io.Writer
+	io.Closer
+
+	// Resize resizes the console to the provided window size
+	Resize(WinSize) error
+	// ResizeFrom resizes the calling console to the size of the
+	// provided console
+	ResizeFrom(Console) error
+	// SetRaw sets the console in raw mode
+	SetRaw() error
+	// DisableEcho disables echo on the console
+	DisableEcho() error
+	// Reset restores the console to its original state
+	Reset() error
+	// Size returns the window size of the console
+	Size() (WinSize, error)
+	// Fd returns the console's file descriptor
+	Fd() uintptr
+	// Name returns the console's file name
+	Name() string
+}
+
+// WinSize specifies the window size of the console
+type WinSize struct {
+	// Height of the console
+	Height uint16
+	// Width of the console
+	Width uint16
+	x     uint16
+	y     uint16
+}
+
+// Current returns the current process's console
+func Current() Console {
+	c, err := ConsoleFromFile(os.Stdin)
+	if err != nil {
+		// stdin should always be a console for the design
+		// of this function
+		panic(err)
+	}
+	return c
+}
+
+// ConsoleFromFile returns a console using the provided file
+func ConsoleFromFile(f *os.File) (Console, error) {
+	if err := checkConsole(f); err != nil {
+		return nil, err
+	}
+	return newMaster(f)
+}
diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go
new file mode 100644
index 000000000..c96372929
--- /dev/null
+++ b/vendor/github.com/containerd/console/console_linux.go
@@ -0,0 +1,255 @@
+// +build linux
+
+package console
+
+import (
+	"io"
+	"os"
+	"sync"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	maxEvents = 128
+)
+
+// Epoller manages multiple epoll consoles using the edge-triggered epoll API so we
+// don't have to deal with repeated wake-ups from EPOLLERR or EPOLLHUP.
+// For more details, see:
+// - https://github.com/systemd/systemd/pull/4262
+// - https://github.com/moby/moby/issues/27202
+//
+// Example usage of Epoller and EpollConsole can be as follows:
+//
+// epoller, _ := NewEpoller()
+// epollConsole, _ := epoller.Add(console)
+// go epoller.Wait()
+// var (
+// 	b  bytes.Buffer
+// 	wg sync.WaitGroup
+// )
+// wg.Add(1)
+// go func() {
+// 	io.Copy(&b, epollConsole)
+// 	wg.Done()
+// }()
+// // perform I/O on the console
+// epollConsole.Shutdown(epoller.CloseConsole)
+// wg.Wait()
+// epollConsole.Close()
+type Epoller struct {
+	efd       int
+	mu        sync.Mutex
+	fdMapping map[int]*EpollConsole
+}
+
+// NewEpoller returns an instance of epoller with a valid epoll fd.
+func NewEpoller() (*Epoller, error) {
+	efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
+	if err != nil {
+		return nil, err
+	}
+	return &Epoller{
+		efd:       efd,
+		fdMapping: make(map[int]*EpollConsole),
+	}, nil
+}
+
+// Add creates an epoll console based on the provided console. The console will
+// be registered with EPOLLET (i.e. using edge-triggered notification) and its
+// file descriptor will be set to non-blocking mode. After this, the user should
+// use the returned console to perform I/O.
+func (e *Epoller) Add(console Console) (*EpollConsole, error) {
+	sysfd := int(console.Fd())
+	// Set sysfd to non-blocking mode
+	if err := unix.SetNonblock(sysfd, true); err != nil {
+		return nil, err
+	}
+
+	ev := unix.EpollEvent{
+		Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET,
+		Fd:     int32(sysfd),
+	}
+	if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil {
+		return nil, err
+	}
+	ef := &EpollConsole{
+		Console: console,
+		sysfd:   sysfd,
+		readc:   sync.NewCond(&sync.Mutex{}),
+		writec:  sync.NewCond(&sync.Mutex{}),
+	}
+	e.mu.Lock()
+	e.fdMapping[sysfd] = ef
+	e.mu.Unlock()
+	return ef, nil
+}
+
+// Wait starts the loop to wait for its consoles' notifications and signals the
+// appropriate console that it can perform I/O.
+func (e *Epoller) Wait() error {
+	events := make([]unix.EpollEvent, maxEvents)
+	for {
+		n, err := unix.EpollWait(e.efd, events, -1)
+		if err != nil {
+			// EINTR: The call was interrupted by a signal handler before either
+			// any of the requested events occurred or the timeout expired
+			if err == unix.EINTR {
+				continue
+			}
+			return err
+		}
+		for i := 0; i < n; i++ {
+			ev := &events[i]
+			// the console is ready to be read from
+			if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
+				if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
+					epfile.signalRead()
+				}
+			}
+			// the console is ready to be written to
+			if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
+				if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
+					epfile.signalWrite()
+				}
+			}
+		}
+	}
+}
+
+// CloseConsole unregisters the console's file descriptor from the epoll interface.
+func (e *Epoller) CloseConsole(fd int) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	delete(e.fdMapping, fd)
+	return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{})
+}
+
+func (e *Epoller) getConsole(sysfd int) *EpollConsole {
+	e.mu.Lock()
+	f := e.fdMapping[sysfd]
+	e.mu.Unlock()
+	return f
+}
+
+// Close closes the epoll fd.
+func (e *Epoller) Close() error {
+	return unix.Close(e.efd)
+}
+
+// EpollConsole acts like a console but registers its file descriptor with an
+// epoll fd and uses the epoll API to perform I/O.
+type EpollConsole struct {
+	Console
+	readc  *sync.Cond
+	writec *sync.Cond
+	sysfd  int
+	closed bool
+}
+
+// Read reads up to len(p) bytes into p. It returns the number of bytes read
+// (0 <= n <= len(p)) and any error encountered.
+//
+// If the console's read returns EAGAIN or EIO, we assume that it is a
+// temporary error because the other side went away and wait for the signal
+// generated by the epoll event to continue.
+func (ec *EpollConsole) Read(p []byte) (n int, err error) {
+	var read int
+	ec.readc.L.Lock()
+	defer ec.readc.L.Unlock()
+	for {
+		read, err = ec.Console.Read(p[n:])
+		n += read
+		if err != nil {
+			var hangup bool
+			if perr, ok := err.(*os.PathError); ok {
+				hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
+			} else {
+				hangup = (err == unix.EAGAIN || err == unix.EIO)
+			}
+			// if the other end disappears, assume this is temporary and wait for the
+			// signal to continue again, unless we didn't read anything and the
+			// console is already marked as closed, in which case we should exit
+			if hangup && !(n == 0 && len(p) > 0 && ec.closed) {
+				ec.readc.Wait()
+				continue
+			}
+		}
+		break
+	}
+	// if we didn't read anything then return io.EOF to end gracefully
+	if n == 0 && len(p) > 0 && err == nil {
+		err = io.EOF
+	}
+	// signal for others that we finished the read
+	ec.readc.Signal()
+	return n, err
+}
+
+// Write writes len(p) bytes from p to the console. It returns the number of
+// bytes written from p (0 <= n <= len(p)) and any error encountered that
+// caused the write to stop early.
+//
+// If a write to the console returns EAGAIN or EIO, we assume that it is a
+// temporary error because the other side went away and wait for the signal
+// generated by the epoll event to continue.
+func (ec *EpollConsole) Write(p []byte) (n int, err error) {
+	var written int
+	ec.writec.L.Lock()
+	defer ec.writec.L.Unlock()
+	for {
+		written, err = ec.Console.Write(p[n:])
+		n += written
+		if err != nil {
+			var hangup bool
+			if perr, ok := err.(*os.PathError); ok {
+				hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
+			} else {
+				hangup = (err == unix.EAGAIN || err == unix.EIO)
+			}
+			// if the other end disappears, assume this is temporary and wait for the
+			// signal to continue again.
+			if hangup {
+				ec.writec.Wait()
+				continue
+			}
+		}
+		// unrecoverable error, break the loop and return the error
+		break
+	}
+	if n < len(p) && err == nil {
+		err = io.ErrShortWrite
+	}
+	// signal for others that we finished the write
+	ec.writec.Signal()
+	return n, err
+}
+
+// Shutdown closes the file descriptor and signals all call waiters for this fd.
+// It accepts a callback which will be called with the console's fd. The
+// callback typically will be used to do further cleanup such as unregistering
+// the console's fd from the epoll interface.
+// Users should call Shutdown and wait for all I/O operations to be finished
+// before closing the console.
+func (ec *EpollConsole) Shutdown(close func(int) error) error {
+	ec.readc.L.Lock()
+	defer ec.readc.L.Unlock()
+	ec.writec.L.Lock()
+	defer ec.writec.L.Unlock()
+
+	ec.readc.Broadcast()
+	ec.writec.Broadcast()
+	ec.closed = true
+	return close(ec.sysfd)
+}
+
+// signalRead signals that the console is readable.
+func (ec *EpollConsole) signalRead() {
+	ec.readc.Signal()
+}
+
+// signalWrite signals that the console is writable.
+func (ec *EpollConsole) signalWrite() {
+	ec.writec.Signal()
+}
diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go
new file mode 100644
index 000000000..118c8c3ab
--- /dev/null
+++ b/vendor/github.com/containerd/console/console_unix.go
@@ -0,0 +1,142 @@
+// +build darwin freebsd linux solaris
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// NewPty creates a new pty pair.
+// The master is returned as the first console and a string
+// with the path to the pty slave is returned as the second.
+func NewPty() (Console, string, error) {
+	f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return nil, "", err
+	}
+	slave, err := ptsname(f)
+	if err != nil {
+		return nil, "", err
+	}
+	if err := unlockpt(f); err != nil {
+		return nil, "", err
+	}
+	m, err := newMaster(f)
+	if err != nil {
+		return nil, "", err
+	}
+	return m, slave, nil
+}
+
+type master struct {
+	f        *os.File
+	original *unix.Termios
+}
+
+func (m *master) Read(b []byte) (int, error) {
+	return m.f.Read(b)
+}
+
+func (m *master) Write(b []byte) (int, error) {
+	return m.f.Write(b)
+}
+
+func (m *master) Close() error {
+	return m.f.Close()
+}
+
+func (m *master) Resize(ws WinSize) error {
+	return tcswinsz(m.f.Fd(), ws)
+}
+
+func (m *master) ResizeFrom(c Console) error {
+	ws, err := c.Size()
+	if err != nil {
+		return err
+	}
+	return m.Resize(ws)
+}
+
+func (m *master) Reset() error {
+	if m.original == nil {
+		return nil
+	}
+	return tcset(m.f.Fd(), m.original)
+}
+
+func (m *master) getCurrent() (unix.Termios, error) {
+	var termios unix.Termios
+	if err := tcget(m.f.Fd(), &termios); err != nil {
+		return unix.Termios{}, err
+	}
+	return termios, nil
+}
+
+func (m *master) SetRaw() error {
+	rawState, err := m.getCurrent()
+	if err != nil {
+		return err
+	}
+	rawState = cfmakeraw(rawState)
+	rawState.Oflag =
rawState.Oflag | unix.OPOST + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) DisableEcho() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState.Lflag = rawState.Lflag &^ unix.ECHO + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) Size() (WinSize, error) { + return tcgwinsz(m.f.Fd()) +} + +func (m *master) Fd() uintptr { + return m.f.Fd() +} + +func (m *master) Name() string { + return m.f.Name() +} + +// checkConsole checks if the provided file is a console +func checkConsole(f *os.File) error { + var termios unix.Termios + if tcget(f.Fd(), &termios) != nil { + return ErrNotAConsole + } + return nil +} + +func newMaster(f *os.File) (Console, error) { + m := &master{ + f: f, + } + t, err := m.getCurrent() + if err != nil { + return nil, err + } + m.original = &t + return m, nil +} + +// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts normally. In particular, a not-very-well-known default of +// Linux unix98 ptys is that they have +onlcr by default. While this isn't a +// problem for terminal emulators, because we relay data from the terminal we +// also relay that funky line discipline. +func ClearONLCR(fd uintptr) error { + return setONLCR(fd, false) +} + +// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts as intended for a terminal emulator. +func SetONLCR(fd uintptr) error { + return setONLCR(fd, true) +} diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go new file mode 100644 index 000000000..d78a0b841 --- /dev/null +++ b/vendor/github.com/containerd/console/console_windows.go @@ -0,0 +1,200 @@ +package console + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +var ( + vtInputSupported bool + ErrNotImplemented = errors.New("not implemented") +) + +func (m *master) initStdios() { + m.in = windows.Handle(os.Stdin.Fd()) + if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { + // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. 
+ windows.SetConsoleMode(m.in, m.inMode) + } else { + fmt.Printf("failed to get console mode for stdin: %v\n", err) + } + + m.out = windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil { + if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.out, m.outMode) + } + } else { + fmt.Printf("failed to get console mode for stdout: %v\n", err) + } + + m.err = windows.Handle(os.Stderr.Fd()) + if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil { + if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.err, m.errMode) + } + } else { + fmt.Printf("failed to get console mode for stderr: %v\n", err) + } +} + +type master struct { + in windows.Handle + inMode uint32 + + out windows.Handle + outMode uint32 + + err windows.Handle + errMode uint32 +} + +func (m *master) SetRaw() error { + if err := makeInputRaw(m.in, m.inMode); err != nil { + return err + } + + // Set StdOut and StdErr to raw mode, we ignore failures since + // windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of + // Windows. + + windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN) + + windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN) + + return nil +} + +func (m *master) Reset() error { + for _, s := range []struct { + fd windows.Handle + mode uint32 + }{ + {m.in, m.inMode}, + {m.out, m.outMode}, + {m.err, m.errMode}, + } { + if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { + return errors.Wrap(err, "unable to restore console mode") + } + } + + return nil +} + +func (m *master) Size() (WinSize, error) { + var info windows.ConsoleScreenBufferInfo + err := windows.GetConsoleScreenBufferInfo(m.out, &info) + if err != nil { + return WinSize{}, errors.Wrap(err, "unable to get console info") + } + + winsize := WinSize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +func (m *master) Resize(ws WinSize) error { + return ErrNotImplemented +} + +func (m *master) ResizeFrom(c Console) error { + return ErrNotImplemented +} + +func (m *master) DisableEcho() error { + mode := m.inMode &^ windows.ENABLE_ECHO_INPUT + mode |= windows.ENABLE_PROCESSED_INPUT + mode |= windows.ENABLE_LINE_INPUT + + if err := windows.SetConsoleMode(m.in, mode); err != nil { + return errors.Wrap(err, "unable to set console to disable echo") + } + + return nil +} + +func (m *master) Close() error { + return nil +} + +func (m *master) Read(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Write(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Fd() uintptr { + return uintptr(m.in) +} + +// on windows, console can only be made from os.Std{in,out,err}, hence there +// isnt a single name here we can use. Return a dummy "console" value in this +// case should be sufficient. 
+func (m *master) Name() string { + return "console" +} + +// makeInputRaw puts the terminal (Windows Console) connected to the given +// file descriptor into raw mode +func makeInputRaw(fd windows.Handle, mode uint32) error { + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= windows.ENABLE_QUICK_EDIT_MODE + + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + if err := windows.SetConsoleMode(fd, mode); err != nil { + return errors.Wrap(err, "unable to set console to raw mode") + } + + return nil +} + +func checkConsole(f *os.File) error { + var mode uint32 + if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil { + return err + } + return nil +} + +func newMaster(f *os.File) (Console, error) { + if f != os.Stdin && f != os.Stdout && f != os.Stderr { + return nil, errors.New("creating a console from a file is not supported on windows") + } + m := &master{} + m.initStdios() + return m, nil +} diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go new file mode 100644 index 000000000..b102bad74 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_darwin.go @@ -0,0 +1,37 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_freebsd.go b/vendor/github.com/containerd/console/tc_freebsd.go new file mode 100644 index 000000000..e2a10e441 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_freebsd.go @@ -0,0 +1,29 @@ +package console + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +// This does not exist on FreeBSD, it does not allocate controlling terminals on open +func unlockpt(f *os.File) error { + return nil +} + +// ptsname retrieves the name of the first available pts for the given master. 
+func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go new file mode 100644 index 000000000..80ef2f6fb --- /dev/null +++ b/vendor/github.com/containerd/console/tc_linux.go @@ -0,0 +1,37 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go new file mode 100644 index 000000000..f8066d8e3 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_solaris_cgo.go @@ -0,0 +1,35 @@ +// +build solaris,cgo + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +//#include +import "C" + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + ptspath, err := C.ptsname(C.int(f.Fd())) + if err != nil { + return "", err + } + return C.GoString(ptspath), nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + if _, err := C.grantpt(C.int(f.Fd())); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go new file mode 100644 index 000000000..0aefa0d2b --- /dev/null +++ b/vendor/github.com/containerd/console/tc_solaris_nocgo.go @@ -0,0 +1,31 @@ +// +build solaris,!cgo + +// +// Implementing the functions below requires cgo support. Non-cgo stubs +// versions are defined below to enable cross-compilation of source code +// that depends on these functions, but the resultant cross-compiled +// binaries cannot actually be used. If the stub function(s) below are +// actually invoked they will display an error message and cause the +// calling process to exit. 
+//
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TCGETS
+	cmdTcSet = unix.TCSETS
+)
+
+func ptsname(f *os.File) (string, error) {
+	panic("ptsname() support requires cgo.")
+}
+
+func unlockpt(f *os.File) error {
+	panic("unlockpt() support requires cgo.")
+}
diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go
new file mode 100644
index 000000000..df7dcb933
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_unix.go
@@ -0,0 +1,75 @@
+// +build darwin freebsd linux solaris
+
+package console
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func tcget(fd uintptr, p *unix.Termios) error {
+	termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet)
+	if err != nil {
+		return err
+	}
+	*p = *termios
+	return nil
+}
+
+func tcset(fd uintptr, p *unix.Termios) error {
+	return unix.IoctlSetTermios(int(fd), cmdTcSet, p)
+}
+
+func tcgwinsz(fd uintptr) (WinSize, error) {
+	var ws WinSize
+
+	uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+	if err != nil {
+		return ws, err
+	}
+
+	// Translate from unix.Winsize to console.WinSize
+	ws.Height = uws.Row
+	ws.Width = uws.Col
+	ws.x = uws.Xpixel
+	ws.y = uws.Ypixel
+	return ws, nil
+}
+
+func tcswinsz(fd uintptr, ws WinSize) error {
+	// Translate from console.WinSize to unix.Winsize
+
+	var uws unix.Winsize
+	uws.Row = ws.Height
+	uws.Col = ws.Width
+	uws.Xpixel = ws.x
+	uws.Ypixel = ws.y
+
+	return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws)
+}
+
+func setONLCR(fd uintptr, enable bool) error {
+	var termios unix.Termios
+	if err := tcget(fd, &termios); err != nil {
+		return err
+	}
+	if enable {
+		// Set +onlcr so we can act like a real terminal
+		termios.Oflag |= unix.ONLCR
+	} else {
+		// Set -onlcr so we don't have to deal with \r.
+		termios.Oflag &^= unix.ONLCR
+	}
+	return tcset(fd, &termios)
+}
+
+func cfmakeraw(t unix.Termios) unix.Termios {
+	t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+	t.Oflag &^= unix.OPOST
+	t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+	t.Cflag &^= (unix.CSIZE | unix.PARENB)
+	t.Cflag |= unix.CS8
+	t.Cc[unix.VMIN] = 1
+	t.Cc[unix.VTIME] = 0
+
+	return t
+}
diff --git a/vendor/github.com/containerd/containerd/api/events/doc.go b/vendor/github.com/containerd/containerd/api/events/doc.go
index ac1e83fb7..354bef79f 100644
--- a/vendor/github.com/containerd/containerd/api/events/doc.go
+++ b/vendor/github.com/containerd/containerd/api/events/doc.go
@@ -1,3 +1,19 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 // Package events has protobuf types for various events that are used in
 // containerd.
package events diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go index 070604bb8..b7f86da86 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go +++ b/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go @@ -1,2 +1,18 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + // Package events defines the event pushing and subscription service. package events diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/docs.go b/vendor/github.com/containerd/containerd/api/services/images/v1/docs.go index a8d61a31e..4170f38af 100644 --- a/vendor/github.com/containerd/containerd/api/services/images/v1/docs.go +++ b/vendor/github.com/containerd/containerd/api/services/images/v1/docs.go @@ -1 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package images diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go index 3b9d7947c..f6f65eadf 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go @@ -1 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package introspection diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/doc.go index 3685b6455..db2422a8b 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/doc.go +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/doc.go @@ -1 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package leases
diff --git a/vendor/github.com/containerd/containerd/api/types/doc.go b/vendor/github.com/containerd/containerd/api/types/doc.go
index ab1254f4c..475b465ed 100644
--- a/vendor/github.com/containerd/containerd/api/types/doc.go
+++ b/vendor/github.com/containerd/containerd/api/types/doc.go
@@ -1 +1,17 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package types
diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go
new file mode 100644
index 000000000..de85438b9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go
@@ -0,0 +1,141 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package compression
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"sync"
+)
+
+type (
+	// Compression represents whether the content is compressed, and if so how.
+	Compression int
+)
+
+const (
+	// Uncompressed represents uncompressed data.
+	Uncompressed Compression = iota
+	// Gzip is the gzip compression algorithm.
+	Gzip
+)
+
+var (
+	bufioReader32KPool = &sync.Pool{
+		New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
+	}
+)
+
+type readCloserWrapper struct {
+	io.Reader
+	closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+	if r.closer != nil {
+		return r.closer()
+	}
+	return nil
+}
+
+type writeCloserWrapper struct {
+	io.Writer
+	closer func() error
+}
+
+func (w *writeCloserWrapper) Close() error {
+	if w.closer != nil {
+		return w.closer()
+	}
+	return nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+	for compression, m := range map[Compression][]byte{
+		Gzip: {0x1F, 0x8B, 0x08},
+	} {
+		if len(source) < len(m) {
+			// Len too short
+			continue
+		}
+		if bytes.Equal(m, source[:len(m)]) {
+			return compression
+		}
+	}
+	return Uncompressed
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + buf := bufioReader32KPool.Get().(*bufio.Reader) + buf.Reset(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue docker/docker#18170 + return nil, err + } + + closer := func() error { + buf.Reset(nil) + bufioReader32KPool.Put(buf) + return nil + } + switch compression := DetectCompression(bs); compression { + case Uncompressed: + readBufWrapper := &readCloserWrapper{buf, closer} + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := &readCloserWrapper{gzReader, closer} + return readBufWrapper, nil + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresseses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + switch compression { + case Uncompressed: + return &writeCloserWrapper{dest, nil}, nil + case Gzip: + return gzip.NewWriter(dest), nil + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Gzip: + return "gz" + } + return "" +} diff --git a/vendor/github.com/containerd/containerd/archive/strconv.go b/vendor/github.com/containerd/containerd/archive/strconv.go new file mode 100644 index 000000000..d262e9059 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/strconv.go @@ -0,0 +1,68 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "strconv" + "strings" + "time" + + "github.com/dmcgowan/go-tar" +) + +// Forked from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go +// as archive/tar doesn't support CreationTime, but does handle PAX time parsing, +// and there's no need to re-invent the wheel. + +// parsePAXTime takes a string of the form %d.%d as described in the PAX +// specification. Note that this implementation allows for negative timestamps, +// which is allowed for by the PAX specification, but not always portable. +func parsePAXTime(s string) (time.Time, error) { + const maxNanoSecondDigits = 9 + + // Split string into seconds and sub-seconds parts. + ss, sn := s, "" + if pos := strings.IndexByte(s, '.'); pos >= 0 { + ss, sn = s[:pos], s[pos+1:] + } + + // Parse the seconds. 
+ secs, err := strconv.ParseInt(ss, 10, 64) + if err != nil { + return time.Time{}, tar.ErrHeader + } + if len(sn) == 0 { + return time.Unix(secs, 0), nil // No sub-second values + } + + // Parse the nanoseconds. + if strings.Trim(sn, "0123456789") != "" { + return time.Time{}, tar.ErrHeader + } + if len(sn) < maxNanoSecondDigits { + sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad + } else { + sn = sn[:maxNanoSecondDigits] // Right truncate + } + nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed + if len(ss) > 0 && ss[0] == '-' { + return time.Unix(secs, -nsecs), nil // Negative correction + } + return time.Unix(secs, nsecs), nil +} diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go new file mode 100644 index 000000000..7e9182c8f --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/tar.go @@ -0,0 +1,647 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "github.com/containerd/containerd/log" + "github.com/containerd/continuity/fs" + "github.com/dmcgowan/go-tar" + "github.com/pkg/errors" +) + +var bufPool = &sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32*1024) + return &buffer + }, +} + +var errInvalidArchive = errors.New("invalid archive") + +// Diff returns a tar stream of the computed filesystem +// difference between the provided directories. +// +// Produces a tar using OCI style file markers for deletions. Deleted +// files will be prepended with the prefix ".wh.". This style is +// based off AUFS whiteouts. +// See https://github.com/opencontainers/image-spec/blob/master/layer.md +func Diff(ctx context.Context, a, b string) io.ReadCloser { + r, w := io.Pipe() + + go func() { + err := WriteDiff(ctx, w, a, b) + if err = w.CloseWithError(err); err != nil { + log.G(ctx).WithError(err).Debugf("closing tar pipe failed") + } + }() + + return r +} + +// WriteDiff writes a tar stream of the computed difference between the +// provided directories. +// +// Produces a tar using OCI style file markers for deletions. Deleted +// files will be prepended with the prefix ".wh.". This style is +// based off AUFS whiteouts. +// See https://github.com/opencontainers/image-spec/blob/master/layer.md +func WriteDiff(ctx context.Context, w io.Writer, a, b string) error { + cw := newChangeWriter(w, b) + err := fs.Changes(ctx, a, b, cw.HandleChange) + if err != nil { + return errors.Wrap(err, "failed to create diff tar stream") + } + return cw.Close() +} + +const ( + // whiteoutPrefix prefix means file is a whiteout. If this is followed by a + // filename this means that file has been removed from the base layer. + // See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts + whiteoutPrefix = ".wh." 
+ + // whiteoutMetaPrefix prefix means whiteout has a special meaning and is not + // for removing an actual file. Normally these files are excluded from exported + // archives. + whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix + + // whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other + // layers. Normally these should not go into exported archives and all changed + // hardlinks should be copied to the top layer. + whiteoutLinkDir = whiteoutMetaPrefix + "plnk" + + // whiteoutOpaqueDir file means directory has been made opaque - meaning + // readdir calls to this directory do not follow to lower layers. + whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq" + + paxSchilyXattr = "SCHILY.xattrs." +) + +// Apply applies a tar stream of an OCI style diff tar. +// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets +func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) { + root = filepath.Clean(root) + + var options ApplyOptions + for _, opt := range opts { + if err := opt(&options); err != nil { + return 0, errors.Wrap(err, "failed to apply option") + } + } + + return apply(ctx, root, tar.NewReader(r), options) +} + +// applyNaive applies a tar stream of an OCI style diff tar. +// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets +func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { + var ( + dirs []*tar.Header + + // Used for handling opaque directory markers which + // may occur out of order + unpackedPaths = make(map[string]struct{}) + + // Used for aufs plink directory + aufsTempdir = "" + aufsHardlinks = make(map[string]*tar.Header) + ) + + // Iterate through the files in the archive. + for { + select { + case <-ctx.Done(): + return 0, ctx.Err() + default: + } + + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + if skipFile(hdr) { + log.G(ctx).Warnf("file %q ignored: archive may not be supported on system", hdr.Name) + continue + } + + // Split name and resolve symlinks for root directory. + ppath, base := filepath.Split(hdr.Name) + ppath, err = fs.RootPath(root, ppath) + if err != nil { + return 0, errors.Wrap(err, "failed to get root path") + } + + // Join to root before joining to parent path to ensure relative links are + // already resolved based on the root before adding to parent. + path := filepath.Join(ppath, filepath.Join("/", base)) + if path == root { + log.G(ctx).Debugf("file %q ignored: resolved to root", hdr.Name) + continue + } + + // If file is not directly under root, ensure parent directory + // exists or is created. + if ppath != root { + parentPath := ppath + if base == "" { + parentPath = filepath.Dir(path) + } + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = mkdirAll(parentPath, 0700) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, whiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. 
+			if strings.HasPrefix(hdr.Name, whiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				p, err := fs.RootPath(aufsTempdir, basename)
+				if err != nil {
+					return 0, err
+				}
+				if err := createTarFile(ctx, p, root, hdr, tr); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != whiteoutOpaqueDir {
+				continue
+			}
+		}
+
+		if strings.HasPrefix(base, whiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == whiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+				continue
+			}
+
+			originalBase := base[len(whiteoutPrefix):]
+			originalPath := filepath.Join(dir, originalBase)
+
+			// Ensure originalPath is under dir
+			if dir[len(dir)-1] != filepath.Separator {
+				dir += string(filepath.Separator)
+			}
+			if !strings.HasPrefix(originalPath, dir) {
+				return 0, errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
+			}
+
+			if err := os.RemoveAll(originalPath); err != nil {
+				return 0, err
+			}
+			continue
+		}
+		// If path exists we almost always just want to remove and replace it.
+		// The only exception is when it is a directory *and* the file from
+		// the layer is also a directory. Then we want to merge them (i.e.
+		// just apply the metadata from the layer).
+		if fi, err := os.Lstat(path); err == nil {
+			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+				if err := os.RemoveAll(path); err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		srcData := io.Reader(tr)
+		srcHdr := hdr
+
+		// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+		// we manually retarget these into the temporary files we extracted them into
+		if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), whiteoutLinkDir) {
+			linkBasename := filepath.Base(hdr.Linkname)
+			srcHdr = aufsHardlinks[linkBasename]
+			if srcHdr == nil {
+				return 0, fmt.Errorf("invalid aufs hardlink")
+			}
+			p, err := fs.RootPath(aufsTempdir, linkBasename)
+			if err != nil {
+				return 0, err
+			}
+			tmpFile, err := os.Open(p)
+			if err != nil {
+				return 0, err
+			}
+			defer tmpFile.Close()
+			srcData = tmpFile
+		}
+
+		if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil {
+			return 0, err
+		}
+
+		// Directory mtimes must be handled at the end to avoid further
+		// file creation in them to modify the directory mtime
+		if hdr.Typeflag == tar.TypeDir {
+			dirs = append(dirs, hdr)
+		}
+		unpackedPaths[path] = struct{}{}
+	}
+
+	for _, hdr := range dirs {
+		path, err := fs.RootPath(root, hdr.Name)
+		if err != nil {
+			return 0, err
+		}
+		if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil {
+			return 0, err
+		}
+	}
+
+	return size, nil
+}
+
+func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader) error {
+	// hdr.Mode is in linux format, which we can use for syscalls,
+	// but for os.Foo() calls we need the mode converted to os.FileMode,
+	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+	hdrInfo := hdr.FileInfo()
+
+	switch hdr.Typeflag {
+	case tar.TypeDir:
+		// Create directory unless it exists as a directory already.
+		// In that case we just want to merge the two
+		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+			if err := mkdir(path, hdrInfo.Mode()); err != nil {
+				return err
+			}
+		}
+
+	case tar.TypeReg, tar.TypeRegA:
+		file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode())
+		if err != nil {
+			return err
+		}
+
+		_, err = copyBuffered(ctx, file, reader)
+		if err1 := file.Close(); err == nil {
+			err = err1
+		}
+		if err != nil {
+			return err
+		}
+
+	case tar.TypeBlock, tar.TypeChar:
+		// Handle this in an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeFifo:
+		// Handle this in an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeLink:
+		targetPath, err := fs.RootPath(extractDir, hdr.Linkname)
+		if err != nil {
+			return err
+		}
+		if err := os.Link(targetPath, path); err != nil {
+			return err
+		}
+
+	case tar.TypeSymlink:
+		if err := os.Symlink(hdr.Linkname, path); err != nil {
+			return err
+		}
+
+	case tar.TypeXGlobalHeader:
+		log.G(ctx).Debug("PAX Global Extended Headers found and ignored")
+		return nil
+
+	default:
+		return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag)
+	}
+
+	// Lchown is not supported on Windows.
+	if runtime.GOOS != "windows" {
+		if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
+			return err
+		}
+	}
+
+	for key, value := range hdr.PAXRecords {
+		if strings.HasPrefix(key, paxSchilyXattr) {
+			key = key[len(paxSchilyXattr):]
+			if err := setxattr(path, key, value); err != nil {
+				if errors.Cause(err) == syscall.ENOTSUP {
+					log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
+					continue
+				}
+				return err
+			}
+		}
+	}
+
+	// There is no LChmod, so ignore mode for symlink.
Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)) +} + +type changeWriter struct { + tw *tar.Writer + source string + whiteoutT time.Time + inodeSrc map[uint64]string + inodeRefs map[uint64][]string + addedDirs map[string]struct{} +} + +func newChangeWriter(w io.Writer, source string) *changeWriter { + return &changeWriter{ + tw: tar.NewWriter(w), + source: source, + whiteoutT: time.Now(), + inodeSrc: map[uint64]string{}, + inodeRefs: map[uint64][]string{}, + addedDirs: map[string]struct{}{}, + } +} + +func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error { + if err != nil { + return err + } + if k == fs.ChangeKindDelete { + whiteOutDir := filepath.Dir(p) + whiteOutBase := filepath.Base(p) + whiteOut := filepath.Join(whiteOutDir, whiteoutPrefix+whiteOutBase) + hdr := &tar.Header{ + Typeflag: tar.TypeReg, + Name: whiteOut[1:], + Size: 0, + ModTime: cw.whiteoutT, + AccessTime: cw.whiteoutT, + ChangeTime: cw.whiteoutT, + } + if err := cw.includeParents(hdr); err != nil { + return err + } + if err := cw.tw.WriteHeader(hdr); err != nil { + return errors.Wrap(err, "failed to write whiteout header") + } + } else { + var ( + link string + err error + source = filepath.Join(cw.source, p) + ) + + if f.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(source); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(f, link) + if err != nil { + return err + } + + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name := p + if strings.HasPrefix(name, string(filepath.Separator)) { + name, err = filepath.Rel(string(filepath.Separator), name) + if err != nil { + return errors.Wrap(err, "failed to make path relative") + } + } + name, err = tarName(name) + if err != nil { + return errors.Wrap(err, "cannot canonicalize path") + } + // suffix with '/' for directories + if f.IsDir() && !strings.HasSuffix(name, "/") { + name += "/" + } + hdr.Name = name + + if err := setHeaderForSpecialDevice(hdr, name, f); err != nil { + return errors.Wrap(err, "failed to set device headers") + } + + // additionalLinks stores file names which must be linked to + // this file when this file is added + var additionalLinks []string + inode, isHardlink := fs.GetLinkInfo(f) + if isHardlink { + // If the inode has a source, always link to it + if source, ok := cw.inodeSrc[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = source + hdr.Size = 0 + } else { + if k == fs.ChangeKindUnmodified { + cw.inodeRefs[inode] = append(cw.inodeRefs[inode], name) + return nil + } + cw.inodeSrc[inode] = name + additionalLinks = cw.inodeRefs[inode] + delete(cw.inodeRefs, inode) + } + } else if k == fs.ChangeKindUnmodified { + // Nothing to write to diff + return nil + } + + if capability, err := getxattr(source, "security.capability"); err != nil { + return errors.Wrap(err, "failed to get capabilities xattr") + } else if capability != nil { + if hdr.PAXRecords == nil { + hdr.PAXRecords = map[string]string{} + } + hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability) + } + + if err := cw.includeParents(hdr); err != nil { + return err + } + if err := cw.tw.WriteHeader(hdr); err != nil { + return errors.Wrap(err, "failed to write file header") + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + file, err := open(source) + if err != nil { + return 
errors.Wrapf(err, "failed to open path: %v", source) + } + defer file.Close() + + n, err := copyBuffered(context.TODO(), cw.tw, file) + if err != nil { + return errors.Wrap(err, "failed to copy") + } + if n != hdr.Size { + return errors.New("short write copying file") + } + } + + if additionalLinks != nil { + source = hdr.Name + for _, extra := range additionalLinks { + hdr.Name = extra + hdr.Typeflag = tar.TypeLink + hdr.Linkname = source + hdr.Size = 0 + + if err := cw.includeParents(hdr); err != nil { + return err + } + if err := cw.tw.WriteHeader(hdr); err != nil { + return errors.Wrap(err, "failed to write file header") + } + } + } + } + return nil +} + +func (cw *changeWriter) Close() error { + if err := cw.tw.Close(); err != nil { + return errors.Wrap(err, "failed to close tar writer") + } + return nil +} + +func (cw *changeWriter) includeParents(hdr *tar.Header) error { + name := strings.TrimRight(hdr.Name, "/") + fname := filepath.Join(cw.source, name) + parent := filepath.Dir(name) + pname := filepath.Join(cw.source, parent) + + // Do not include root directory as parent + if fname != cw.source && pname != cw.source { + _, ok := cw.addedDirs[parent] + if !ok { + cw.addedDirs[parent] = struct{}{} + fi, err := os.Stat(pname) + if err != nil { + return err + } + if err := cw.HandleChange(fs.ChangeKindModify, parent, fi, nil); err != nil { + return err + } + } + } + if hdr.Typeflag == tar.TypeDir { + cw.addedDirs[name] = struct{}{} + } + return nil +} + +func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) { + buf := bufPool.Get().(*[]byte) + defer bufPool.Put(buf) + + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + default: + } + + nr, er := src.Read(*buf) + if nr > 0 { + nw, ew := dst.Write((*buf)[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er != nil { + if er != io.EOF { + err = er + } + break + } + } + return written, err + +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts.go b/vendor/github.com/containerd/containerd/archive/tar_opts.go new file mode 100644 index 000000000..b0f86abdf --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/tar_opts.go @@ -0,0 +1,20 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +// ApplyOpt allows setting mutable archive apply properties on creation +type ApplyOpt func(options *ApplyOptions) error diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts_unix.go b/vendor/github.com/containerd/containerd/archive/tar_opts_unix.go new file mode 100644 index 000000000..e19afddd2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/tar_opts_unix.go @@ -0,0 +1,23 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +// ApplyOptions provides additional options for an Apply operation +type ApplyOptions struct { +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go b/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go new file mode 100644 index 000000000..0991ab094 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go @@ -0,0 +1,44 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +// ApplyOptions provides additional options for an Apply operation +type ApplyOptions struct { + ParentLayerPaths []string // Parent layer paths used for Windows layer apply + IsWindowsContainerLayer bool // True if the tar stream to be applied is a Windows Container Layer +} + +// WithParentLayers adds parent layers to the apply process this is required +// for all Windows layers except the base layer. +func WithParentLayers(parentPaths []string) ApplyOpt { + return func(options *ApplyOptions) error { + options.ParentLayerPaths = parentPaths + return nil + } +} + +// AsWindowsContainerLayer indicates that the tar stream to apply is that of +// a Windows Container Layer. The caller must be holding SeBackupPrivilege and +// SeRestorePrivilege. +func AsWindowsContainerLayer() ApplyOpt { + return func(options *ApplyOptions) error { + options.IsWindowsContainerLayer = true + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go new file mode 100644 index 000000000..f577996ee --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/tar_unix.go @@ -0,0 +1,156 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package archive + +import ( + "context" + "os" + "sync" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/dmcgowan/go-tar" + "github.com/opencontainers/runc/libcontainer/system" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func tarName(p string) (string, error) { + return p, nil +} + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + + // Rdev is int32 on darwin/bsd, int64 on linux/solaris + rdev := uint64(s.Rdev) // nolint: unconvert + + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(rdev)) + hdr.Devminor = int64(unix.Minor(rdev)) + } + + return nil +} + +func open(p string) (*os.File, error) { + return os.Open(p) +} + +func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { + f, err := os.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + // Call chmod to avoid permission mask + if err := os.Chmod(name, perm); err != nil { + return nil, err + } + return f, err +} + +func mkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func mkdir(path string, perm os.FileMode) error { + if err := os.Mkdir(path, perm); err != nil { + return err + } + // Only final created directory gets explicit permission + // call to avoid permission mask + return os.Chmod(path, perm) +} + +func skipFile(*tar.Header) bool { + return false +} + +var ( + inUserNS bool + nsOnce sync.Once +) + +func setInUserNS() { + inUserNS = system.RunningInUserNS() +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + nsOnce.Do(setInUserNS) + if inUserNS { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return unix.Mknod(path, mode, int(unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor)))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} + +func getxattr(path, attr string) ([]byte, error) { + b, err := sysx.LGetxattr(path, attr) + if err == unix.ENOTSUP || err == sysx.ENODATA { + return nil, nil + } + return b, err +} + +func setxattr(path, key, value string) error { + return sysx.LSetxattr(path, key, []byte(value), 0) +} + +// apply applies a tar stream of an OCI style diff tar. 
+// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets +func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { + return applyNaive(ctx, root, tr, options) +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_windows.go b/vendor/github.com/containerd/containerd/archive/tar_windows.go new file mode 100644 index 000000000..7172e735c --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/tar_windows.go @@ -0,0 +1,446 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "bufio" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/sys" + "github.com/dmcgowan/go-tar" +) + +const ( + // MSWINDOWS pax vendor extensions + hdrMSWindowsPrefix = "MSWINDOWS." + + hdrFileAttributes = hdrMSWindowsPrefix + "fileattr" + hdrSecurityDescriptor = hdrMSWindowsPrefix + "sd" + hdrRawSecurityDescriptor = hdrMSWindowsPrefix + "rawsd" + hdrMountPoint = hdrMSWindowsPrefix + "mountpoint" + hdrEaPrefix = hdrMSWindowsPrefix + "xattr." + + // LIBARCHIVE pax vendor extensions + hdrLibArchivePrefix = "LIBARCHIVE." + + hdrCreateTime = hdrLibArchivePrefix + "creationtime" +) + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } +) + +// tarName converts a platform-specific filepath + // to a canonical POSIX-style path for tar archival. p is a relative + // path. +func tarName(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace them; however, we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil +} + +// chmodTarEntry is used to adjust the file permissions used in the tar header based +// on the platform on which the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(*tar.Header, string, os.FileInfo) error { + // do nothing.
no notion of Rdev, Inode, Nlink in stat on Windows + return nil +} + +func open(p string) (*os.File, error) { + // We use sys.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + return sys.OpenSequential(p) +} + +func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { + // Source is regular file. We use sys.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + return sys.OpenFileSequential(name, flag, perm) +} + +func mkdirAll(path string, perm os.FileMode) error { + return sys.MkdirAll(path, perm) +} + +func mkdir(path string, perm os.FileMode) error { + return os.Mkdir(path, perm) +} + +func skipFile(hdr *tar.Header) bool { + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. + if strings.Contains(hdr.Name, ":") { + return true + } + + return false +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getxattr(path, attr string) ([]byte, error) { + return nil, nil +} + +func setxattr(path, key, value string) error { + // Return not support error, do not wrap underlying not supported + // since xattrs should not exist in windows diff archives + return errors.New("xattrs not supported on Windows") +} + +// apply applies a tar stream of an OCI style diff tar of a Windows layer. +// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets +func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { + if options.IsWindowsContainerLayer { + return applyWindowsLayer(ctx, root, tr, options) + } + return applyNaive(ctx, root, tr, options) +} + +// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer. +// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets +func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { + home, id := filepath.Split(root) + info := hcsshim.DriverInfo{ + HomeDir: home, + } + + w, err := hcsshim.NewLayerWriter(info, id, options.ParentLayerPaths) + if err != nil { + return 0, err + } + defer func() { + if err := w.Close(); err != nil { + log.G(ctx).Errorf("failed to close layer writer: %v", err) + } + }() + + buf := bufio.NewWriter(nil) + hdr, nextErr := tr.Next() + // Iterate through the files in the archive. 
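+ // Every entry lands in one of three cases below: a whiteout entry turns + // into a Remove call on the layer writer, a hard link becomes an AddLink + // call, and any other entry is added as a file whose contents are copied + // through the Win32 backup stream machinery.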
+ for { + select { + case <-ctx.Done(): + return 0, ctx.Err() + default: + } + + if nextErr == io.EOF { + // end of tar archive + break + } + if nextErr != nil { + return 0, nextErr + } + + // Note: path is used instead of filepath to prevent OS specific handling + // of the tar path + base := path.Base(hdr.Name) + if strings.HasPrefix(base, whiteoutPrefix) { + dir := path.Dir(hdr.Name) + originalBase := base[len(whiteoutPrefix):] + originalPath := path.Join(dir, originalBase) + if err := w.Remove(filepath.FromSlash(originalPath)); err != nil { + return 0, err + } + hdr, nextErr = tr.Next() + } else if hdr.Typeflag == tar.TypeLink { + err := w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, nextErr = tr.Next() + } else { + name, fileSize, fileInfo, err := fileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + if err := w.Add(filepath.FromSlash(name), fileInfo); err != nil { + return 0, err + } + size += fileSize + hdr, nextErr = tarToBackupStreamWithMutatedFiles(buf, w, tr, hdr, root) + } + } + + return +} + +// fileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by +// WriteTarFileFromBackupStream. +func fileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) { + name = hdr.Name + if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + size = hdr.Size + } + fileInfo = &winio.FileBasicInfo{ + LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()), + LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()), + ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()), + + // Default CreationTime to ModTime, updated below if MSWINDOWS.createtime exists + CreationTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()), + } + if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok { + attr, err := strconv.ParseUint(attrStr, 10, 32) + if err != nil { + return "", 0, nil, err + } + fileInfo.FileAttributes = uintptr(attr) + } else { + if hdr.Typeflag == tar.TypeDir { + fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY + } + } + if createStr, ok := hdr.PAXRecords[hdrCreateTime]; ok { + createTime, err := parsePAXTime(createStr) + if err != nil { + return "", 0, nil, err + } + fileInfo.CreationTime = syscall.NsecToFiletime(createTime.UnixNano()) + } + return +} + +// tarToBackupStreamWithMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. 
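+// The files in question are the BCD store and its logs (see the mutatedFiles +// table above); pristine copies are written under the layer root so the +// originals can be restored once the import completes.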
+func tarToBackupStreamWithMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var ( + bcdBackup *os.File + bcdBackupWriter *winio.BackupFileWriter + ) + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return writeBackupStreamFromTarFile(buf, t, hdr) +} + +// writeBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple +// tar file entries in order to collect all the alternate data streams for the file, it returns the next +// tar file that was not processed, or io.EOF if there are no more. +func writeBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { + bw := winio.NewBackupStreamWriter(w) + var sd []byte + var err error + // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written + // by this library will have raw binary for the security descriptor. + if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { + sd, err = winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return nil, err + } + } + if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { + sd, err = base64.StdEncoding.DecodeString(sdraw) + if err != nil { + return nil, err + } + } + if len(sd) != 0 { + bhdr := winio.BackupHeader{ + Id: winio.BackupSecurity, + Size: int64(len(sd)), + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(sd) + if err != nil { + return nil, err + } + } + var eas []winio.ExtendedAttribute + for k, v := range hdr.PAXRecords { + if !strings.HasPrefix(k, hdrEaPrefix) { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, err + } + eas = append(eas, winio.ExtendedAttribute{ + Name: k[len(hdrEaPrefix):], + Value: data, + }) + } + if len(eas) != 0 { + eadata, err := winio.EncodeExtendedAttributes(eas) + if err != nil { + return nil, err + } + bhdr := winio.BackupHeader{ + Id: winio.BackupEaData, + Size: int64(len(eadata)), + } + err = bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(eadata) + if err != nil { + return nil, err + } + } + if hdr.Typeflag == tar.TypeSymlink { + _, isMountPoint := hdr.PAXRecords[hdrMountPoint] + rp := winio.ReparsePoint{ + Target: filepath.FromSlash(hdr.Linkname), + IsMountPoint: isMountPoint, + } + reparse := winio.EncodeReparsePoint(&rp) + bhdr := winio.BackupHeader{ + Id: winio.BackupReparseData, + Size: int64(len(reparse)), + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(reparse) + if err != nil { + return nil, err + } + } + + buf := bufPool.Get().(*[]byte) + defer bufPool.Put(buf) + + if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + bhdr := winio.BackupHeader{ + Id: winio.BackupData, + Size: hdr.Size, + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = io.CopyBuffer(bw, t, *buf) + if err != nil { + return nil, err + } + } + //
Copy all the alternate data streams and return the next non-ADS header. + for { + ahdr, err := t.Next() + if err != nil { + return nil, err + } + if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { + return ahdr, nil + } + bhdr := winio.BackupHeader{ + Id: winio.BackupAlternateData, + Size: ahdr.Size, + Name: ahdr.Name[len(hdr.Name):] + ":$DATA", + } + err = bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = io.CopyBuffer(bw, t, *buf) + if err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/containerd/containerd/archive/time.go b/vendor/github.com/containerd/containerd/archive/time.go new file mode 100644 index 000000000..16651a4d0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/time.go @@ -0,0 +1,54 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "syscall" + "time" + "unsafe" +) + +var ( + minTime = time.Unix(0, 0) + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + +func boundTime(t time.Time) time.Time { + if t.Before(minTime) || t.After(maxTime) { + return minTime + } + + return t +} + +func latestTime(t1, t2 time.Time) time.Time { + if t1.Before(t2) { + return t2 + } + return t1 +} diff --git a/vendor/github.com/containerd/containerd/archive/time_darwin.go b/vendor/github.com/containerd/containerd/archive/time_darwin.go new file mode 100644 index 000000000..9c2b656b0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/time_darwin.go @@ -0,0 +1,30 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "time" + + "github.com/pkg/errors" +) + +// as at MacOS 10.12 there is apparently no way to set timestamps +// with nanosecond precision. We could fall back to utimes/lutimes +// and lose the precision as a temporary workaround. 
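+// Returning an explicit error keeps the limitation visible to callers +// instead of silently writing truncated timestamps.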
+func chtimes(path string, atime, mtime time.Time) error { + return errors.New("OSX missing UtimesNanoAt") +} diff --git a/vendor/github.com/containerd/containerd/archive/time_unix.go b/vendor/github.com/containerd/containerd/archive/time_unix.go new file mode 100644 index 000000000..4a69cb7d0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/time_unix.go @@ -0,0 +1,39 @@ +// +build linux freebsd solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "time" + + "golang.org/x/sys/unix" + + "github.com/pkg/errors" +) + +func chtimes(path string, atime, mtime time.Time) error { + var utimes [2]unix.Timespec + utimes[0] = unix.NsecToTimespec(atime.UnixNano()) + utimes[1] = unix.NsecToTimespec(mtime.UnixNano()) + + if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrap(err, "failed call to UtimesNanoAt") + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/archive/time_windows.go b/vendor/github.com/containerd/containerd/archive/time_windows.go new file mode 100644 index 000000000..71f397821 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/time_windows.go @@ -0,0 +1,42 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "time" + + "golang.org/x/sys/windows" +) + +// chtimes will set the create time on a file using the given modtime. +// This requires calling SetFileTime and explicitly including the create time. +func chtimes(path string, atime, mtime time.Time) error { + ctimespec := windows.NsecToTimespec(mtime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/containerd/containerd/cio/io.go b/vendor/github.com/containerd/containerd/cio/io.go index 87b713e14..a49c11735 100644 --- a/vendor/github.com/containerd/containerd/cio/io.go +++ b/vendor/github.com/containerd/containerd/cio/io.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cio import ( diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index 1eadcd50a..eac8a45cb 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cio import ( diff --git a/vendor/github.com/containerd/containerd/cio/io_windows.go b/vendor/github.com/containerd/containerd/cio/io_windows.go index 4c1eefac6..fa9532a3b 100644 --- a/vendor/github.com/containerd/containerd/cio/io_windows.go +++ b/vendor/github.com/containerd/containerd/cio/io_windows.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package cio import ( diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go index b2caa37a1..7d91432d3 100644 --- a/vendor/github.com/containerd/containerd/client.go +++ b/vendor/github.com/containerd/containerd/client.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( @@ -234,10 +250,17 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image schema1Converter = schema1.NewConverter(store, fetcher) handler = images.Handlers(append(pullCtx.BaseHandlers, schema1Converter)...) 
} else { + // Get all the children for a descriptor + childrenHandler := images.ChildrenHandler(store) + // Set any children labels for that content + childrenHandler = images.SetChildrenLabels(store, childrenHandler) + // Filter the children by the platform + childrenHandler = images.FilterPlatform(platforms.Default(), childrenHandler) + + handler = images.Handlers(append(pullCtx.BaseHandlers, + remotes.FetchHandler(store, fetcher), - images.ChildrenHandler(store, platforms.Default()))..., - ) + childrenHandler, + )...) } if err := images.Dispatch(ctx, handler, desc); err != nil { @@ -331,11 +354,11 @@ func (c *Client) ListImages(ctx context.Context, filters ...string) ([]Image, er // Subscribe to events that match one or more of the provided filters. // -// Callers should listen on both the envelope channel and errs channel. If the -// errs channel returns nil or an error, the subscriber should terminate. +// Callers should listen on both the envelope and errs channels. If the errs +// channel returns nil or an error, the subscriber should terminate. // -// To cancel shutdown reciept of events, cancel the provided context. The errs -// channel will be closed and return a nil error. +// The subscriber can stop receiving events by canceling the provided context. +// The errs channel will be closed and return a nil error. func (c *Client) Subscribe(ctx context.Context, filters ...string) (ch <-chan *eventsapi.Envelope, errs <-chan error) { var ( evq = make(chan *eventsapi.Envelope) diff --git a/vendor/github.com/containerd/containerd/client_opts.go b/vendor/github.com/containerd/containerd/client_opts.go index c1e93bae9..dfa02ee7b 100644 --- a/vendor/github.com/containerd/containerd/client_opts.go +++ b/vendor/github.com/containerd/containerd/client_opts.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/config.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/config.go new file mode 100644 index 000000000..cbcc087ae --- /dev/null +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/config.go @@ -0,0 +1,69 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package command + +import ( + "io" + "os" + + "github.com/BurntSushi/toml" + "github.com/containerd/containerd/server" + "github.com/urfave/cli" +) + +// Config is a wrapper of server config for printing out.
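+// Re-declaring Plugins as a plain map lets the TOML encoder print each +// plugin's decoded configuration; the toml.Primitive values embedded in the +// server config are opaque until decoded and are not meant to be re-encoded +// directly.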
+type Config struct { + *server.Config + // Plugins overrides `Plugins map[string]toml.Primitive` in server config. + Plugins map[string]interface{} `toml:"plugins"` +} + +// WriteTo marshals the config to the provided writer +func (c *Config) WriteTo(w io.Writer) (int64, error) { + return 0, toml.NewEncoder(w).Encode(c) +} + +var configCommand = cli.Command{ + Name: "config", + Usage: "information on the containerd config", + Subcommands: []cli.Command{ + { + Name: "default", + Usage: "see the output of the default config", + Action: func(context *cli.Context) error { + config := &Config{ + Config: defaultConfig(), + } + plugins, err := server.LoadPlugins(config.Config) + if err != nil { + return err + } + if len(plugins) != 0 { + config.Plugins = make(map[string]interface{}) + for _, p := range plugins { + if p.Config == nil { + continue + } + config.Plugins[p.ID] = p.Config + } + } + _, err = config.WriteTo(os.Stdout) + return err + }, + }, + }, +} diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/config_linux.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/config_linux.go new file mode 100644 index 000000000..bb083c0e0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/config_linux.go @@ -0,0 +1,32 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package command + +import ( + "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/server" +) + +func defaultConfig() *server.Config { + return &server.Config{ + Root: defaults.DefaultRootDir, + State: defaults.DefaultStateDir, + GRPC: server.GRPCConfig{ + Address: defaults.DefaultAddress, + }, + } +} diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/config_unsupported.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/config_unsupported.go new file mode 100644 index 000000000..1d3825c78 --- /dev/null +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/config_unsupported.go @@ -0,0 +1,38 @@ +// +build !linux,!windows,!solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package command + +import ( + "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/server" +) + +func defaultConfig() *server.Config { + return &server.Config{ + Root: defaults.DefaultRootDir, + State: defaults.DefaultStateDir, + GRPC: server.GRPCConfig{ + Address: defaults.DefaultAddress, + }, + Debug: server.Debug{ + Level: "info", + Address: defaults.DefaultDebugAddress, + }, + } +} diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/config_windows.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/config_windows.go new file mode 100644 index 000000000..bb083c0e0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/config_windows.go @@ -0,0 +1,32 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package command + +import ( + "github.com/containerd/containerd/defaults" + "github.com/containerd/containerd/server" +) + +func defaultConfig() *server.Config { + return &server.Config{ + Root: defaults.DefaultRootDir, + State: defaults.DefaultStateDir, + GRPC: server.GRPCConfig{ + Address: defaults.DefaultAddress, + }, + } +} diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/main.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/main.go new file mode 100644 index 000000000..8abb37745 --- /dev/null +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/main.go @@ -0,0 +1,216 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package command + +import ( + "context" + "fmt" + "io/ioutil" + golog "log" + "net" + "os" + "os/signal" + "path/filepath" + "time" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/server" + "github.com/containerd/containerd/sys" + "github.com/containerd/containerd/version" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" + gocontext "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +const usage = ` + __ _ __ + _________ ____ / /_____ _(_)___ ___ _________/ / + / ___/ __ \/ __ \/ __/ __ ` + "`" + `/ / __ \/ _ \/ ___/ __ / +/ /__/ /_/ / / / / /_/ /_/ / / / / / __/ / / /_/ / +\___/\____/_/ /_/\__/\__,_/_/_/ /_/\___/_/ \__,_/ + +high performance container runtime +` + +func init() { + // Discard grpc logs so that they don't mess with our stdio + grpclog.SetLogger(golog.New(ioutil.Discard, "", golog.LstdFlags)) + + cli.VersionPrinter = func(c *cli.Context) { + fmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision) + } +} + +// App returns a *cli.App instance. +func App() *cli.App { + app := cli.NewApp() + app.Name = "containerd" + app.Version = version.Version + app.Usage = usage + app.Flags = []cli.Flag{ + cli.StringFlag{ + Name: "config,c", + Usage: "path to the configuration file", + Value: defaultConfigPath, + }, + cli.StringFlag{ + Name: "log-level,l", + Usage: "set the logging level [trace, debug, info, warn, error, fatal, panic]", + }, + cli.StringFlag{ + Name: "address,a", + Usage: "address for containerd's GRPC server", + }, + cli.StringFlag{ + Name: "root", + Usage: "containerd root directory", + }, + cli.StringFlag{ + Name: "state", + Usage: "containerd state directory", + }, + } + app.Commands = []cli.Command{ + configCommand, + publishCommand, + } + app.Action = func(context *cli.Context) error { + var ( + start = time.Now() + signals = make(chan os.Signal, 2048) + serverC = make(chan *server.Server, 1) + ctx = gocontext.Background() + config = defaultConfig() + ) + + done := handleSignals(ctx, signals, serverC) + // start the signal handler as soon as we can to make sure that + // we don't miss any signals during boot + signal.Notify(signals, handledSignals...) 
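+ + // Note the precedence here: the configuration file is loaded first, and + // flags set on the command line then override its values via applyFlags.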
+ + if err := server.LoadConfig(context.GlobalString("config"), config); err != nil && !os.IsNotExist(err) { + return err + } + // apply flags to the config + if err := applyFlags(context, config); err != nil { + return err + } + address := config.GRPC.Address + if address == "" { + return errors.New("grpc address cannot be empty") + } + log.G(ctx).WithFields(logrus.Fields{ + "version": version.Version, + "revision": version.Revision, + }).Info("starting containerd") + + server, err := server.New(ctx, config) + if err != nil { + return err + } + serverC <- server + if config.Debug.Address != "" { + var l net.Listener + if filepath.IsAbs(config.Debug.Address) { + if l, err = sys.GetLocalListener(config.Debug.Address, config.Debug.UID, config.Debug.GID); err != nil { + return errors.Wrapf(err, "failed to get listener for debug endpoint") + } + } else { + if l, err = net.Listen("tcp", config.Debug.Address); err != nil { + return errors.Wrapf(err, "failed to get listener for debug endpoint") + } + } + serve(ctx, l, server.ServeDebug) + } + if config.Metrics.Address != "" { + l, err := net.Listen("tcp", config.Metrics.Address) + if err != nil { + return errors.Wrapf(err, "failed to get listener for metrics endpoint") + } + serve(ctx, l, server.ServeMetrics) + } + + l, err := sys.GetLocalListener(address, config.GRPC.UID, config.GRPC.GID) + if err != nil { + return errors.Wrapf(err, "failed to get listener for main endpoint") + } + serve(ctx, l, server.ServeGRPC) + + log.G(ctx).Infof("containerd successfully booted in %fs", time.Since(start).Seconds()) + <-done + return nil + } + return app +} + +func serve(ctx context.Context, l net.Listener, serveFunc func(net.Listener) error) { + path := l.Addr().String() + log.G(ctx).WithField("address", path).Info("serving...") + go func() { + defer l.Close() + if err := serveFunc(l); err != nil { + log.G(ctx).WithError(err).WithField("address", path).Fatal("serve failure") + } + }() +} + +func applyFlags(context *cli.Context, config *server.Config) error { + // the order for config vs flag values is that flags will always override + // the config values if they are set + if err := setLevel(context, config); err != nil { + return err + } + for _, v := range []struct { + name string + d *string + }{ + { + name: "root", + d: &config.Root, + }, + { + name: "state", + d: &config.State, + }, + { + name: "address", + d: &config.GRPC.Address, + }, + } { + if s := context.GlobalString(v.name); s != "" { + *v.d = s + } + } + return nil +} + +func setLevel(context *cli.Context, config *server.Config) error { + l := context.GlobalString("log-level") + if l == "" { + l = config.Debug.Level + } + if l != "" { + lvl, err := log.ParseLevel(l) + if err != nil { + return err + } + logrus.SetLevel(lvl) + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/main_unix.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/main_unix.go new file mode 100644 index 000000000..9dc287ca0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/main_unix.go @@ -0,0 +1,83 @@ +// +build linux darwin freebsd solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package command + +import ( + "context" + "os" + "runtime" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/server" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const defaultConfigPath = "/etc/containerd/config.toml" + +var handledSignals = []os.Signal{ + unix.SIGTERM, + unix.SIGINT, + unix.SIGUSR1, + unix.SIGPIPE, +} + +func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *server.Server) chan struct{} { + done := make(chan struct{}, 1) + go func() { + var server *server.Server + for { + select { + case s := <-serverC: + server = s + case s := <-signals: + log.G(ctx).WithField("signal", s).Debug("received signal") + switch s { + case unix.SIGUSR1: + dumpStacks() + case unix.SIGPIPE: + continue + default: + if server == nil { + close(done) + return + } + server.Stop() + close(done) + } + } + } + }() + return done +} + +func dumpStacks() { + var ( + buf []byte + stackSize int + ) + bufferLen := 16384 + for stackSize == len(buf) { + buf = make([]byte, bufferLen) + stackSize = runtime.Stack(buf, true) + bufferLen *= 2 + } + buf = buf[:stackSize] + logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) +} diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/main_windows.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/main_windows.go new file mode 100644 index 000000000..7af904801 --- /dev/null +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/main_windows.go @@ -0,0 +1,58 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package command + +import ( + "context" + "os" + "path/filepath" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/server" + + "golang.org/x/sys/windows" +) + +var ( + defaultConfigPath = filepath.Join(os.Getenv("programfiles"), "containerd", "config.toml") + handledSignals = []os.Signal{ + windows.SIGTERM, + windows.SIGINT, + } +) + +func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *server.Server) chan struct{} { + done := make(chan struct{}) + go func() { + var server *server.Server + for { + select { + case s := <-serverC: + server = s + case s := <-signals: + log.G(ctx).WithField("signal", s).Debug("received signal") + if server == nil { + close(done) + return + } + server.Stop() + close(done) + } + } + }() + return done +} diff --git a/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go b/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go new file mode 100644 index 000000000..d297732bf --- /dev/null +++ b/vendor/github.com/containerd/containerd/cmd/containerd/command/publish.go @@ -0,0 +1,108 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package command + +import ( + gocontext "context" + "io" + "io/ioutil" + "net" + "os" + "time" + + eventsapi "github.com/containerd/containerd/api/services/events/v1" + "github.com/containerd/containerd/dialer" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/namespaces" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/urfave/cli" + "google.golang.org/grpc" +) + +var publishCommand = cli.Command{ + Name: "publish", + Usage: "binary to publish events to containerd", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "namespace", + Usage: "namespace to publish to", + }, + cli.StringFlag{ + Name: "topic", + Usage: "topic of the event", + }, + }, + Action: func(context *cli.Context) error { + ctx := namespaces.WithNamespace(gocontext.Background(), context.String("namespace")) + topic := context.String("topic") + if topic == "" { + return errors.New("topic required to publish event") + } + payload, err := getEventPayload(os.Stdin) + if err != nil { + return err + } + client, err := connectEvents(context.GlobalString("address")) + if err != nil { + return err + } + if _, err := client.Publish(ctx, &eventsapi.PublishRequest{ + Topic: topic, + Event: payload, + }); err != nil { + return errdefs.FromGRPC(err) + } + return nil + }, +} + +func getEventPayload(r io.Reader) (*types.Any, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + var any types.Any + if err := any.Unmarshal(data); err != nil { + return nil, err + } + return &any, nil +} + +func connectEvents(address string) (eventsapi.EventsClient, error) { + conn, err := connect(address, dialer.Dialer) + if err != nil { + return nil, errors.Wrapf(err, "failed to dial %q", address) + } + return eventsapi.NewEventsClient(conn), nil +} + +func connect(address string, d func(string, 
time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { + gopts := []grpc.DialOption{ + grpc.WithBlock(), + grpc.WithInsecure(), + grpc.WithTimeout(60 * time.Second), + grpc.WithDialer(d), + grpc.FailOnNonTempDialError(true), + grpc.WithBackoffMaxDelay(3 * time.Second), + } + conn, err := grpc.Dial(dialer.DialAddress(address), gopts...) + if err != nil { + return nil, errors.Wrapf(err, "failed to dial %q", address) + } + return conn, nil +} diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go index ad60c69ea..895e793ae 100644 --- a/vendor/github.com/containerd/containerd/container.go +++ b/vendor/github.com/containerd/containerd/container.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go index fb22a9096..4f586a4ec 100644 --- a/vendor/github.com/containerd/containerd/container_opts.go +++ b/vendor/github.com/containerd/containerd/container_opts.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go index deda0f70f..8ae955113 100644 --- a/vendor/github.com/containerd/containerd/container_opts_unix.go +++ b/vendor/github.com/containerd/containerd/container_opts_unix.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package containerd import ( @@ -115,7 +131,7 @@ func WithTaskCheckpoint(im Image) NewTaskOpts { } } -func decodeIndex(ctx context.Context, store content.Store, id digest.Digest) (*v1.Index, error) { +func decodeIndex(ctx context.Context, store content.Provider, id digest.Digest) (*v1.Index, error) { var index v1.Index p, err := content.ReadBlob(ctx, store, id) if err != nil { diff --git a/vendor/github.com/containerd/containerd/containers/containers.go b/vendor/github.com/containerd/containerd/containers/containers.go index df4ad83c9..c624164e8 100644 --- a/vendor/github.com/containerd/containerd/containers/containers.go +++ b/vendor/github.com/containerd/containerd/containers/containers.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containers import ( diff --git a/vendor/github.com/containerd/containerd/containerstore.go b/vendor/github.com/containerd/containerd/containerstore.go index 4db2350b0..ee3d8c727 100644 --- a/vendor/github.com/containerd/containerd/containerstore.go +++ b/vendor/github.com/containerd/containerd/containerstore.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go index 05fd4aebe..de9dd48f5 100644 --- a/vendor/github.com/containerd/containerd/content/content.go +++ b/vendor/github.com/containerd/containerd/content/content.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package content import ( diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index c125e2a2b..ac0b5a3db 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package content
 
 import (
diff --git a/vendor/github.com/containerd/containerd/content/local/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go
new file mode 100644
index 000000000..411c29a9d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/locks.go
@@ -0,0 +1,53 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package local
+
+import (
+	"sync"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+// Handles locking references
+
+var (
+	// locks lets us lock in process
+	locks   = map[string]struct{}{}
+	locksMu sync.Mutex
+)
+
+func tryLock(ref string) error {
+	locksMu.Lock()
+	defer locksMu.Unlock()
+
+	if _, ok := locks[ref]; ok {
+		return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", ref)
+	}
+
+	locks[ref] = struct{}{}
+	return nil
+}
+
+func unlock(ref string) {
+	locksMu.Lock()
+	defer locksMu.Unlock()
+
+	if _, ok := locks[ref]; ok {
+		delete(locks, ref)
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/readerat.go b/vendor/github.com/containerd/containerd/content/local/readerat.go
new file mode 100644
index 000000000..42b99dc42
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/readerat.go
@@ -0,0 +1,40 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package local
+
+import (
+	"os"
+)
+
+// sizeReaderAt implements content.ReaderAt over an already-opened file,
+// delegating each ReadAt call to the underlying *os.File and reporting the
+// size recorded when the blob was opened.
+type sizeReaderAt struct {
+	size int64
+	fp   *os.File
+}
+
+func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) {
+	return ra.fp.ReadAt(p, offset)
+}
+
+func (ra sizeReaderAt) Size() int64 {
+	return ra.size
+}
+
+func (ra sizeReaderAt) Close() error {
+	return ra.fp.Close()
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go
new file mode 100644
index 000000000..69437dfd1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/store.go
@@ -0,0 +1,609 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package local
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/filters"
+	"github.com/containerd/containerd/log"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		buffer := make([]byte, 1<<20)
+		return &buffer
+	},
+}
+
+// LabelStore is used to store mutable labels for digests
+type LabelStore interface {
+	// Get returns all the labels for the given digest
+	Get(digest.Digest) (map[string]string, error)
+
+	// Set sets all the labels for a given digest
+	Set(digest.Digest, map[string]string) error
+
+	// Update replaces the given labels for a digest;
+	// a key with an empty value removes a label.
+	Update(digest.Digest, map[string]string) (map[string]string, error)
+}
+
+// Store is a digest-keyed store for content. All data written into the store
+// is stored under a verifiable digest.
+//
+// Store can generally support multi-reader, single-writer ingest of data,
+// including resumable ingest.
+type store struct {
+	root string
+	ls   LabelStore
+}
+
+// NewStore returns a local content store
+func NewStore(root string) (content.Store, error) {
+	return NewLabeledStore(root, nil)
+}
+
+// NewLabeledStore returns a new content store using the provided label store
+
+// Note: content stores which are used underneath a metadata store may not
+// require labels and should use `NewStore`. `NewLabeledStore` is primarily
+// useful for tests or standalone implementations.
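+//
+// On disk, the store keeps committed blobs under <root>/blobs/<algorithm>/<hex>
+// and in-flight ingests under <root>/ingest/<digest-of-ref> (see blobPath and
+// ingestRoot below).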
+func NewLabeledStore(root string, ls LabelStore) (content.Store, error) { + if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil { + return nil, err + } + + return &store{ + root: root, + ls: ls, + }, nil +} + +func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + p := s.blobPath(dgst) + fi, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) + } + + return content.Info{}, err + } + var labels map[string]string + if s.ls != nil { + labels, err = s.ls.Get(dgst) + if err != nil { + return content.Info{}, err + } + } + return s.info(dgst, fi, labels), nil +} + +func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info { + return content.Info{ + Digest: dgst, + Size: fi.Size(), + CreatedAt: fi.ModTime(), + UpdatedAt: getATime(fi), + Labels: labels, + } +} + +// ReaderAt returns an io.ReaderAt for the blob. +func (s *store) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) { + p := s.blobPath(dgst) + fi, err := os.Stat(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", dgst, p) + } + + fp, err := os.Open(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", dgst, p) + } + + return sizeReaderAt{size: fi.Size(), fp: fp}, nil +} + +// Delete removes a blob by its digest. +// +// While this is safe to do concurrently, safe exist-removal logic must hold +// some global lock on the store. +func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { + if err := os.RemoveAll(s.blobPath(dgst)); err != nil { + if !os.IsNotExist(err) { + return err + } + + return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) + } + + return nil +} + +func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + if s.ls == nil { + return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store") + } + + p := s.blobPath(info.Digest) + fi, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest) + } + + return content.Info{}, err + } + + var ( + all bool + labels map[string]string + ) + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if labels == nil { + labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + all = true + labels = info.Labels + default: + return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest) + } + } + } else { + all = true + labels = info.Labels + } + + if all { + err = s.ls.Set(info.Digest, labels) + } else { + labels, err = s.ls.Update(info.Digest, labels) + } + if err != nil { + return content.Info{}, err + } + + info = s.info(info.Digest, fi, labels) + info.UpdatedAt = time.Now() + + if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil { + log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest) + } + + return info, nil +} + +func (s *store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { + // TODO: Support filters + root := 
filepath.Join(s.root, "blobs")
+	var alg digest.Algorithm
+	return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !fi.IsDir() && !alg.Available() {
+			return nil
+		}
+
+		// TODO(stevvooe): There are a few more cases with subdirs that should be
+		// handled in case the layout gets corrupted. This isn't strict enough
+		// and may spew bad data.
+
+		if path == root {
+			return nil
+		}
+		if filepath.Dir(path) == root {
+			alg = digest.Algorithm(filepath.Base(path))
+
+			if !alg.Available() {
+				alg = ""
+				return filepath.SkipDir
+			}
+
+			// descending into a hash directory
+			return nil
+		}
+
+		dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
+		if err := dgst.Validate(); err != nil {
+			// log error but don't report
+			log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
+			// if we see this, it could mean some sort of corruption of the
+			// store or extra paths not expected previously.
+		}
+
+		var labels map[string]string
+		if s.ls != nil {
+			labels, err = s.ls.Get(dgst)
+			if err != nil {
+				return err
+			}
+		}
+		return fn(s.info(dgst, fi, labels))
+	})
+}
+
+func (s *store) Status(ctx context.Context, ref string) (content.Status, error) {
+	return s.status(s.ingestRoot(ref))
+}
+
+func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
+	fp, err := os.Open(filepath.Join(s.root, "ingest"))
+	if err != nil {
+		return nil, err
+	}
+
+	defer fp.Close()
+
+	fis, err := fp.Readdir(-1)
+	if err != nil {
+		return nil, err
+	}
+
+	filter, err := filters.ParseAll(fs...)
+	if err != nil {
+		return nil, err
+	}
+
+	var active []content.Status
+	for _, fi := range fis {
+		p := filepath.Join(s.root, "ingest", fi.Name())
+		stat, err := s.status(p)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return nil, err
+			}
+
+			// TODO(stevvooe): This is a common error if uploads are being
+			// completed while making this listing. Need to consider taking a
+			// lock on the whole store to coordinate this aspect.
+			//
+			// Another option is to cleanup downloads asynchronously and
+			// coordinate this method with the cleanup process.
+			//
+			// For now, we just skip them, as they really don't exist.
+			continue
+		}
+
+		if filter.Match(adaptStatus(stat)) {
+			active = append(active, stat)
+		}
+	}
+
+	return active, nil
+}
+
+// status works like stat above except it uses the path to the ingest.
+func (s *store) status(ingestPath string) (content.Status, error) {
+	dp := filepath.Join(ingestPath, "data")
+	fi, err := os.Stat(dp)
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+		}
+		return content.Status{}, err
+	}
+
+	ref, err := readFileString(filepath.Join(ingestPath, "ref"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+		}
+		return content.Status{}, err
+	}
+
+	startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
+	if err != nil {
+		return content.Status{}, errors.Wrapf(err, "could not read startedat")
+	}
+
+	updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
+	if err != nil {
+		return content.Status{}, errors.Wrapf(err, "could not read updatedat")
+	}
+
+	// because we don't write updatedat on every write, the mod time may
+	// actually be more up to date.
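+	// Similarly, the size of the data file is reported as the current write
+	// offset in the status returned below.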
+	if fi.ModTime().After(updatedAt) {
+		updatedAt = fi.ModTime()
+	}
+
+	return content.Status{
+		Ref:       ref,
+		Offset:    fi.Size(),
+		Total:     s.total(ingestPath),
+		UpdatedAt: updatedAt,
+		StartedAt: startedAt,
+	}, nil
+}
+
+func adaptStatus(status content.Status) filters.Adaptor {
+	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+		if len(fieldpath) == 0 {
+			return "", false
+		}
+		switch fieldpath[0] {
+		case "ref":
+			return status.Ref, true
+		}
+
+		return "", false
+	})
+}
+
+// total attempts to resolve the total expected size for the write.
+func (s *store) total(ingestPath string) int64 {
+	totalS, err := readFileString(filepath.Join(ingestPath, "total"))
+	if err != nil {
+		return 0
+	}
+
+	total, err := strconv.ParseInt(totalS, 10, 64)
+	if err != nil {
+		// represents a corrupted file, should probably remove.
+		return 0
+	}
+
+	return total
+}
+
+// Writer begins or resumes the active writer identified by ref. If the writer
+// is already in use, an error is returned. Only one writer may be in use per
+// ref at a time.
+//
+// The argument `ref` is used to uniquely identify a long-lived writer transaction.
+func (s *store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
+	var lockErr error
+	for count := uint64(0); count < 10; count++ {
+		time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count)))
+		if err := tryLock(ref); err != nil {
+			if !errdefs.IsUnavailable(err) {
+				return nil, err
+			}
+
+			lockErr = err
+		} else {
+			lockErr = nil
+			break
+		}
+	}
+
+	if lockErr != nil {
+		return nil, lockErr
+	}
+
+	w, err := s.writer(ctx, ref, total, expected)
+	if err != nil {
+		unlock(ref)
+		return nil, err
+	}
+
+	return w, nil // lock is now held by w.
+}
+
+// writer provides the main implementation of the Writer method. The caller
+// must hold the lock correctly and release on error if there is a problem.
+func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
+	// TODO(stevvooe): Need to actually store expected here. We have
+	// code in the service that shouldn't be dealing with this.
+	if expected != "" {
+		p := s.blobPath(expected)
+		if _, err := os.Stat(p); err == nil {
+			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
+		}
+	}
+
+	path, refp, data := s.ingestPaths(ref)
+
+	var (
+		digester  = digest.Canonical.Digester()
+		offset    int64
+		startedAt time.Time
+		updatedAt time.Time
+	)
+
+	// ensure that the ingest path has been created.
+	if err := os.Mkdir(path, 0755); err != nil {
+		if !os.IsExist(err) {
+			return nil, err
+		}
+
+		status, err := s.status(path)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed reading status of resume write")
+		}
+
+		if ref != status.Ref {
+			// NOTE(stevvooe): This is fairly low cost, but we should consider
+			// taking a secondary lookup to ensure uniqueness here. If this
+			// becomes a hot path, we may want to rely on the filesystem for
+			// uniqueness.
+			return nil, errors.Errorf("ref key does not match: %v != %v", ref, status.Ref)
+		}
+
+		if total > 0 && status.Total > 0 && total != status.Total {
+			return nil, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
+		}
+
+		// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
+		fp, err := os.Open(data)
+		if err != nil {
+			return nil, err
+		}
+		defer fp.Close()
+
+		p := bufPool.Get().(*[]byte)
+		defer bufPool.Put(p)
+
+		offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
+		if err != nil {
+			return nil, err
+		}
+
+		updatedAt = status.UpdatedAt
+		startedAt = status.StartedAt
+		total = status.Total
+	} else {
+		startedAt = time.Now()
+		updatedAt = startedAt
+
+		// the ingest is new, we need to setup the target location.
+		// write the ref to a file for later use
+		if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
+			return nil, err
+		}
+
+		if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
+			return nil, err
+		}
+
+		if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
+			return nil, err
+		}
+
+		if total > 0 {
+			if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to open data file")
+	}
+
+	if _, err := fp.Seek(offset, io.SeekStart); err != nil {
+		return nil, errors.Wrap(err, "could not seek to current write offset")
+	}
+
+	return &writer{
+		s:         s,
+		fp:        fp,
+		ref:       ref,
+		path:      path,
+		offset:    offset,
+		total:     total,
+		digester:  digester,
+		startedAt: startedAt,
+		updatedAt: updatedAt,
+	}, nil
+}
+
+// Abort an active transaction keyed by ref. If the ingest is active, it will
+// be cancelled. Any resources associated with the ingest will be cleaned.
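+//
+// Note that Abort only removes the on-disk ingest state. The in-process ref
+// lock taken by Writer is released when the corresponding writer is closed
+// or committed, not here (see locks.go and writer.go).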
+func (s *store) Abort(ctx context.Context, ref string) error {
+	root := s.ingestRoot(ref)
+	if err := os.RemoveAll(root); err != nil {
+		if os.IsNotExist(err) {
+			return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref)
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+func (s *store) blobPath(dgst digest.Digest) string {
+	return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
+}
+
+func (s *store) ingestRoot(ref string) string {
+	dgst := digest.FromString(ref)
+	return filepath.Join(s.root, "ingest", dgst.Hex())
+}
+
+// ingestPaths returns the paths used by the ingest for ref. The paths are the
+// following:
+//
+// - root: entire ingest directory
+// - ref: name of the starting ref, must be unique
+// - data: file where data is written
+//
+func (s *store) ingestPaths(ref string) (string, string, string) {
+	var (
+		fp = s.ingestRoot(ref)
+		rp = filepath.Join(fp, "ref")
+		dp = filepath.Join(fp, "data")
+	)
+
+	return fp, rp, dp
+}
+
+func readFileString(path string) (string, error) {
+	p, err := ioutil.ReadFile(path)
+	return string(p), err
+}
+
+// readFileTimestamp reads a file with just a timestamp present.
+func readFileTimestamp(p string) (time.Time, error) {
+	b, err := ioutil.ReadFile(p)
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+		}
+		return time.Time{}, err
+	}
+
+	var t time.Time
+	if err := t.UnmarshalText(b); err != nil {
+		return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p)
+	}
+
+	return t, nil
+}
+
+func writeTimestampFile(p string, t time.Time) error {
+	b, err := t.MarshalText()
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(p, b, 0666)
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/store_unix.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go
new file mode 100644
index 000000000..f5f34fd0c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/store_unix.go
@@ -0,0 +1,35 @@
+// +build linux solaris darwin freebsd
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package local
+
+import (
+	"os"
+	"syscall"
+	"time"
+
+	"github.com/containerd/containerd/sys"
+)
+
+func getATime(fi os.FileInfo) time.Time {
+	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
+		return sys.StatATimeAsTime(st)
+	}
+
+	return fi.ModTime()
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/store_windows.go b/vendor/github.com/containerd/containerd/content/local/store_windows.go
new file mode 100644
index 000000000..bce849979
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/store_windows.go
@@ -0,0 +1,26 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go new file mode 100644 index 000000000..a6579a9d2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/writer.go @@ -0,0 +1,191 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "context" + "io" + "os" + "path/filepath" + "runtime" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// writer represents a write transaction against the blob store. +type writer struct { + s *store + fp *os.File // opened data file + path string // path to writer dir + ref string // ref key + offset int64 + total int64 + digester digest.Digester + startedAt time.Time + updatedAt time.Time +} + +func (w *writer) Status() (content.Status, error) { + return content.Status{ + Ref: w.ref, + Offset: w.offset, + Total: w.total, + StartedAt: w.startedAt, + UpdatedAt: w.updatedAt, + }, nil +} + +// Digest returns the current digest of the content, up to the current write. +// +// Cannot be called concurrently with `Write`. +func (w *writer) Digest() digest.Digest { + return w.digester.Digest() +} + +// Write p to the transaction. +// +// Note that writes are unbuffered to the backing file. When writing, it is +// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer. +func (w *writer) Write(p []byte) (n int, err error) { + n, err = w.fp.Write(p) + w.digester.Hash().Write(p[:n]) + w.offset += int64(len(p)) + w.updatedAt = time.Now() + return n, err +} + +func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + var base content.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return err + } + } + + if w.fp == nil { + return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") + } + + if err := w.fp.Sync(); err != nil { + return errors.Wrap(err, "sync failed") + } + + fi, err := w.fp.Stat() + if err != nil { + return errors.Wrap(err, "stat on ingest file failed") + } + + // change to readonly, more important for read, but provides _some_ + // protection from this point on. We use the existing perms with a mask + // only allowing reads honoring the umask on creation. + // + // This removes write and exec, only allowing read per the creation umask. 
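+	// (The mask (fi.Mode() & os.ModePerm) &^ 0333 keeps only the read bits:
+	// write and execute are cleared for user, group, and other.)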
+	//
+	// NOTE: Windows does not support this operation
+	if runtime.GOOS != "windows" {
+		if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
+			return errors.Wrap(err, "failed to change ingest file permissions")
+		}
+	}
+
+	if size > 0 && size != fi.Size() {
+		return errors.Errorf("unexpected commit size %d, expected %d", fi.Size(), size)
+	}
+
+	if err := w.fp.Close(); err != nil {
+		return errors.Wrap(err, "failed closing ingest")
+	}
+
+	dgst := w.digester.Digest()
+	if expected != "" && expected != dgst {
+		return errors.Errorf("unexpected commit digest %s, expected %s", dgst, expected)
+	}
+
+	var (
+		ingest = filepath.Join(w.path, "data")
+		target = w.s.blobPath(dgst)
+	)
+
+	// make sure parent directories of blob exist
+	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+		return err
+	}
+
+	// clean up!!
+	defer os.RemoveAll(w.path)
+
+	if err := os.Rename(ingest, target); err != nil {
+		if os.IsExist(err) {
+			// collision with the target file!
+			return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
+		}
+		return err
+	}
+	commitTime := time.Now()
+	if err := os.Chtimes(target, commitTime, commitTime); err != nil {
+		return err
+	}
+
+	w.fp = nil
+	unlock(w.ref)
+
+	if w.s.ls != nil && base.Labels != nil {
+		if err := w.s.ls.Set(dgst, base.Labels); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Close the writer, flushing any unwritten data and leaving the progress
+// intact.
+//
+// If one needs to resume the transaction, a new writer can be obtained from
+// `Ingester.Writer` using the same key. The write can then be continued
+// from where it was left off.
+//
+// To abandon a transaction completely, first call close then `IngestManager.Abort` to
+// clean up the associated resources.
+func (w *writer) Close() (err error) {
+	if w.fp != nil {
+		w.fp.Sync()
+		err = w.fp.Close()
+		writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt)
+		w.fp = nil
+		unlock(w.ref)
+		return
+	}
+
+	return nil
+}
+
+func (w *writer) Truncate(size int64) error {
+	if size != 0 {
+		return errors.New("Truncate: unsupported size")
+	}
+	w.offset = 0
+	w.digester.Hash().Reset()
+	if _, err := w.fp.Seek(0, io.SeekStart); err != nil {
+		return err
+	}
+	return w.fp.Truncate(0)
+}
diff --git a/vendor/github.com/containerd/containerd/content_reader.go b/vendor/github.com/containerd/containerd/content_reader.go
index 7acd2d30e..72628e6ca 100644
--- a/vendor/github.com/containerd/containerd/content_reader.go
+++ b/vendor/github.com/containerd/containerd/content_reader.go
@@ -1,3 +1,19 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package containerd
 
 import (
diff --git a/vendor/github.com/containerd/containerd/content_store.go b/vendor/github.com/containerd/containerd/content_store.go
index 1b539694d..790249c2b 100644
--- a/vendor/github.com/containerd/containerd/content_store.go
+++ b/vendor/github.com/containerd/containerd/content_store.go
@@ -1,3 +1,19 @@
+/*
+   Copyright The containerd Authors.
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/content_writer.go b/vendor/github.com/containerd/containerd/content_writer.go index b18f3512a..a4247daa0 100644 --- a/vendor/github.com/containerd/containerd/content_writer.go +++ b/vendor/github.com/containerd/containerd/content_writer.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/contrib/apparmor/apparmor.go b/vendor/github.com/containerd/containerd/contrib/apparmor/apparmor.go index 33d325e4b..5bb108640 100644 --- a/vendor/github.com/containerd/containerd/contrib/apparmor/apparmor.go +++ b/vendor/github.com/containerd/containerd/contrib/apparmor/apparmor.go @@ -1,5 +1,21 @@ // +build linux +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package apparmor import ( diff --git a/vendor/github.com/containerd/containerd/contrib/apparmor/template.go b/vendor/github.com/containerd/containerd/contrib/apparmor/template.go index 29c7af9ea..34ff99246 100644 --- a/vendor/github.com/containerd/containerd/contrib/apparmor/template.go +++ b/vendor/github.com/containerd/containerd/contrib/apparmor/template.go @@ -1,5 +1,21 @@ // +build linux +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package apparmor import ( diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go index 1d4b1bfba..2a1806cf8 100644 --- a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go @@ -1,5 +1,21 @@ // +build linux +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package seccomp import ( diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go index cefdd952d..fe5f43273 100644 --- a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go @@ -1,5 +1,21 @@ // +build linux +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package seccomp import ( diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 1a76d569f..30ed42235 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package defaults const ( diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go index 7d74d22e4..983bf762f 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -1,5 +1,21 @@ // +build windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package defaults
 
 import (
diff --git a/vendor/github.com/containerd/containerd/defaults/doc.go b/vendor/github.com/containerd/containerd/defaults/doc.go
index 2c6e39f55..274d504a3 100644
--- a/vendor/github.com/containerd/containerd/defaults/doc.go
+++ b/vendor/github.com/containerd/containerd/defaults/doc.go
@@ -1,3 +1,19 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 // Package defaults provides several common defaults for interacting with
 // containerd. These can be used on the client-side or server-side.
 package defaults
diff --git a/vendor/github.com/containerd/containerd/dialer/dialer.go b/vendor/github.com/containerd/containerd/dialer/dialer.go
index 1ad4ab718..766d34493 100644
--- a/vendor/github.com/containerd/containerd/dialer/dialer.go
+++ b/vendor/github.com/containerd/containerd/dialer/dialer.go
@@ -1,3 +1,19 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package dialer
 
 import (
diff --git a/vendor/github.com/containerd/containerd/dialer/dialer_unix.go b/vendor/github.com/containerd/containerd/dialer/dialer_unix.go
index 7f8d43b03..e7d195833 100644
--- a/vendor/github.com/containerd/containerd/dialer/dialer_unix.go
+++ b/vendor/github.com/containerd/containerd/dialer/dialer_unix.go
@@ -1,5 +1,21 @@
 // +build !windows
 
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + package dialer import ( diff --git a/vendor/github.com/containerd/containerd/dialer/dialer_windows.go b/vendor/github.com/containerd/containerd/dialer/dialer_windows.go index 2aac03898..64d30dea0 100644 --- a/vendor/github.com/containerd/containerd/dialer/dialer_windows.go +++ b/vendor/github.com/containerd/containerd/dialer/dialer_windows.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package dialer import ( diff --git a/vendor/github.com/containerd/containerd/diff.go b/vendor/github.com/containerd/containerd/diff.go index 1a10dbc64..af95e03e7 100644 --- a/vendor/github.com/containerd/containerd/diff.go +++ b/vendor/github.com/containerd/containerd/diff.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/diff/apply/apply.go b/vendor/github.com/containerd/containerd/diff/apply/apply.go new file mode 100644 index 000000000..3f30f29cc --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/apply/apply.go @@ -0,0 +1,128 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package apply + +import ( + "context" + "io" + "io/ioutil" + "time" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// NewFileSystemApplier returns an applier which simply mounts +// and applies diff onto the mounted filesystem. 
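+//
+// Only read access to blob data is required, so the applier takes a
+// content.Provider rather than a full content.Store.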
+func NewFileSystemApplier(cs content.Provider) diff.Applier { + return &fsApplier{ + store: cs, + } +} + +type fsApplier struct { + store content.Provider +} + +var emptyDesc = ocispec.Descriptor{} + +// Apply applies the content associated with the provided digests onto the +// provided mounts. Archive content will be extracted and decompressed if +// necessary. +func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount) (d ocispec.Descriptor, err error) { + t1 := time.Now() + defer func() { + if err == nil { + log.G(ctx).WithFields(logrus.Fields{ + "d": time.Now().Sub(t1), + "dgst": desc.Digest, + "size": desc.Size, + "media": desc.MediaType, + }).Debugf("diff applied") + } + }() + + isCompressed, err := images.IsCompressedDiff(ctx, desc.MediaType) + if err != nil { + return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType) + } + + var ocidesc ocispec.Descriptor + if err := mount.WithTempMount(ctx, mounts, func(root string) error { + ra, err := s.store.ReaderAt(ctx, desc.Digest) + if err != nil { + return errors.Wrap(err, "failed to get reader from content store") + } + defer ra.Close() + + r := content.NewReader(ra) + if isCompressed { + ds, err := compression.DecompressStream(r) + if err != nil { + return err + } + defer ds.Close() + r = ds + } + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(r, digester.Hash()), + } + + if _, err := archive.Apply(ctx, root, rc); err != nil { + return err + } + + // Read any trailing data + if _, err := io.Copy(ioutil.Discard, rc); err != nil { + return err + } + + ocidesc = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + } + return nil + + }); err != nil { + return emptyDesc, err + } + return ocidesc, nil +} + +type readCounter struct { + r io.Reader + c int64 +} + +func (rc *readCounter) Read(p []byte) (n int, err error) { + n, err = rc.r.Read(p) + rc.c += int64(n) + return +} diff --git a/vendor/github.com/containerd/containerd/diff/diff.go b/vendor/github.com/containerd/containerd/diff/diff.go index c7e1c3db0..2b6f01c74 100644 --- a/vendor/github.com/containerd/containerd/diff/diff.go +++ b/vendor/github.com/containerd/containerd/diff/diff.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package diff import ( diff --git a/vendor/github.com/containerd/containerd/diff/walking/differ.go b/vendor/github.com/containerd/containerd/diff/walking/differ.go new file mode 100644 index 000000000..aa1bd73b4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/walking/differ.go @@ -0,0 +1,166 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package walking + +import ( + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "io" + "time" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type walkingDiff struct { + store content.Store +} + +var emptyDesc = ocispec.Descriptor{} + +// NewWalkingDiff is a generic implementation of diff.Comparer. The diff is +// calculated by mounting both the upper and lower mount sets and walking the +// mounted directories concurrently. Changes are calculated by comparing files +// against each other or by comparing file existence between directories. +// NewWalkingDiff uses no special characteristics of the mount sets and is +// expected to work with any filesystem. +func NewWalkingDiff(store content.Store) diff.Comparer { + return &walkingDiff{ + store: store, + } +} + +// Compare creates a diff between the given mounts and uploads the result +// to the content store. +func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { + var config diff.Config + for _, opt := range opts { + if err := opt(&config); err != nil { + return emptyDesc, err + } + } + + if config.MediaType == "" { + config.MediaType = ocispec.MediaTypeImageLayerGzip + } + + var isCompressed bool + switch config.MediaType { + case ocispec.MediaTypeImageLayer: + case ocispec.MediaTypeImageLayerGzip: + isCompressed = true + default: + return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", config.MediaType) + } + + var ocidesc ocispec.Descriptor + if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error { + return mount.WithTempMount(ctx, upper, func(upperRoot string) error { + var newReference bool + if config.Reference == "" { + newReference = true + config.Reference = uniqueRef() + } + + cw, err := s.store.Writer(ctx, config.Reference, 0, "") + if err != nil { + return errors.Wrap(err, "failed to open writer") + } + defer func() { + if err != nil { + cw.Close() + if newReference { + if err := s.store.Abort(ctx, config.Reference); err != nil { + log.G(ctx).WithField("ref", config.Reference).Warnf("failed to delete diff upload") + } + } + } + }() + if !newReference { + if err := cw.Truncate(0); err != nil { + return err + } + } + + if isCompressed { + dgstr := digest.SHA256.Digester() + compressed, err := compression.CompressStream(cw, compression.Gzip) + if err != nil { + return errors.Wrap(err, "failed to get compressed stream") + } + err = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot) + compressed.Close() + if err != nil { + return errors.Wrap(err, "failed to write compressed diff") + } + + if config.Labels == nil { + config.Labels = map[string]string{} 
+ } + config.Labels["containerd.io/uncompressed"] = dgstr.Digest().String() + } else { + if err = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); err != nil { + return errors.Wrap(err, "failed to write diff") + } + } + + var commitopts []content.Opt + if config.Labels != nil { + commitopts = append(commitopts, content.WithLabels(config.Labels)) + } + + dgst := cw.Digest() + if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil { + return errors.Wrap(err, "failed to commit") + } + + info, err := s.store.Info(ctx, dgst) + if err != nil { + return errors.Wrap(err, "failed to get info from content store") + } + + ocidesc = ocispec.Descriptor{ + MediaType: config.MediaType, + Size: info.Size, + Digest: info.Digest, + } + return nil + }) + }); err != nil { + return emptyDesc, err + } + + return ocidesc, nil +} + +func uniqueRef() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git a/vendor/github.com/containerd/containerd/diff/walking/plugin/plugin.go b/vendor/github.com/containerd/containerd/diff/walking/plugin/plugin.go new file mode 100644 index 000000000..a2dcc1094 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/walking/plugin/plugin.go @@ -0,0 +1,55 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/diff/apply" + "github.com/containerd/containerd/diff/walking" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/plugin" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.DiffPlugin, + ID: "walking", + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + md, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + cs := md.(*metadata.DB).ContentStore() + + return diffPlugin{ + Comparer: walking.NewWalkingDiff(cs), + Applier: apply.NewFileSystemApplier(cs), + }, nil + }, + }) +} + +type diffPlugin struct { + diff.Comparer + diff.Applier +} diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go index b4d6ea860..40427fc5a 100644 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + // Package errdefs defines the common errors used throughout containerd // packages. // diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go index 6a3bbcaa1..4eab03ab8 100644 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package errdefs import ( diff --git a/vendor/github.com/containerd/containerd/events/events.go b/vendor/github.com/containerd/containerd/events/events.go index 6f4c8b3af..b7eb86f1e 100644 --- a/vendor/github.com/containerd/containerd/events/events.go +++ b/vendor/github.com/containerd/containerd/events/events.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package events import ( diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go index 3178fc407..51c760f04 100644 --- a/vendor/github.com/containerd/containerd/events/exchange/exchange.go +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package exchange import ( diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go index 5a5ac7ec1..5a9c559c1 100644 --- a/vendor/github.com/containerd/containerd/filters/adaptor.go +++ b/vendor/github.com/containerd/containerd/filters/adaptor.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package filters // Adaptor specifies the mapping of fieldpaths to a type. For the given field diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go index 621755762..30debadc9 100644 --- a/vendor/github.com/containerd/containerd/filters/filter.go +++ b/vendor/github.com/containerd/containerd/filters/filter.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + // Package filters defines a syntax and parser that can be used for the // filtration of items across the containerd API. The core is built on the // concept of protobuf field paths, with quoting. Several operators allow the diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go index c765ea00c..9dced523b 100644 --- a/vendor/github.com/containerd/containerd/filters/parser.go +++ b/vendor/github.com/containerd/containerd/filters/parser.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package filters import ( diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go index 08698e1ba..2d64e23a3 100644 --- a/vendor/github.com/containerd/containerd/filters/quote.go +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package filters
 
 import (
diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go
index 3a8e72395..c3961962c 100644
--- a/vendor/github.com/containerd/containerd/filters/scanner.go
+++ b/vendor/github.com/containerd/containerd/filters/scanner.go
@@ -1,3 +1,19 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package filters
 
 import (
diff --git a/vendor/github.com/containerd/containerd/gc/gc.go b/vendor/github.com/containerd/containerd/gc/gc.go
new file mode 100644
index 000000000..35a1712cb
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/gc/gc.go
@@ -0,0 +1,182 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package gc experiments with providing central gc tooling to ensure
+// deterministic resource removal within containerd.
+//
+// For now, we just have a single exported implementation that can be used
+// for certain use cases.
+package gc
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+// ResourceType represents the type of resource at a node
+type ResourceType uint8
+
+// Node represents a resource which has a type and key;
+// a node can be used to look up other nodes.
+type Node struct {
+	Type      ResourceType
+	Namespace string
+	Key       string
+}
+
+// Stats about a garbage collection run
+type Stats interface {
+	Elapsed() time.Duration
+}
+
+// Tricolor implements basic, single-threaded tri-color GC. Given the roots,
+// the complete set and a refs function, this function returns a map of all
+// reachable objects.
+//
+// Correct usage requires that the caller not allow the arguments to change
+// until the result is used to delete objects in the system.
+//
+// It will allocate memory proportional to the size of the reachable set.
+//
+// We can probably use this to inform a design for incremental GC by injecting
+// callbacks to the set modification algorithms.
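+//
+// A minimal usage sketch (hypothetical; assumes a refsFn with the signature
+// func(Node) ([]Node, error) and an all slice holding every known node):
+//
+//	reachable, err := Tricolor(roots, refsFn)
+//	if err != nil {
+//		return err
+//	}
+//	// remove everything that was not reached (see Sweep below)
+//	err = Sweep(reachable, all, removeFn)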
+func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struct{}, error) {
+	var (
+		grays     []Node                // maintain a gray "stack"
+		seen      = map[Node]struct{}{} // or not "white", basically "seen"
+		reachable = map[Node]struct{}{} // or "black", in tri-color parlance
+	)
+
+	grays = append(grays, roots...)
+
+	for len(grays) > 0 {
+		// Pick any gray object
+		id := grays[len(grays)-1] // effectively "depth first" because we pop the most recently pushed element
+		grays = grays[:len(grays)-1]
+		seen[id] = struct{}{} // post-mark this as not-white
+		rs, err := refs(id)
+		if err != nil {
+			return nil, err
+		}
+
+		// mark all the referenced objects as gray
+		for _, target := range rs {
+			if _, ok := seen[target]; !ok {
+				grays = append(grays, target)
+			}
+		}
+
+		// mark as black when done
+		reachable[id] = struct{}{}
+	}
+
+	return reachable, nil
+}
+
+// ConcurrentMark implements simple, concurrent GC. All the roots are scanned
+// and the complete set of references is formed by calling the refs function
+// for each seen object. This function returns a map of all objects reachable
+// from a root.
+//
+// Correct usage requires that the caller not allow the arguments to change
+// until the result is used to delete objects in the system.
+//
+// It will allocate memory proportional to the size of the reachable set.
+func ConcurrentMark(ctx context.Context, root <-chan Node, refs func(context.Context, Node, func(Node)) error) (map[Node]struct{}, error) {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var (
+		grays = make(chan Node)
+		seen  = map[Node]struct{}{} // or not "white", basically "seen"
+		wg    sync.WaitGroup
+
+		errOnce sync.Once
+		refErr  error
+	)
+
+	go func() {
+		for gray := range grays {
+			if _, ok := seen[gray]; ok {
+				wg.Done()
+				continue
+			}
+			seen[gray] = struct{}{} // post-mark this as non-white
+
+			go func(gray Node) {
+				defer wg.Done()
+
+				send := func(n Node) {
+					wg.Add(1)
+					select {
+					case grays <- n:
+					case <-ctx.Done():
+						wg.Done()
+					}
+				}
+
+				if err := refs(ctx, gray, send); err != nil {
+					errOnce.Do(func() {
+						refErr = err
+						cancel()
+					})
+				}
+
+			}(gray)
+		}
+	}()
+
+	for r := range root {
+		wg.Add(1)
+		select {
+		case grays <- r:
+		case <-ctx.Done():
+			wg.Done()
+		}
+
+	}
+
+	// Wait for outstanding grays to be processed
+	wg.Wait()
+
+	close(grays)
+
+	if refErr != nil {
+		return nil, refErr
+	}
+	if cErr := ctx.Err(); cErr != nil {
+		return nil, cErr
+	}
+
+	return seen, nil
+}
+
+// Sweep removes all nodes in the complete set which are not in the reachable
+// set by calling the provided remove function.
+func Sweep(reachable map[Node]struct{}, all []Node, remove func(Node) error) error {
+	// All black objects are now reachable, and all white objects are
+	// unreachable. Free those that are white!
+	for _, node := range all {
+		if _, ok := reachable[node]; !ok {
+			if err := remove(node); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/gc/scheduler/scheduler.go b/vendor/github.com/containerd/containerd/gc/scheduler/scheduler.go
new file mode 100644
index 000000000..199c90c0d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/gc/scheduler/scheduler.go
@@ -0,0 +1,352 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package scheduler
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/gc"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/plugin"
+	"github.com/pkg/errors"
+)
+
+// config configures the garbage collection policies.
+type config struct {
+	// PauseThreshold represents the maximum amount of time garbage
+	// collection should be scheduled based on the average pause time.
+	// For example, a value of 0.02 means that scheduled garbage collection
+	// pauses should represent at most 2% of real time,
+	// or 20ms of every second.
+	//
+	// A maximum value of 0.5 is enforced to prevent over-scheduling of the
+	// garbage collector; trigger options are available to run it in a more
+	// predictable time frame after mutation.
+	//
+	// Default is 0.02
+	PauseThreshold float64 `toml:"pause_threshold"`
+
+	// DeletionThreshold is used to guarantee that a garbage collection is
+	// scheduled after a configured number of deletions have occurred
+	// since the previous garbage collection. A value of 0 indicates that
+	// garbage collection will not be triggered by deletion count.
+	//
+	// Default is 0
+	DeletionThreshold int `toml:"deletion_threshold"`
+
+	// MutationThreshold is used to guarantee that a garbage collection is
+	// run after a configured number of database mutations have occurred
+	// since the previous garbage collection. A value of 0 indicates that
+	// garbage collection will only be run after a manual trigger or
+	// deletion. Unlike the deletion threshold, the mutation threshold does
+	// not cause scheduling of a garbage collection, but ensures GC is run
+	// at the next scheduled GC.
+	//
+	// Default is 100
+	MutationThreshold int `toml:"mutation_threshold"`
+
+	// ScheduleDelay is the duration in the future to schedule a garbage
+	// collection triggered manually or by exceeding the configured
+	// threshold for deletion or mutation. A zero value will schedule
+	// immediately. Use suffix "ms" for millisecond and "s" for second.
+	//
+	// Default is "0ms"
+	ScheduleDelay duration `toml:"schedule_delay"`
+
+	// StartupDelay is the delay duration to do an initial garbage
+	// collection after startup. The initial garbage collection is used to
+	// set the base for the pause threshold and should be scheduled in the
+	// future to avoid slowing down other startup processes. Use suffix
+	// "ms" for millisecond and "s" for second.
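+	//
+	// For example (illustrative), startup_delay = "1s" would defer the
+	// initial collection until one second after the daemon starts.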
+ // + // Default is "100ms" + StartupDelay duration `toml:"startup_delay"` +} + +type duration time.Duration + +func (d *duration) UnmarshalText(text []byte) error { + ed, err := time.ParseDuration(string(text)) + if err != nil { + return err + } + *d = duration(ed) + return nil +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GCPlugin, + ID: "scheduler", + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + Config: &config{ + PauseThreshold: 0.02, + DeletionThreshold: 0, + MutationThreshold: 100, + ScheduleDelay: duration(0), + StartupDelay: duration(100 * time.Millisecond), + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + md, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + mdCollector, ok := md.(collector) + if !ok { + return nil, errors.Errorf("%s %T must implement collector", plugin.MetadataPlugin, md) + } + + m := newScheduler(mdCollector, ic.Config.(*config)) + + ic.Meta.Exports = map[string]string{ + "PauseThreshold": fmt.Sprint(m.pauseThreshold), + "DeletionThreshold": fmt.Sprint(m.deletionThreshold), + "MutationThreshold": fmt.Sprint(m.mutationThreshold), + "ScheduleDelay": fmt.Sprint(m.scheduleDelay), + } + + go m.run(ic.Context) + + return m, nil + }, + }) +} + +type mutationEvent struct { + ts time.Time + mutation bool + dirty bool +} + +type collector interface { + RegisterMutationCallback(func(bool)) + GarbageCollect(context.Context) (gc.Stats, error) +} + +type gcScheduler struct { + c collector + + eventC chan mutationEvent + + waiterL sync.Mutex + waiters []chan gc.Stats + + pauseThreshold float64 + deletionThreshold int + mutationThreshold int + scheduleDelay time.Duration + startupDelay time.Duration +} + +func newScheduler(c collector, cfg *config) *gcScheduler { + eventC := make(chan mutationEvent) + + s := &gcScheduler{ + c: c, + eventC: eventC, + pauseThreshold: cfg.PauseThreshold, + deletionThreshold: cfg.DeletionThreshold, + mutationThreshold: cfg.MutationThreshold, + scheduleDelay: time.Duration(cfg.ScheduleDelay), + startupDelay: time.Duration(cfg.StartupDelay), + } + + if s.pauseThreshold < 0.0 { + s.pauseThreshold = 0.0 + } + if s.pauseThreshold > 0.5 { + s.pauseThreshold = 0.5 + } + if s.mutationThreshold < 0 { + s.mutationThreshold = 0 + } + if s.scheduleDelay < 0 { + s.scheduleDelay = 0 + } + if s.startupDelay < 0 { + s.startupDelay = 0 + } + + c.RegisterMutationCallback(s.mutationCallback) + + return s +} + +func (s *gcScheduler) ScheduleAndWait(ctx context.Context) (gc.Stats, error) { + return s.wait(ctx, true) +} + +func (s *gcScheduler) wait(ctx context.Context, trigger bool) (gc.Stats, error) { + wc := make(chan gc.Stats, 1) + s.waiterL.Lock() + s.waiters = append(s.waiters, wc) + s.waiterL.Unlock() + + if trigger { + e := mutationEvent{ + ts: time.Now(), + } + go func() { + s.eventC <- e + }() + } + + var gcStats gc.Stats + select { + case stats, ok := <-wc: + if !ok { + return gcStats, errors.New("gc failed") + } + gcStats = stats + case <-ctx.Done(): + return gcStats, ctx.Err() + } + + return gcStats, nil +} + +func (s *gcScheduler) mutationCallback(dirty bool) { + e := mutationEvent{ + ts: time.Now(), + mutation: true, + dirty: dirty, + } + go func() { + s.eventC <- e + }() +} + +func schedule(d time.Duration) (<-chan time.Time, *time.Time) { + next := time.Now().Add(d) + return time.After(d), &next +} + +func (s *gcScheduler) run(ctx context.Context) { + var ( + schedC <-chan time.Time + + lastCollection *time.Time + nextCollection *time.Time + + interval = 
time.Second
+		gcTime      time.Duration
+		collections int
+		// TODO(dmcg): expose collection stats as metrics
+
+		triggered bool
+		deletions int
+		mutations int
+	)
+	if s.startupDelay > 0 {
+		schedC, nextCollection = schedule(s.startupDelay)
+	}
+	for {
+		select {
+		case <-schedC:
+			// Check if garbage collection can be skipped because
+			// it is not needed or was not requested, and reschedule
+			// it to attempt again after another time interval.
+			if !triggered && lastCollection != nil && deletions == 0 &&
+				(s.mutationThreshold == 0 || mutations < s.mutationThreshold) {
+				schedC, nextCollection = schedule(interval)
+				continue
+			}
+		case e := <-s.eventC:
+			if lastCollection != nil && lastCollection.After(e.ts) {
+				continue
+			}
+			if e.dirty {
+				deletions++
+			}
+			if e.mutation {
+				mutations++
+			} else {
+				triggered = true
+			}
+
+			// Check if the condition should cause immediate collection.
+			if triggered ||
+				(s.deletionThreshold > 0 && deletions >= s.deletionThreshold) ||
+				(nextCollection == nil && ((s.deletionThreshold == 0 && deletions > 0) ||
+					(s.mutationThreshold > 0 && mutations >= s.mutationThreshold))) {
+				// Check if not already scheduled before delay threshold
+				if nextCollection == nil || nextCollection.After(time.Now().Add(s.scheduleDelay)) {
+					// TODO(dmcg): track re-schedules for tuning schedule config
+					schedC, nextCollection = schedule(s.scheduleDelay)
+				}
+			}
+
+			continue
+		case <-ctx.Done():
+			return
+		}
+
+		s.waiterL.Lock()
+
+		stats, err := s.c.GarbageCollect(ctx)
+		last := time.Now()
+		if err != nil {
+			log.G(ctx).WithError(err).Error("garbage collection failed")
+
+			// Reschedule garbage collection for the same duration + 1 second
+			schedC, nextCollection = schedule(nextCollection.Sub(*lastCollection) + time.Second)
+
+			// Update the last collection time even though a failure occurred
+			lastCollection = &last
+
+			for _, w := range s.waiters {
+				close(w)
+			}
+			s.waiters = nil
+			s.waiterL.Unlock()
+			continue
+		}
+
+		log.G(ctx).WithField("d", stats.Elapsed()).Debug("garbage collected")
+
+		gcTime += stats.Elapsed()
+		collections++
+		triggered = false
+		deletions = 0
+		mutations = 0
+
+		// Calculate the new interval with updated times
+		if s.pauseThreshold > 0.0 {
+			// Set interval to the average gc time divided by the pause threshold.
+			// This algorithm ensures that a gc is scheduled to allow enough
+			// runtime in between collections to reach the pause threshold.
+			// The pause threshold is always 0.0 < threshold <= 0.5
+			avg := float64(gcTime) / float64(collections)
+			interval = time.Duration(avg/s.pauseThreshold - avg)
+		}
+
+		lastCollection = &last
+		schedC, nextCollection = schedule(interval)
+
+		for _, w := range s.waiters {
+			w <- stats
+		}
+		s.waiters = nil
+		s.waiterL.Unlock()
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/grpc.go b/vendor/github.com/containerd/containerd/grpc.go
index c56eeada3..05fd5cca2 100644
--- a/vendor/github.com/containerd/containerd/grpc.go
+++ b/vendor/github.com/containerd/containerd/grpc.go
@@ -1,3 +1,19 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/identifiers/validate.go b/vendor/github.com/containerd/containerd/identifiers/validate.go index 37f9b72ff..c58513c02 100644 --- a/vendor/github.com/containerd/containerd/identifiers/validate.go +++ b/vendor/github.com/containerd/containerd/identifiers/validate.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + // Package identifiers provides common validation for identifiers and keys // across containerd. // diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go index 6e9f4bd19..5ae6db232 100644 --- a/vendor/github.com/containerd/containerd/image.go +++ b/vendor/github.com/containerd/containerd/image.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/image_store.go b/vendor/github.com/containerd/containerd/image_store.go index 9a3aafc84..3676bdada 100644 --- a/vendor/github.com/containerd/containerd/image_store.go +++ b/vendor/github.com/containerd/containerd/image_store.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go index 63acdb722..b95251c57 100644 --- a/vendor/github.com/containerd/containerd/images/handlers.go +++ b/vendor/github.com/containerd/containerd/images/handlers.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package images import ( @@ -5,6 +21,7 @@ import ( "fmt" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/platforms" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -128,8 +145,78 @@ func Dispatch(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) // // One can also replace this with another implementation to allow descending of // arbitrary types. -func ChildrenHandler(provider content.Provider, platform string) HandlerFunc { +func ChildrenHandler(provider content.Provider) HandlerFunc { return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - return Children(ctx, provider, desc, platform) + return Children(ctx, provider, desc) + } +} + +// SetChildrenLabels is a handler wrapper which sets labels for the content on +// the children returned by the handler and passes through the children. +// Must follow a handler that returns the children to be labeled. +func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + if len(children) > 0 { + info := content.Info{ + Digest: desc.Digest, + Labels: map[string]string{}, + } + fields := []string{} + for i, ch := range children { + info.Labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = ch.Digest.String() + fields = append(fields, fmt.Sprintf("labels.containerd.io/gc.ref.content.%d", i)) + } + + _, err := manager.Update(ctx, info, fields...) + if err != nil { + return nil, err + } + } + + return children, err + } +} + +// FilterPlatform is a handler wrapper which limits the descriptors returned +// by a handler to a single platform. +func FilterPlatform(platform string, f HandlerFunc) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + var descs []ocispec.Descriptor + if platform != "" && isMultiPlatform(desc.MediaType) { + matcher, err := platforms.Parse(platform) + if err != nil { + return nil, err + } + + for _, d := range children { + if d.Platform == nil || matcher.Match(*d.Platform) { + descs = append(descs, d) + } + } + } else { + descs = children + } + + return descs, nil + } + +} + +func isMultiPlatform(mediaType string) bool { + switch mediaType { + case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + return true + default: + return false } } diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go index 5fea0dcfc..cdcf0af34 100644 --- a/vendor/github.com/containerd/containerd/images/image.go +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package images import ( @@ -102,7 +118,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor } size += desc.Size return nil, nil - }), ChildrenHandler(provider, platform)), image.Target) + }), FilterPlatform(platform, ChildrenHandler(provider))), image.Target) } // Manifest resolves a manifest from the image for the given platform. @@ -238,7 +254,7 @@ func Platforms(ctx context.Context, provider content.Provider, image ocispec.Des platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) } return nil, nil - }), ChildrenHandler(provider, "")), image) + }), ChildrenHandler(provider)), image) } // Check returns nil if the all components of an image are available in the @@ -285,7 +301,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip } // Children returns the immediate children of content described by the descriptor. -func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor, platform string) ([]ocispec.Descriptor, error) { +func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { var descs []ocispec.Descriptor switch desc.MediaType { case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: @@ -314,21 +330,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr return nil, err } - if platform != "" { - matcher, err := platforms.Parse(platform) - if err != nil { - return nil, err - } - - for _, d := range index.Manifests { - if d.Platform == nil || matcher.Match(*d.Platform) { - descs = append(descs, d) - } - } - } else { - descs = append(descs, index.Manifests...) - } - + descs = append(descs, index.Manifests...) case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip, MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, diff --git a/vendor/github.com/containerd/containerd/images/importexport.go b/vendor/github.com/containerd/containerd/images/importexport.go index f8cf742ba..04a55fd38 100644 --- a/vendor/github.com/containerd/containerd/images/importexport.go +++ b/vendor/github.com/containerd/containerd/images/importexport.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package images import ( @@ -17,5 +33,5 @@ type Importer interface { // Exporter is the interface for image exporter. type Exporter interface { // Export exports an image to a tar stream. 
- Export(ctx context.Context, store content.Store, desc ocispec.Descriptor, writer io.Writer) error + Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error } diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go index f01f615c4..ca4ca071b 100644 --- a/vendor/github.com/containerd/containerd/images/mediatypes.go +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package images // mediatype definitions for image components handled in containerd. diff --git a/vendor/github.com/containerd/containerd/labels/validate.go b/vendor/github.com/containerd/containerd/labels/validate.go new file mode 100644 index 000000000..0de461663 --- /dev/null +++ b/vendor/github.com/containerd/containerd/labels/validate.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package labels + +import ( + "github.com/containerd/containerd/errdefs" + "github.com/pkg/errors" +) + +const ( + maxSize = 4096 +) + +// Validate a label's key and value are under 4096 bytes +func Validate(k, v string) error { + if (len(k) + len(v)) > maxSize { + if len(k) > 10 { + k = k[:10] + } + return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k) + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/lease.go b/vendor/github.com/containerd/containerd/lease.go index 8eb3bc0fe..6187c1df7 100644 --- a/vendor/github.com/containerd/containerd/lease.go +++ b/vendor/github.com/containerd/containerd/lease.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/leases/context.go b/vendor/github.com/containerd/containerd/leases/context.go index cfd7e4a46..b66b154ce 100644 --- a/vendor/github.com/containerd/containerd/leases/context.go +++ b/vendor/github.com/containerd/containerd/leases/context.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package leases import "context" diff --git a/vendor/github.com/containerd/containerd/leases/grpc.go b/vendor/github.com/containerd/containerd/leases/grpc.go index cea5b25fe..284924e7d 100644 --- a/vendor/github.com/containerd/containerd/leases/grpc.go +++ b/vendor/github.com/containerd/containerd/leases/grpc.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package leases import ( diff --git a/vendor/github.com/containerd/containerd/linux/bundle.go b/vendor/github.com/containerd/containerd/linux/bundle.go new file mode 100644 index 000000000..735547c5c --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/bundle.go @@ -0,0 +1,152 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package linux + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/containerd/events/exchange" + "github.com/containerd/containerd/linux/runctypes" + "github.com/containerd/containerd/linux/shim" + "github.com/containerd/containerd/linux/shim/client" + "github.com/pkg/errors" +) + +// loadBundle loads an existing bundle from disk +func loadBundle(id, path, workdir string) *bundle { + return &bundle{ + id: id, + path: path, + workDir: workdir, + } +} + +// newBundle creates a new bundle on disk at the provided path for the given id +func newBundle(id, path, workDir string, spec []byte) (b *bundle, err error) { + if err := os.MkdirAll(path, 0711); err != nil { + return nil, err + } + path = filepath.Join(path, id) + defer func() { + if err != nil { + os.RemoveAll(path) + } + }() + workDir = filepath.Join(workDir, id) + if err := os.MkdirAll(workDir, 0711); err != nil { + return nil, err + } + defer func() { + if err != nil { + os.RemoveAll(workDir) + } + }() + + if err := os.Mkdir(path, 0711); err != nil { + return nil, err + } + if err := os.Mkdir(filepath.Join(path, "rootfs"), 0711); err != nil { + return nil, err + } + err = ioutil.WriteFile(filepath.Join(path, configFilename), spec, 0666) + return &bundle{ + id: id, + path: path, + workDir: workDir, + }, err +} + +type bundle struct { + id string + path string + workDir string +} + +// ShimOpt specifies shim options for initialization and connection +type ShimOpt func(*bundle, string, *runctypes.RuncOptions) (shim.Config, client.Opt) + +// ShimRemote is a ShimOpt for connecting and starting a remote shim +func ShimRemote(shimBinary, daemonAddress, cgroup string, debug bool, exitHandler func()) ShimOpt { + return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) { + return b.shimConfig(ns, ropts), + client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, debug, exitHandler) + } +} + +// ShimLocal is a ShimOpt for using an in process shim implementation +func ShimLocal(exchange *exchange.Exchange) ShimOpt { + return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) { + return b.shimConfig(ns, ropts), client.WithLocal(exchange) + } +} + +// ShimConnect is a ShimOpt for connecting to an existing remote shim +func ShimConnect(onClose func()) ShimOpt { + return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) { + return b.shimConfig(ns, ropts), client.WithConnect(b.shimAddress(ns), onClose) + } +} + +// NewShimClient connects to the shim managing the bundle and tasks creating it if needed +func (b *bundle) NewShimClient(ctx context.Context, namespace string, getClientOpts ShimOpt, runcOpts *runctypes.RuncOptions) (*client.Client, error) { + cfg, opt := getClientOpts(b, namespace, runcOpts) + return client.New(ctx, cfg, opt) +} + +// Delete deletes the bundle from disk +func (b *bundle) Delete() error { + err := os.RemoveAll(b.path) + if err == nil { + return os.RemoveAll(b.workDir) + } + // error removing the bundle path; still attempt removing work dir + err2 := os.RemoveAll(b.workDir) + if err2 == nil { + return err + } + return errors.Wrapf(err, "Failed to remove both bundle and workdir locations: %v", err2) +} + +func (b *bundle) shimAddress(namespace string) string { + return filepath.Join(string(filepath.Separator), "containerd-shim", namespace, b.id, "shim.sock") +} + +func (b *bundle) shimConfig(namespace string, runcOptions *runctypes.RuncOptions) shim.Config { + var ( + 
criuPath string + runtimeRoot string + systemdCgroup bool + ) + if runcOptions != nil { + criuPath = runcOptions.CriuPath + systemdCgroup = runcOptions.SystemdCgroup + runtimeRoot = runcOptions.RuntimeRoot + } + return shim.Config{ + Path: b.path, + WorkDir: b.workDir, + Namespace: namespace, + Criu: criuPath, + RuntimeRoot: runtimeRoot, + SystemdCgroup: systemdCgroup, + } +} diff --git a/vendor/github.com/containerd/containerd/linux/proc/deleted_state.go b/vendor/github.com/containerd/containerd/linux/proc/deleted_state.go new file mode 100644 index 000000000..87a3fd0c0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/proc/deleted_state.go @@ -0,0 +1,70 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proc + +import ( + "context" + + "github.com/containerd/console" + google_protobuf "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +type deletedState struct { +} + +func (s *deletedState) Pause(ctx context.Context) error { + return errors.Errorf("cannot pause a deleted process") +} + +func (s *deletedState) Resume(ctx context.Context) error { + return errors.Errorf("cannot resume a deleted process") +} + +func (s *deletedState) Update(context context.Context, r *google_protobuf.Any) error { + return errors.Errorf("cannot update a deleted process") +} + +func (s *deletedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { + return errors.Errorf("cannot checkpoint a deleted process") +} + +func (s *deletedState) Resize(ws console.WinSize) error { + return errors.Errorf("cannot resize a deleted process") +} + +func (s *deletedState) Start(ctx context.Context) error { + return errors.Errorf("cannot start a deleted process") +} + +func (s *deletedState) Delete(ctx context.Context) error { + return errors.Errorf("cannot delete a deleted process") +} + +func (s *deletedState) Kill(ctx context.Context, sig uint32, all bool) error { + return errors.Errorf("cannot kill a deleted process") +} + +func (s *deletedState) SetExited(status int) { + // no op +} + +func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { + return nil, errors.Errorf("cannot exec in a deleted state") +} diff --git a/vendor/github.com/containerd/containerd/linux/proc/exec.go b/vendor/github.com/containerd/containerd/linux/proc/exec.go new file mode 100644 index 000000000..636879da1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/proc/exec.go @@ -0,0 +1,219 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proc + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "golang.org/x/sys/unix" + + "github.com/containerd/console" + "github.com/containerd/fifo" + runc "github.com/containerd/go-runc" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type execProcess struct { + wg sync.WaitGroup + + State + + mu sync.Mutex + id string + console console.Console + io runc.IO + status int + exited time.Time + pid int + closers []io.Closer + stdin io.Closer + stdio Stdio + path string + spec specs.Process + + parent *Init + waitBlock chan struct{} +} + +func (e *execProcess) Wait() { + <-e.waitBlock +} + +func (e *execProcess) ID() string { + return e.id +} + +func (e *execProcess) Pid() int { + e.mu.Lock() + defer e.mu.Unlock() + return e.pid +} + +func (e *execProcess) ExitStatus() int { + e.mu.Lock() + defer e.mu.Unlock() + return e.status +} + +func (e *execProcess) ExitedAt() time.Time { + e.mu.Lock() + defer e.mu.Unlock() + return e.exited +} + +func (e *execProcess) setExited(status int) { + e.status = status + e.exited = time.Now() + e.parent.platform.ShutdownConsole(context.Background(), e.console) + close(e.waitBlock) +} + +func (e *execProcess) delete(ctx context.Context) error { + e.wg.Wait() + if e.io != nil { + for _, c := range e.closers { + c.Close() + } + e.io.Close() + } + pidfile := filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id)) + // silently ignore error + os.Remove(pidfile) + return nil +} + +func (e *execProcess) resize(ws console.WinSize) error { + if e.console == nil { + return nil + } + return e.console.Resize(ws) +} + +func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error { + pid := e.pid + if pid != 0 { + if err := unix.Kill(pid, syscall.Signal(sig)); err != nil { + return errors.Wrapf(checkKillError(err), "exec kill error") + } + } + return nil +} + +func (e *execProcess) Stdin() io.Closer { + return e.stdin +} + +func (e *execProcess) Stdio() Stdio { + return e.stdio +} + +func (e *execProcess) start(ctx context.Context) (err error) { + var ( + socket *runc.Socket + pidfile = filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id)) + ) + if e.stdio.Terminal { + if socket, err = runc.NewTempConsoleSocket(); err != nil { + return errors.Wrap(err, "failed to create runc console socket") + } + defer socket.Close() + } else if e.stdio.IsNull() { + if e.io, err = runc.NewNullIO(); err != nil { + return errors.Wrap(err, "creating new NULL IO") + } + } else { + if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID); err != nil { + return errors.Wrap(err, "failed to create runc io pipes") + } + } + opts := &runc.ExecOpts{ + PidFile: pidfile, + IO: e.io, + Detach: true, + } + if socket != nil { + opts.ConsoleSocket = socket + } + if err := e.parent.runtime.Exec(ctx, e.parent.id, e.spec, opts); err != nil { + close(e.waitBlock) + return e.parent.runtimeError(err, "OCI runtime exec failed") + } + if e.stdio.Stdin != "" { + sc, err := fifo.OpenFifo(ctx, e.stdio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.Stdin) + } + e.closers = append(e.closers, sc) + e.stdin = sc + } + var copyWaitGroup sync.WaitGroup + if socket != nil { + console, err := socket.ReceiveMaster() + if err != nil { + return errors.Wrap(err, "failed to retrieve console master") + } + if e.console, err = 
e.parent.platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, &copyWaitGroup); err != nil {
+			return errors.Wrap(err, "failed to start console copy")
+		}
+	} else if !e.stdio.IsNull() {
+		if err := copyPipes(ctx, e.io, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, &copyWaitGroup); err != nil {
+			return errors.Wrap(err, "failed to start io pipe copy")
+		}
+	}
+	copyWaitGroup.Wait()
+	pid, err := runc.ReadPidFile(opts.PidFile)
+	if err != nil {
+		return errors.Wrap(err, "failed to retrieve OCI runtime exec pid")
+	}
+	e.pid = pid
+	return nil
+}
+
+func (e *execProcess) Status(ctx context.Context) (string, error) {
+	s, err := e.parent.Status(ctx)
+	if err != nil {
+		return "", err
+	}
+	// if the container as a whole is in the pausing/paused state, then so are
+	// all other processes inside the container; use the container state here
+	switch s {
+	case "paused", "pausing":
+		return s, nil
+	}
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	// if we don't have a pid then the exec process has just been created
+	if e.pid == 0 {
+		return "created", nil
+	}
+	// if we have a pid and it can be signaled, the process is running
+	if err := unix.Kill(e.pid, 0); err == nil {
+		return "running", nil
+	}
+	// else if we have a pid but it can no longer be signaled, it has stopped
+	return "stopped", nil
+}
diff --git a/vendor/github.com/containerd/containerd/linux/proc/exec_state.go b/vendor/github.com/containerd/containerd/linux/proc/exec_state.go
new file mode 100644
index 000000000..617ec0d97
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/proc/exec_state.go
@@ -0,0 +1,188 @@
+// +build !windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package proc + +import ( + "context" + + "github.com/containerd/console" + "github.com/pkg/errors" +) + +type execCreatedState struct { + p *execProcess +} + +func (s *execCreatedState) transition(name string) error { + switch name { + case "running": + s.p.State = &execRunningState{p: s.p} + case "stopped": + s.p.State = &execStoppedState{p: s.p} + case "deleted": + s.p.State = &deletedState{} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *execCreatedState) Resize(ws console.WinSize) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.resize(ws) +} + +func (s *execCreatedState) Start(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + if err := s.p.start(ctx); err != nil { + return err + } + return s.transition("running") +} + +func (s *execCreatedState) Delete(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + if err := s.p.delete(ctx); err != nil { + return err + } + return s.transition("deleted") +} + +func (s *execCreatedState) Kill(ctx context.Context, sig uint32, all bool) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.kill(ctx, sig, all) +} + +func (s *execCreatedState) SetExited(status int) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + s.p.setExited(status) + + if err := s.transition("stopped"); err != nil { + panic(err) + } +} + +type execRunningState struct { + p *execProcess +} + +func (s *execRunningState) transition(name string) error { + switch name { + case "stopped": + s.p.State = &execStoppedState{p: s.p} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *execRunningState) Resize(ws console.WinSize) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.resize(ws) +} + +func (s *execRunningState) Start(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot start a running process") +} + +func (s *execRunningState) Delete(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot delete a running process") +} + +func (s *execRunningState) Kill(ctx context.Context, sig uint32, all bool) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.kill(ctx, sig, all) +} + +func (s *execRunningState) SetExited(status int) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + s.p.setExited(status) + + if err := s.transition("stopped"); err != nil { + panic(err) + } +} + +type execStoppedState struct { + p *execProcess +} + +func (s *execStoppedState) transition(name string) error { + switch name { + case "deleted": + s.p.State = &deletedState{} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *execStoppedState) Resize(ws console.WinSize) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot resize a stopped container") +} + +func (s *execStoppedState) Start(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot start a stopped process") +} + +func (s *execStoppedState) Delete(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + if err := s.p.delete(ctx); err != nil { + return err + } + return s.transition("deleted") +} + +func (s *execStoppedState) Kill(ctx context.Context, sig uint32, all bool) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.kill(ctx, sig, all) +} + +func (s 
*execStoppedState) SetExited(status int) { + // no op +} diff --git a/vendor/github.com/containerd/containerd/linux/proc/init.go b/vendor/github.com/containerd/containerd/linux/proc/init.go new file mode 100644 index 000000000..82f9ebdf2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/proc/init.go @@ -0,0 +1,451 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proc + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "github.com/containerd/console" + "github.com/containerd/containerd/linux/runctypes" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/fifo" + runc "github.com/containerd/go-runc" + "github.com/containerd/typeurl" + google_protobuf "github.com/gogo/protobuf/types" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitPidFile name of the file that contains the init pid +const InitPidFile = "init.pid" + +// Init represents an initial process for a container +type Init struct { + wg sync.WaitGroup + initState + + // mu is used to ensure that `Start()` and `Exited()` calls return in + // the right order when invoked in separate go routines. + // This is the case within the shim implementation as it makes use of + // the reaper interface. + mu sync.Mutex + + waitBlock chan struct{} + + workDir string + + id string + bundle string + console console.Console + platform Platform + io runc.IO + runtime *runc.Runc + status int + exited time.Time + pid int + closers []io.Closer + stdin io.Closer + stdio Stdio + rootfs string + IoUID int + IoGID int +} + +// NewRunc returns a new runc instance for a process +func NewRunc(root, path, namespace, runtime, criu string, systemd bool) *runc.Runc { + if root == "" { + root = RuncRoot + } + return &runc.Runc{ + Command: runtime, + Log: filepath.Join(path, "log.json"), + LogFormat: runc.JSON, + PdeathSignal: syscall.SIGKILL, + Root: filepath.Join(root, namespace), + Criu: criu, + SystemdCgroup: systemd, + } +} + +// New returns a new init process +func New(context context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform Platform, r *CreateConfig) (*Init, error) { + var success bool + + var options runctypes.CreateOptions + if r.Options != nil { + v, err := typeurl.UnmarshalAny(r.Options) + if err != nil { + return nil, err + } + options = *v.(*runctypes.CreateOptions) + } + + rootfs := filepath.Join(path, "rootfs") + // count the number of successful mounts so we can undo + // what was actually done rather than what should have been + // done. 
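+	// (In this implementation "success" is a single flag rather than a
+	// count: the deferred cleanup below unmounts everything unless the
+	// whole setup sequence completed.)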
+	defer func() {
+		if success {
+			return
+		}
+		if err2 := mount.UnmountAll(rootfs, 0); err2 != nil {
+			log.G(context).WithError(err2).Warn("Failed to cleanup rootfs mount")
+		}
+	}()
+	for _, rm := range r.Rootfs {
+		m := &mount.Mount{
+			Type:    rm.Type,
+			Source:  rm.Source,
+			Options: rm.Options,
+		}
+		if err := m.Mount(rootfs); err != nil {
+			return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m)
+		}
+	}
+	runtime := NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup)
+	p := &Init{
+		id:       r.ID,
+		bundle:   r.Bundle,
+		runtime:  runtime,
+		platform: platform,
+		stdio: Stdio{
+			Stdin:    r.Stdin,
+			Stdout:   r.Stdout,
+			Stderr:   r.Stderr,
+			Terminal: r.Terminal,
+		},
+		rootfs:    rootfs,
+		workDir:   workDir,
+		status:    0,
+		waitBlock: make(chan struct{}),
+		IoUID:     int(options.IoUid),
+		IoGID:     int(options.IoGid),
+	}
+	p.initState = &createdState{p: p}
+	var (
+		err    error
+		socket *runc.Socket
+	)
+	if r.Terminal {
+		if socket, err = runc.NewTempConsoleSocket(); err != nil {
+			return nil, errors.Wrap(err, "failed to create OCI runtime console socket")
+		}
+		defer socket.Close()
+	} else if hasNoIO(r) {
+		if p.io, err = runc.NewNullIO(); err != nil {
+			return nil, errors.Wrap(err, "creating new NULL IO")
+		}
+	} else {
+		if p.io, err = runc.NewPipeIO(int(options.IoUid), int(options.IoGid)); err != nil {
+			return nil, errors.Wrap(err, "failed to create OCI runtime io pipes")
+		}
+	}
+	pidFile := filepath.Join(path, InitPidFile)
+	if r.Checkpoint != "" {
+		opts := &runc.RestoreOpts{
+			CheckpointOpts: runc.CheckpointOpts{
+				ImagePath:  r.Checkpoint,
+				WorkDir:    p.workDir,
+				ParentPath: r.ParentCheckpoint,
+			},
+			PidFile:     pidFile,
+			IO:          p.io,
+			NoPivot:     options.NoPivotRoot,
+			Detach:      true,
+			NoSubreaper: true,
+		}
+		p.initState = &createdCheckpointState{
+			p:    p,
+			opts: opts,
+		}
+		success = true
+		return p, nil
+	}
+	opts := &runc.CreateOpts{
+		PidFile:      pidFile,
+		IO:           p.io,
+		NoPivot:      options.NoPivotRoot,
+		NoNewKeyring: options.NoNewKeyring,
+	}
+	if socket != nil {
+		opts.ConsoleSocket = socket
+	}
+	if err := p.runtime.Create(context, r.ID, r.Bundle, opts); err != nil {
+		return nil, p.runtimeError(err, "OCI runtime create failed")
+	}
+	if r.Stdin != "" {
+		sc, err := fifo.OpenFifo(context, r.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to open stdin fifo %s", r.Stdin)
+		}
+		p.stdin = sc
+		p.closers = append(p.closers, sc)
+	}
+	var copyWaitGroup sync.WaitGroup
+	if socket != nil {
+		console, err := socket.ReceiveMaster()
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to retrieve console master")
+		}
+		console, err = platform.CopyConsole(context, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, &copyWaitGroup)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to start console copy")
+		}
+		p.console = console
+	} else if !hasNoIO(r) {
+		if err := copyPipes(context, p.io, r.Stdin, r.Stdout, r.Stderr, &p.wg, &copyWaitGroup); err != nil {
+			return nil, errors.Wrap(err, "failed to start io pipe copy")
+		}
+	}
+
+	copyWaitGroup.Wait()
+	pid, err := runc.ReadPidFile(pidFile)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to retrieve OCI runtime container pid")
+	}
+	p.pid = pid
+	success = true
+	return p, nil
+}
+
+// Wait for the process to exit
+func (p *Init) Wait() {
+	<-p.waitBlock
+}
+
+// ID of the process
+func (p *Init) ID() string {
+	return p.id
+}
+
+// Pid of the process
+func (p *Init) Pid() int {
+	return p.pid
+}
+
+// ExitStatus of the process
+func (p *Init) ExitStatus() int {
+
p.mu.Lock() + defer p.mu.Unlock() + return p.status +} + +// ExitedAt returns the time when the process exited +func (p *Init) ExitedAt() time.Time { + p.mu.Lock() + defer p.mu.Unlock() + return p.exited +} + +// Status of the process +func (p *Init) Status(ctx context.Context) (string, error) { + p.mu.Lock() + defer p.mu.Unlock() + c, err := p.runtime.State(ctx, p.id) + if err != nil { + if os.IsNotExist(err) { + return "stopped", nil + } + return "", p.runtimeError(err, "OCI runtime state failed") + } + return c.Status, nil +} + +func (p *Init) start(context context.Context) error { + err := p.runtime.Start(context, p.id) + return p.runtimeError(err, "OCI runtime start failed") +} + +func (p *Init) setExited(status int) { + p.exited = time.Now() + p.status = status + p.platform.ShutdownConsole(context.Background(), p.console) + close(p.waitBlock) +} + +func (p *Init) delete(context context.Context) error { + p.KillAll(context) + p.wg.Wait() + err := p.runtime.Delete(context, p.id, nil) + // ignore errors if a runtime has already deleted the process + // but we still hold metadata and pipes + // + // this is common during a checkpoint: runc deletes the container state + // after a checkpoint and the container no longer exists within runc + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + err = nil + } else { + err = p.runtimeError(err, "failed to delete task") + } + } + if p.io != nil { + for _, c := range p.closers { + c.Close() + } + p.io.Close() + } + if err2 := mount.UnmountAll(p.rootfs, 0); err2 != nil { + log.G(context).WithError(err2).Warn("failed to cleanup rootfs mount") + if err == nil { + err = errors.Wrap(err2, "failed rootfs umount") + } + } + return err +} + +func (p *Init) resize(ws console.WinSize) error { + if p.console == nil { + return nil + } + return p.console.Resize(ws) +} + +func (p *Init) pause(context context.Context) error { + err := p.runtime.Pause(context, p.id) + return p.runtimeError(err, "OCI runtime pause failed") +} + +func (p *Init) resume(context context.Context) error { + err := p.runtime.Resume(context, p.id) + return p.runtimeError(err, "OCI runtime resume failed") +} + +func (p *Init) kill(context context.Context, signal uint32, all bool) error { + err := p.runtime.Kill(context, p.id, int(signal), &runc.KillOpts{ + All: all, + }) + return checkKillError(err) +} + +// KillAll kills all processes belonging to the init process +func (p *Init) KillAll(context context.Context) error { + err := p.runtime.Kill(context, p.id, int(syscall.SIGKILL), &runc.KillOpts{ + All: true, + }) + return p.runtimeError(err, "OCI runtime killall failed") +} + +// Stdin of the process +func (p *Init) Stdin() io.Closer { + return p.stdin +} + +// Runtime returns the OCI runtime configured for the init process +func (p *Init) Runtime() *runc.Runc { + return p.runtime +} + +// exec returns a new exec'd process +func (p *Init) exec(context context.Context, path string, r *ExecConfig) (Process, error) { + // process exec request + var spec specs.Process + if err := json.Unmarshal(r.Spec.Value, &spec); err != nil { + return nil, err + } + spec.Terminal = r.Terminal + + e := &execProcess{ + id: r.ID, + path: path, + parent: p, + spec: spec, + stdio: Stdio{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }, + waitBlock: make(chan struct{}), + } + e.State = &execCreatedState{p: e} + return e, nil +} + +func (p *Init) checkpoint(context context.Context, r *CheckpointConfig) error { + var options runctypes.CheckpointOptions + if
r.Options != nil { + v, err := typeurl.UnmarshalAny(r.Options) + if err != nil { + return err + } + options = *v.(*runctypes.CheckpointOptions) + } + var actions []runc.CheckpointAction + if !options.Exit { + actions = append(actions, runc.LeaveRunning) + } + work := filepath.Join(p.workDir, "criu-work") + defer os.RemoveAll(work) + if err := p.runtime.Checkpoint(context, p.id, &runc.CheckpointOpts{ + WorkDir: work, + ImagePath: r.Path, + AllowOpenTCP: options.OpenTcp, + AllowExternalUnixSockets: options.ExternalUnixSockets, + AllowTerminal: options.Terminal, + FileLocks: options.FileLocks, + EmptyNamespaces: options.EmptyNamespaces, + }, actions...); err != nil { + dumpLog := filepath.Join(p.bundle, "criu-dump.log") + if cerr := copyFile(dumpLog, filepath.Join(work, "dump.log")); cerr != nil { + log.G(context).Error(err) + } + return fmt.Errorf("%s path= %s", criuError(err), dumpLog) + } + return nil +} + +func (p *Init) update(context context.Context, r *google_protobuf.Any) error { + var resources specs.LinuxResources + if err := json.Unmarshal(r.Value, &resources); err != nil { + return err + } + return p.runtime.Update(context, p.id, &resources) +} + +// Stdio of the process +func (p *Init) Stdio() Stdio { + return p.stdio +} + +func (p *Init) runtimeError(rErr error, msg string) error { + if rErr == nil { + return nil + } + + rMsg, err := getLastRuntimeError(p.runtime) + switch { + case err != nil: + return errors.Wrapf(rErr, "%s: %s (%s)", msg, "unable to retrieve OCI runtime error", err.Error()) + case rMsg == "": + return errors.Wrap(rErr, msg) + default: + return errors.Errorf("%s: %s", msg, rMsg) + } +} diff --git a/vendor/github.com/containerd/containerd/linux/proc/init_state.go b/vendor/github.com/containerd/containerd/linux/proc/init_state.go new file mode 100644 index 000000000..8944d6192 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/proc/init_state.go @@ -0,0 +1,522 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package proc + +import ( + "context" + "sync" + "syscall" + + "github.com/containerd/console" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/fifo" + runc "github.com/containerd/go-runc" + google_protobuf "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +type initState interface { + State + + Pause(context.Context) error + Resume(context.Context) error + Update(context.Context, *google_protobuf.Any) error + Checkpoint(context.Context, *CheckpointConfig) error + Exec(context.Context, string, *ExecConfig) (Process, error) +} + +type createdState struct { + p *Init +} + +func (s *createdState) transition(name string) error { + switch name { + case "running": + s.p.initState = &runningState{p: s.p} + case "stopped": + s.p.initState = &stoppedState{p: s.p} + case "deleted": + s.p.initState = &deletedState{} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *createdState) Pause(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot pause task in created state") +} + +func (s *createdState) Resume(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot resume task in created state") +} + +func (s *createdState) Update(context context.Context, r *google_protobuf.Any) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.update(context, r) +} + +func (s *createdState) Checkpoint(context context.Context, r *CheckpointConfig) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot checkpoint a task in created state") +} + +func (s *createdState) Resize(ws console.WinSize) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.resize(ws) +} + +func (s *createdState) Start(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + if err := s.p.start(ctx); err != nil { + return err + } + return s.transition("running") +} + +func (s *createdState) Delete(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + if err := s.p.delete(ctx); err != nil { + return err + } + return s.transition("deleted") +} + +func (s *createdState) Kill(ctx context.Context, sig uint32, all bool) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.kill(ctx, sig, all) +} + +func (s *createdState) SetExited(status int) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + s.p.setExited(status) + + if err := s.transition("stopped"); err != nil { + panic(err) + } +} + +func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + return s.p.exec(ctx, path, r) +} + +type createdCheckpointState struct { + p *Init + opts *runc.RestoreOpts +} + +func (s *createdCheckpointState) transition(name string) error { + switch name { + case "running": + s.p.initState = &runningState{p: s.p} + case "stopped": + s.p.initState = &stoppedState{p: s.p} + case "deleted": + s.p.initState = &deletedState{} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *createdCheckpointState) Pause(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot pause task in created state") +} + +func (s *createdCheckpointState) Resume(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot resume task in created state") +} + +func (s *createdCheckpointState) 
Update(context context.Context, r *google_protobuf.Any) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.update(context, r) +} + +func (s *createdCheckpointState) Checkpoint(context context.Context, r *CheckpointConfig) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot checkpoint a task in created state") +} + +func (s *createdCheckpointState) Resize(ws console.WinSize) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.resize(ws) +} + +func (s *createdCheckpointState) Start(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + p := s.p + if _, err := s.p.runtime.Restore(ctx, p.id, p.bundle, s.opts); err != nil { + return p.runtimeError(err, "OCI runtime restore failed") + } + sio := p.stdio + if sio.Stdin != "" { + sc, err := fifo.OpenFifo(ctx, sio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return errors.Wrapf(err, "failed to open stdin fifo %s", sio.Stdin) + } + p.stdin = sc + p.closers = append(p.closers, sc) + } + var copyWaitGroup sync.WaitGroup + if !sio.IsNull() { + if err := copyPipes(ctx, p.io, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, ©WaitGroup); err != nil { + return errors.Wrap(err, "failed to start io pipe copy") + } + } + + copyWaitGroup.Wait() + pid, err := runc.ReadPidFile(s.opts.PidFile) + if err != nil { + return errors.Wrap(err, "failed to retrieve OCI runtime container pid") + } + p.pid = pid + + return s.transition("running") +} + +func (s *createdCheckpointState) Delete(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + if err := s.p.delete(ctx); err != nil { + return err + } + return s.transition("deleted") +} + +func (s *createdCheckpointState) Kill(ctx context.Context, sig uint32, all bool) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.kill(ctx, sig, all) +} + +func (s *createdCheckpointState) SetExited(status int) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + s.p.setExited(status) + + if err := s.transition("stopped"); err != nil { + panic(err) + } +} + +func (s *createdCheckpointState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return nil, errors.Errorf("cannot exec in a created state") +} + +type runningState struct { + p *Init +} + +func (s *runningState) transition(name string) error { + switch name { + case "stopped": + s.p.initState = &stoppedState{p: s.p} + case "paused": + s.p.initState = &pausedState{p: s.p} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *runningState) Pause(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + if err := s.p.pause(ctx); err != nil { + return err + } + return s.transition("paused") +} + +func (s *runningState) Resume(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot resume a running process") +} + +func (s *runningState) Update(context context.Context, r *google_protobuf.Any) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.update(context, r) +} + +func (s *runningState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.checkpoint(ctx, r) +} + +func (s *runningState) Resize(ws console.WinSize) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.resize(ws) +} + +func (s *runningState) Start(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return 
errors.Errorf("cannot start a running process") +} + +func (s *runningState) Delete(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot delete a running process") +} + +func (s *runningState) Kill(ctx context.Context, sig uint32, all bool) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.kill(ctx, sig, all) +} + +func (s *runningState) SetExited(status int) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + s.p.setExited(status) + + if err := s.transition("stopped"); err != nil { + panic(err) + } +} + +func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + return s.p.exec(ctx, path, r) +} + +type pausedState struct { + p *Init +} + +func (s *pausedState) transition(name string) error { + switch name { + case "running": + s.p.initState = &runningState{p: s.p} + case "stopped": + s.p.initState = &stoppedState{p: s.p} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *pausedState) Pause(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot pause a paused container") +} + +func (s *pausedState) Resume(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + if err := s.p.resume(ctx); err != nil { + return err + } + return s.transition("running") +} + +func (s *pausedState) Update(context context.Context, r *google_protobuf.Any) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.update(context, r) +} + +func (s *pausedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.checkpoint(ctx, r) +} + +func (s *pausedState) Resize(ws console.WinSize) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.resize(ws) +} + +func (s *pausedState) Start(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot start a paused process") +} + +func (s *pausedState) Delete(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot delete a paused process") +} + +func (s *pausedState) Kill(ctx context.Context, sig uint32, all bool) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return s.p.kill(ctx, sig, all) +} + +func (s *pausedState) SetExited(status int) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + s.p.setExited(status) + + if err := s.transition("stopped"); err != nil { + panic(err) + } +} + +func (s *pausedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return nil, errors.Errorf("cannot exec in a paused state") +} + +type stoppedState struct { + p *Init +} + +func (s *stoppedState) transition(name string) error { + switch name { + case "deleted": + s.p.initState = &deletedState{} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *stoppedState) Pause(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot pause a stopped container") +} + +func (s *stoppedState) Resume(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot resume a stopped container") +} + +func (s *stoppedState) Update(context context.Context, r *google_protobuf.Any) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot update 
a stopped container") +} + +func (s *stoppedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot checkpoint a stopped container") +} + +func (s *stoppedState) Resize(ws console.WinSize) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot resize a stopped container") +} + +func (s *stoppedState) Start(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return errors.Errorf("cannot start a stopped process") +} + +func (s *stoppedState) Delete(ctx context.Context) error { + s.p.mu.Lock() + defer s.p.mu.Unlock() + if err := s.p.delete(ctx); err != nil { + return err + } + return s.transition("deleted") +} + +func (s *stoppedState) Kill(ctx context.Context, sig uint32, all bool) error { + return errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s not found", s.p.id) +} + +func (s *stoppedState) SetExited(status int) { + // no op +} + +func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (Process, error) { + s.p.mu.Lock() + defer s.p.mu.Unlock() + + return nil, errors.Errorf("cannot exec in a stopped state") +} diff --git a/vendor/github.com/containerd/containerd/linux/proc/io.go b/vendor/github.com/containerd/containerd/linux/proc/io.go new file mode 100644 index 000000000..335b8b89c --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/proc/io.go @@ -0,0 +1,98 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package proc + +import ( + "context" + "fmt" + "io" + "sync" + "syscall" + + "github.com/containerd/fifo" + runc "github.com/containerd/go-runc" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32<<10) + return &buffer + }, +} + +func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error { + for name, dest := range map[string]func(wc io.WriteCloser, rc io.Closer){ + stdout: func(wc io.WriteCloser, rc io.Closer) { + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(wc, rio.Stdout(), *p) + wg.Done() + wc.Close() + rc.Close() + }() + }, + stderr: func(wc io.WriteCloser, rc io.Closer) { + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(wc, rio.Stderr(), *p) + wg.Done() + wc.Close() + rc.Close() + }() + }, + } { + fw, err := fifo.OpenFifo(ctx, name, syscall.O_WRONLY, 0) + if err != nil { + return fmt.Errorf("containerd-shim: opening %s failed: %s", name, err) + } + fr, err := fifo.OpenFifo(ctx, name, syscall.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("containerd-shim: opening %s failed: %s", name, err) + } + dest(fw, fr) + } + if stdin == "" { + rio.Stdin().Close() + return nil + } + f, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("containerd-shim: opening %s failed: %s", stdin, err) + } + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(rio.Stdin(), f, *p) + rio.Stdin().Close() + f.Close() + }() + return nil +} diff --git a/vendor/github.com/containerd/containerd/linux/proc/process.go b/vendor/github.com/containerd/containerd/linux/proc/process.go new file mode 100644 index 000000000..135f9962b --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/proc/process.go @@ -0,0 +1,105 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package proc + +import ( + "context" + "io" + "sync" + "time" + + "github.com/containerd/console" + "github.com/pkg/errors" +) + +// RuncRoot is the path to the root runc state directory +const RuncRoot = "/run/containerd/runc" + +// Stdio of a process +type Stdio struct { + Stdin string + Stdout string + Stderr string + Terminal bool +} + +// IsNull returns true if the stdio is not defined +func (s Stdio) IsNull() bool { + return s.Stdin == "" && s.Stdout == "" && s.Stderr == "" +} + +// Process on a Linux system +type Process interface { + State + // ID returns the id for the process + ID() string + // Pid returns the pid for the process + Pid() int + // ExitStatus returns the exit status + ExitStatus() int + // ExitedAt is the time the process exited + ExitedAt() time.Time + // Stdin returns the process STDIN + Stdin() io.Closer + // Stdio returns io information for the container + Stdio() Stdio + // Status returns the process status + Status(context.Context) (string, error) + // Wait blocks until the process has exited + Wait() +} + +// State of a process +type State interface { + // Resize resizes the process console + Resize(ws console.WinSize) error + // Start execution of the process + Start(context.Context) error + // Delete deletes the process and its resources + Delete(context.Context) error + // Kill kills the process + Kill(context.Context, uint32, bool) error + // SetExited sets the exit status for the process + SetExited(status int) +} + +func stateName(v interface{}) string { + switch v.(type) { + case *runningState, *execRunningState: + return "running" + case *createdState, *execCreatedState, *createdCheckpointState: + return "created" + case *pausedState: + return "paused" + case *deletedState: + return "deleted" + case *stoppedState: + return "stopped" + } + panic(errors.Errorf("invalid state %v", v)) +} + +// Platform handles platform-specific behavior that may differ across +// platform implementations +type Platform interface { + CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, + wg, cwg *sync.WaitGroup) (console.Console, error) + ShutdownConsole(ctx context.Context, console console.Console) error + Close() error +} diff --git a/vendor/github.com/containerd/containerd/linux/proc/types.go b/vendor/github.com/containerd/containerd/linux/proc/types.go new file mode 100644 index 000000000..0cc123fd8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/proc/types.go @@ -0,0 +1,60 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package proc + +import ( + google_protobuf "github.com/gogo/protobuf/types" +) + +// Mount holds filesystem mount configuration +type Mount struct { + Type string + Source string + Target string + Options []string +} + +// CreateConfig holds task creation configuration +type CreateConfig struct { + ID string + Bundle string + Runtime string + Rootfs []Mount + Terminal bool + Stdin string + Stdout string + Stderr string + Checkpoint string + ParentCheckpoint string + Options *google_protobuf.Any +} + +// ExecConfig holds exec creation configuration +type ExecConfig struct { + ID string + Terminal bool + Stdin string + Stdout string + Stderr string + Spec *google_protobuf.Any +} + +// CheckpointConfig holds task checkpoint configuration +type CheckpointConfig struct { + Path string + Options *google_protobuf.Any +} diff --git a/vendor/github.com/containerd/containerd/linux/proc/utils.go b/vendor/github.com/containerd/containerd/linux/proc/utils.go new file mode 100644 index 000000000..3d0334c45 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/proc/utils.go @@ -0,0 +1,104 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proc + +import ( + "encoding/json" + "io" + "os" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + runc "github.com/containerd/go-runc" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// TODO(mlaventure): move to runc package?
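+// getLastRuntimeError reads runc's JSON log file one entry at a time and +// returns the message of the last error-level entry, so callers can attach +// runc's own diagnostics to the error they report.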
+func getLastRuntimeError(r *runc.Runc) (string, error) { + if r.Log == "" { + return "", nil + } + + f, err := os.OpenFile(r.Log, os.O_RDONLY, 0400) + if err != nil { + return "", err + } + // close the log file once we are done scanning it + defer f.Close() + + var ( + errMsg string + log struct { + Level string + Msg string + Time time.Time + } + ) + + dec := json.NewDecoder(f) + for err = nil; err == nil; { + if err = dec.Decode(&log); err != nil && err != io.EOF { + return "", err + } + if log.Level == "error" { + errMsg = strings.TrimSpace(log.Msg) + } + } + + return errMsg, nil +} + +// criuError returns only the first line of the error message from criu; +// callers typically append the location of the criu dump log for context +func criuError(err error) string { + parts := strings.Split(err.Error(), "\n") + return parts[0] +} + +func copyFile(to, from string) error { + ff, err := os.Open(from) + if err != nil { + return err + } + defer ff.Close() + tt, err := os.Create(to) + if err != nil { + return err + } + defer tt.Close() + + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + _, err = io.CopyBuffer(tt, ff, *p) + return err +} + +func checkKillError(err error) error { + if err == nil { + return nil + } + if strings.Contains(err.Error(), "os: process already finished") || err == unix.ESRCH { + return errors.Wrapf(errdefs.ErrNotFound, "process already finished") + } + return errors.Wrapf(err, "unknown error after kill") +} + +func hasNoIO(r *CreateConfig) bool { + return r.Stdin == "" && r.Stdout == "" && r.Stderr == "" +} diff --git a/vendor/github.com/containerd/containerd/linux/process.go b/vendor/github.com/containerd/containerd/linux/process.go new file mode 100644 index 000000000..0790d8a52 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/process.go @@ -0,0 +1,151 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package linux + +import ( + "context" + + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/api/types/task" + "github.com/containerd/containerd/errdefs" + shim "github.com/containerd/containerd/linux/shim/v1" + "github.com/containerd/containerd/runtime" + "github.com/pkg/errors" + "github.com/stevvooe/ttrpc" +) + +// Process implements a linux process +type Process struct { + id string + t *Task +} + +// ID of the process +func (p *Process) ID() string { + return p.id +} + +// Kill sends the provided signal to the underlying process +// +// This method cannot kill all processes in the task; the all flag is +// ignored for a single process +func (p *Process) Kill(ctx context.Context, signal uint32, _ bool) error { + _, err := p.t.shim.Kill(ctx, &shim.KillRequest{ + Signal: signal, + ID: p.id, + }) + if err != nil { + return errdefs.FromGRPC(err) + } + return err +} + +// State of process +func (p *Process) State(ctx context.Context) (runtime.State, error) { + // use the container status for the status of the process + response, err := p.t.shim.State(ctx, &shim.StateRequest{ + ID: p.id, + }) + if err != nil { + if errors.Cause(err) != ttrpc.ErrClosed { + return runtime.State{}, errdefs.FromGRPC(err) + } + + // We treat ttrpc.ErrClosed as the shim being closed, but really this + // likely means that the process no longer exists. We'll have to plumb + // the connection differently if this causes problems. + return runtime.State{}, errdefs.ErrNotFound + } + var status runtime.Status + switch response.Status { + case task.StatusCreated: + status = runtime.CreatedStatus + case task.StatusRunning: + status = runtime.RunningStatus + case task.StatusStopped: + status = runtime.StoppedStatus + case task.StatusPaused: + status = runtime.PausedStatus + case task.StatusPausing: + status = runtime.PausingStatus + } + return runtime.State{ + Pid: response.Pid, + Status: status, + Stdin: response.Stdin, + Stdout: response.Stdout, + Stderr: response.Stderr, + Terminal: response.Terminal, + ExitStatus: response.ExitStatus, + }, nil +} + +// ResizePty changes the size of the process's PTY to the provided width and height +func (p *Process) ResizePty(ctx context.Context, size runtime.ConsoleSize) error { + _, err := p.t.shim.ResizePty(ctx, &shim.ResizePtyRequest{ + ID: p.id, + Width: size.Width, + Height: size.Height, + }) + if err != nil { + err = errdefs.FromGRPC(err) + } + return err +} + +// CloseIO closes the provided IO pipe for the process +func (p *Process) CloseIO(ctx context.Context) error { + _, err := p.t.shim.CloseIO(ctx, &shim.CloseIORequest{ + ID: p.id, + Stdin: true, + }) + if err != nil { + return errdefs.FromGRPC(err) + } + return nil +} + +// Start the process +func (p *Process) Start(ctx context.Context) error { + r, err := p.t.shim.Start(ctx, &shim.StartRequest{ + ID: p.id, + }) + if err != nil { + return errdefs.FromGRPC(err) + } + p.t.events.Publish(ctx, runtime.TaskExecStartedEventTopic, &eventstypes.TaskExecStarted{ + ContainerID: p.t.id, + Pid: r.Pid, + ExecID: p.id, + }) + return nil +} + +// Wait on the process to exit and return the exit status and timestamp +func (p *Process) Wait(ctx context.Context) (*runtime.Exit, error) { + r, err := p.t.shim.Wait(ctx, &shim.WaitRequest{ + ID: p.id, + }) + if err != nil { + return nil, err + } + return &runtime.Exit{ + Timestamp: r.ExitedAt, + Status: r.ExitStatus, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/linux/runtime.go b/vendor/github.com/containerd/containerd/linux/runtime.go new file mode
100644 index 000000000..28941b579 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/runtime.go @@ -0,0 +1,545 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package linux + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/boltdb/bolt" + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events/exchange" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/linux/proc" + "github.com/containerd/containerd/linux/runctypes" + shim "github.com/containerd/containerd/linux/shim/v1" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/runtime" + runc "github.com/containerd/go-runc" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +var ( + pluginID = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, "linux") + empty = &ptypes.Empty{} +) + +const ( + configFilename = "config.json" + defaultRuntime = "runc" + defaultShim = "containerd-shim" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.RuntimePlugin, + ID: "linux", + InitFn: New, + Requires: []plugin.Type{ + plugin.TaskMonitorPlugin, + plugin.MetadataPlugin, + }, + Config: &Config{ + Shim: defaultShim, + Runtime: defaultRuntime, + }, + }) +} + +var _ = (runtime.Runtime)(&Runtime{}) + +// Config options for the runtime +type Config struct { + // Shim is a path or name of binary implementing the Shim GRPC API + Shim string `toml:"shim"` + // Runtime is a path or name of an OCI runtime used by the shim + Runtime string `toml:"runtime"` + // RuntimeRoot is the path that shall be used by the OCI runtime for its data + RuntimeRoot string `toml:"runtime_root"` + // NoShim calls runc directly from within the package + NoShim bool `toml:"no_shim"` + // ShimDebug enables debug output from the shim + ShimDebug bool `toml:"shim_debug"` +} + +// New returns a configured runtime +func New(ic *plugin.InitContext) (interface{}, error) { + ic.Meta.Platforms = []ocispec.Platform{platforms.DefaultSpec()} + + if err := os.MkdirAll(ic.Root, 0711); err != nil { + return nil, err + } + if err := os.MkdirAll(ic.State, 0711); err != nil { + return nil, err + } + monitor, err := ic.Get(plugin.TaskMonitorPlugin) + if err != nil { + return nil, err + } + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + cfg := ic.Config.(*Config) + r := &Runtime{ + root: ic.Root, + state: ic.State, + monitor:
monitor.(runtime.TaskMonitor), + tasks: runtime.NewTaskList(), + db: m.(*metadata.DB), + address: ic.Address, + events: ic.Events, + config: cfg, + } + tasks, err := r.restoreTasks(ic.Context) + if err != nil { + return nil, err + } + + // TODO: need to add the tasks to the monitor + for _, t := range tasks { + if err := r.tasks.AddWithNamespace(t.namespace, t); err != nil { + return nil, err + } + } + return r, nil +} + +// Runtime for a linux based system +type Runtime struct { + root string + state string + address string + + monitor runtime.TaskMonitor + tasks *runtime.TaskList + db *metadata.DB + events *exchange.Exchange + + config *Config +} + +// ID of the runtime +func (r *Runtime) ID() string { + return pluginID +} + +// Create a new task +func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts) (_ runtime.Task, err error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + if err := identifiers.Validate(id); err != nil { + return nil, errors.Wrapf(err, "invalid task id") + } + + ropts, err := r.getRuncOptions(ctx, id) + if err != nil { + return nil, err + } + + bundle, err := newBundle(id, + filepath.Join(r.state, namespace), + filepath.Join(r.root, namespace), + opts.Spec.Value) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + bundle.Delete() + } + }() + + shimopt := ShimLocal(r.events) + if !r.config.NoShim { + var cgroup string + if opts.Options != nil { + v, err := typeurl.UnmarshalAny(opts.Options) + if err != nil { + return nil, err + } + cgroup = v.(*runctypes.CreateOptions).ShimCgroup + } + exitHandler := func() { + log.G(ctx).WithField("id", id).Info("shim reaped") + t, err := r.tasks.Get(ctx, id) + if err != nil { + // Task was never started or was already successfully deleted + return + } + lc := t.(*Task) + + // Stop the monitor + if err := r.monitor.Stop(lc); err != nil { + log.G(ctx).WithError(err).WithFields(logrus.Fields{ + "id": id, + "namespace": namespace, + }).Warn("failed to stop monitor") + } + + log.G(ctx).WithFields(logrus.Fields{ + "id": id, + "namespace": namespace, + }).Warn("cleaning up after killed shim") + if err = r.cleanupAfterDeadShim(context.Background(), bundle, namespace, id, lc.pid); err != nil { + log.G(ctx).WithError(err).WithFields(logrus.Fields{ + "id": id, + "namespace": namespace, + }).Warn("failed to clean up after killed shim") + } + } + shimopt = ShimRemote(r.config.Shim, r.address, cgroup, r.config.ShimDebug, exitHandler) + } + + s, err := bundle.NewShimClient(ctx, namespace, shimopt, ropts) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + if kerr := s.KillShim(ctx); kerr != nil { + log.G(ctx).WithError(err).Error("failed to kill shim") + } + } + }() + + rt := r.config.Runtime + if ropts != nil && ropts.Runtime != "" { + rt = ropts.Runtime + } + sopts := &shim.CreateTaskRequest{ + ID: id, + Bundle: bundle.path, + Runtime: rt, + Stdin: opts.IO.Stdin, + Stdout: opts.IO.Stdout, + Stderr: opts.IO.Stderr, + Terminal: opts.IO.Terminal, + Checkpoint: opts.Checkpoint, + Options: opts.Options, + } + for _, m := range opts.Rootfs { + sopts.Rootfs = append(sopts.Rootfs, &types.Mount{ + Type: m.Type, + Source: m.Source, + Options: m.Options, + }) + } + cr, err := s.Create(ctx, sopts) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + t, err := newTask(id, namespace, int(cr.Pid), s, r.monitor, r.events, + proc.NewRunc(ropts.RuntimeRoot, sopts.Bundle, namespace, rt, ropts.CriuPath, ropts.SystemdCgroup)) + if err
!= nil { + return nil, err + } + if err := r.tasks.Add(ctx, t); err != nil { + return nil, err + } + // after the task is created, add it to the monitor if it has a cgroup + // this can be different on a checkpoint/restore + if t.cg != nil { + if err = r.monitor.Monitor(t); err != nil { + if _, err := r.Delete(ctx, t); err != nil { + log.G(ctx).WithError(err).Error("deleting task after failed monitor") + } + return nil, err + } + } + r.events.Publish(ctx, runtime.TaskCreateEventTopic, &eventstypes.TaskCreate{ + ContainerID: sopts.ID, + Bundle: sopts.Bundle, + Rootfs: sopts.Rootfs, + IO: &eventstypes.TaskIO{ + Stdin: sopts.Stdin, + Stdout: sopts.Stdout, + Stderr: sopts.Stderr, + Terminal: sopts.Terminal, + }, + Checkpoint: sopts.Checkpoint, + Pid: uint32(t.pid), + }) + + return t, nil +} + +// Delete a task removing all on disk state +func (r *Runtime) Delete(ctx context.Context, c runtime.Task) (*runtime.Exit, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + lc, ok := c.(*Task) + if !ok { + return nil, fmt.Errorf("task cannot be cast as *linux.Task") + } + if err := r.monitor.Stop(lc); err != nil { + return nil, err + } + bundle := loadBundle( + lc.id, + filepath.Join(r.state, namespace, lc.id), + filepath.Join(r.root, namespace, lc.id), + ) + + rsp, err := lc.shim.Delete(ctx, empty) + if err != nil { + if cerr := r.cleanupAfterDeadShim(ctx, bundle, namespace, c.ID(), lc.pid); cerr != nil { + log.G(ctx).WithError(err).Error("unable to cleanup task") + } + return nil, errdefs.FromGRPC(err) + } + r.tasks.Delete(ctx, lc.id) + if err := lc.shim.KillShim(ctx); err != nil { + log.G(ctx).WithError(err).Error("failed to kill shim") + } + + if err := bundle.Delete(); err != nil { + log.G(ctx).WithError(err).Error("failed to delete bundle") + } + r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{ + ContainerID: lc.id, + ExitStatus: rsp.ExitStatus, + ExitedAt: rsp.ExitedAt, + Pid: rsp.Pid, + }) + return &runtime.Exit{ + Status: rsp.ExitStatus, + Timestamp: rsp.ExitedAt, + Pid: rsp.Pid, + }, nil +} + +// Tasks returns all tasks known to the runtime +func (r *Runtime) Tasks(ctx context.Context) ([]runtime.Task, error) { + return r.tasks.GetAll(ctx) +} + +func (r *Runtime) restoreTasks(ctx context.Context) ([]*Task, error) { + dir, err := ioutil.ReadDir(r.state) + if err != nil { + return nil, err + } + var o []*Task + for _, namespace := range dir { + if !namespace.IsDir() { + continue + } + name := namespace.Name() + log.G(ctx).WithField("namespace", name).Debug("loading tasks in namespace") + tasks, err := r.loadTasks(ctx, name) + if err != nil { + return nil, err + } + o = append(o, tasks...) 
+ } + return o, nil +} + +// Get a specific task by task id +func (r *Runtime) Get(ctx context.Context, id string) (runtime.Task, error) { + return r.tasks.Get(ctx, id) +} + +func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { + dir, err := ioutil.ReadDir(filepath.Join(r.state, ns)) + if err != nil { + return nil, err + } + var o []*Task + for _, path := range dir { + if !path.IsDir() { + continue + } + id := path.Name() + bundle := loadBundle( + id, + filepath.Join(r.state, ns, id), + filepath.Join(r.root, ns, id), + ) + ctx = namespaces.WithNamespace(ctx, ns) + pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile)) + s, err := bundle.NewShimClient(ctx, ns, ShimConnect(func() { + log.G(ctx).WithError(err).WithFields(logrus.Fields{ + "id": id, + "namespace": ns, + }).Error("connecting to shim") + err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid) + if err != nil { + log.G(ctx).WithError(err).WithField("bundle", bundle.path). + Error("cleaning up after dead shim") + } + }), nil) + if err != nil { + log.G(ctx).WithError(err).WithFields(logrus.Fields{ + "id": id, + "namespace": ns, + }).Error("connecting to shim") + err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid) + if err != nil { + log.G(ctx).WithError(err).WithField("bundle", bundle.path). + Error("cleaning up after dead shim") + } + continue + } + ropts, err := r.getRuncOptions(ctx, id) + if err != nil { + log.G(ctx).WithError(err).WithField("id", id). + Error("get runtime options") + continue + } + + t, err := newTask(id, ns, pid, s, r.monitor, r.events, + proc.NewRunc(ropts.RuntimeRoot, bundle.path, ns, ropts.Runtime, ropts.CriuPath, ropts.SystemdCgroup)) + if err != nil { + log.G(ctx).WithError(err).Error("loading task type") + continue + } + o = append(o, t) + } + return o, nil +} + +func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, id string, pid int) error { + ctx = namespaces.WithNamespace(ctx, ns) + if err := r.terminate(ctx, bundle, ns, id); err != nil { + if r.config.ShimDebug { + return errors.Wrap(err, "failed to terminate task, leaving bundle for debugging") + } + log.G(ctx).WithError(err).Warn("failed to terminate task") + } + + // Notify Client + exitedAt := time.Now().UTC() + r.events.Publish(ctx, runtime.TaskExitEventTopic, &eventstypes.TaskExit{ + ContainerID: id, + ID: id, + Pid: uint32(pid), + ExitStatus: 128 + uint32(unix.SIGKILL), + ExitedAt: exitedAt, + }) + + r.tasks.Delete(ctx, id) + if err := bundle.Delete(); err != nil { + log.G(ctx).WithError(err).Error("delete bundle") + } + + r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{ + ContainerID: id, + Pid: uint32(pid), + ExitStatus: 128 + uint32(unix.SIGKILL), + ExitedAt: exitedAt, + }) + + return nil +} + +func (r *Runtime) terminate(ctx context.Context, bundle *bundle, ns, id string) error { + rt, err := r.getRuntime(ctx, ns, id) + if err != nil { + return err + } + if err := rt.Delete(ctx, id, &runc.DeleteOpts{ + Force: true, + }); err != nil { + log.G(ctx).WithError(err).Warnf("delete runtime state %s", id) + } + if err := mount.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil { + log.G(ctx).WithError(err).WithFields(logrus.Fields{ + "path": bundle.path, + "id": id, + }).Warnf("unmount task rootfs") + } + return nil +} + +func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, error) { + ropts, err := r.getRuncOptions(ctx, id) + if err != nil { + return nil, err + } + + var ( + cmd = r.config.Runtime + root = 
proc.RuncRoot + ) + if ropts != nil { + if ropts.Runtime != "" { + cmd = ropts.Runtime + } + if ropts.RuntimeRoot != "" { + root = ropts.RuntimeRoot + } + } + + return &runc.Runc{ + Command: cmd, + LogFormat: runc.JSON, + PdeathSignal: unix.SIGKILL, + Root: filepath.Join(root, ns), + }, nil +} + +func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runctypes.RuncOptions, error) { + var container containers.Container + + if err := r.db.View(func(tx *bolt.Tx) error { + store := metadata.NewContainerStore(tx) + var err error + container, err = store.Get(ctx, id) + return err + }); err != nil { + return nil, err + } + + if container.Runtime.Options != nil { + v, err := typeurl.UnmarshalAny(container.Runtime.Options) + if err != nil { + return nil, err + } + ropts, ok := v.(*runctypes.RuncOptions) + if !ok { + return nil, errors.New("invalid runtime options format") + } + + return ropts, nil + } + return &runctypes.RuncOptions{}, nil +} diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client.go b/vendor/github.com/containerd/containerd/linux/shim/client/client.go new file mode 100644 index 000000000..37881a36f --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/shim/client/client.go @@ -0,0 +1,278 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package client + +import ( + "context" + "io" + "net" + "os" + "os/exec" + "strings" + "sync" + "syscall" + "time" + + "golang.org/x/sys/unix" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stevvooe/ttrpc" + + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/linux/shim" + shimapi "github.com/containerd/containerd/linux/shim/v1" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/sys" + ptypes "github.com/gogo/protobuf/types" +) + +var empty = &ptypes.Empty{} + +// Opt is an option for a shim client configuration +type Opt func(context.Context, shim.Config) (shimapi.ShimService, io.Closer, error) + +// WithStart executes a new shim process +func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHandler func()) Opt { + return func(ctx context.Context, config shim.Config) (_ shimapi.ShimService, _ io.Closer, err error) { + socket, err := newSocket(address) + if err != nil { + return nil, nil, err + } + defer socket.Close() + f, err := socket.File() + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to get fd for socket %s", address) + } + defer f.Close() + + cmd, err := newCommand(binary, daemonAddress, debug, config, f) + if err != nil { + return nil, nil, err + } + if err := cmd.Start(); err != nil { + return nil, nil, errors.Wrapf(err, "failed to start shim") + } + defer func() { + if err != nil { + cmd.Process.Kill() + } + }() + go func() { + cmd.Wait() + exitHandler() + }() + log.G(ctx).WithFields(logrus.Fields{ + "pid": cmd.Process.Pid, + "address": address, + "debug": debug, + }).Infof("shim %s started", binary) + // set shim in cgroup if it is provided + if cgroup != "" { + if err := setCgroup(cgroup, cmd); err != nil { + return nil, nil, err + } + log.G(ctx).WithFields(logrus.Fields{ + "pid": cmd.Process.Pid, + "address": address, + }).Infof("shim placed in cgroup %s", cgroup) + } + if err = sys.SetOOMScore(cmd.Process.Pid, sys.OOMScoreMaxKillable); err != nil { + return nil, nil, errors.Wrap(err, "failed to set OOM Score on shim") + } + c, clo, err := WithConnect(address, func() {})(ctx, config) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to connect") + } + return c, clo, nil + } +} + +func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File) (*exec.Cmd, error) { + selfExe, err := os.Executable() + if err != nil { + return nil, err + } + args := []string{ + "-namespace", config.Namespace, + "-workdir", config.WorkDir, + "-address", daemonAddress, + "-containerd-binary", selfExe, + } + + if config.Criu != "" { + args = append(args, "-criu-path", config.Criu) + } + if config.RuntimeRoot != "" { + args = append(args, "-runtime-root", config.RuntimeRoot) + } + if config.SystemdCgroup { + args = append(args, "-systemd-cgroup") + } + if debug { + args = append(args, "-debug") + } + + cmd := exec.Command(binary, args...) 
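+ // run the shim with config.Path as its working directory; this is the + // bundle directory (the same path under which local.go in this patch + // unmounts "rootfs")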
+ cmd.Dir = config.Path + // make sure the shim can be re-parented to system init + // and is cloned in a new mount namespace because the overlay/filesystems + // will be mounted by the shim + cmd.SysProcAttr = getSysProcAttr() + cmd.ExtraFiles = append(cmd.ExtraFiles, socket) + if debug { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + return cmd, nil +} + +func newSocket(address string) (*net.UnixListener, error) { + if len(address) > 106 { + return nil, errors.Errorf("%q: unix socket path too long (limit 106)", address) + } + l, err := net.Listen("unix", "\x00"+address) + if err != nil { + return nil, errors.Wrapf(err, "failed to listen to abstract unix socket %q", address) + } + + return l.(*net.UnixListener), nil +} + +func connect(address string, d func(string, time.Duration) (net.Conn, error)) (net.Conn, error) { + return d(address, 100*time.Second) +} + +func annonDialer(address string, timeout time.Duration) (net.Conn, error) { + address = strings.TrimPrefix(address, "unix://") + return net.DialTimeout("unix", "\x00"+address, timeout) +} + +// WithConnect connects to an existing shim +func WithConnect(address string, onClose func()) Opt { + return func(ctx context.Context, config shim.Config) (shimapi.ShimService, io.Closer, error) { + conn, err := connect(address, annonDialer) + if err != nil { + return nil, nil, err + } + client := ttrpc.NewClient(conn) + client.OnClose(onClose) + return shimapi.NewShimClient(client), conn, nil + } +} + +// WithLocal uses an in-process shim +func WithLocal(publisher events.Publisher) func(context.Context, shim.Config) (shimapi.ShimService, io.Closer, error) { + return func(ctx context.Context, config shim.Config) (shimapi.ShimService, io.Closer, error) { + service, err := shim.NewService(config, publisher) + if err != nil { + return nil, nil, err + } + return shim.NewLocal(service), nil, nil + } +} + +// New returns a new shim client +func New(ctx context.Context, config shim.Config, opt Opt) (*Client, error) { + s, c, err := opt(ctx, config) + if err != nil { + return nil, err + } + return &Client{ + ShimService: s, + c: c, + exitCh: make(chan struct{}), + }, nil +} + +// Client is a shim client containing the connection to a shim +type Client struct { + shimapi.ShimService + + c io.Closer + exitCh chan struct{} + exitOnce sync.Once +} + +// IsAlive returns true if the shim can be contacted. +// NOTE: a negative answer doesn't mean that the process is gone. +func (c *Client) IsAlive(ctx context.Context) (bool, error) { + _, err := c.ShimInfo(ctx, empty) + if err != nil { + // TODO(stevvooe): There are some error conditions that need to be + // handled with unix sockets existence to give the right answer here.
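+ // for now any RPC failure is reported as not alive, even though the + // shim process itself may still be running (see the NOTE above)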
+ return false, err + } + return true, nil +} + +// StopShim signals the shim to exit and waits for the process to disappear +func (c *Client) StopShim(ctx context.Context) error { + return c.signalShim(ctx, unix.SIGTERM) +} + +// KillShim kills the shim forcefully and waits for the process to disappear +func (c *Client) KillShim(ctx context.Context) error { + return c.signalShim(ctx, unix.SIGKILL) +} + +// Close the client connection +func (c *Client) Close() error { + if c.c == nil { + return nil + } + return c.c.Close() +} + +func (c *Client) signalShim(ctx context.Context, sig syscall.Signal) error { + info, err := c.ShimInfo(ctx, empty) + if err != nil { + return err + } + pid := int(info.ShimPid) + // make sure we don't kill ourselves if we are running a local shim + if os.Getpid() == pid { + return nil + } + if err := unix.Kill(pid, sig); err != nil && err != unix.ESRCH { + return err + } + // wait for shim to die after being signaled + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.waitForExit(pid): + return nil + } +} + +func (c *Client) waitForExit(pid int) <-chan struct{} { + c.exitOnce.Do(func() { + for { + // use kill(pid, 0) here because the shim could have been reparented + // and we are no longer able to waitpid(pid, ...) on the shim + if err := unix.Kill(pid, 0); err == unix.ESRCH { + close(c.exitCh) + return + } + time.Sleep(10 * time.Millisecond) + } + }) + return c.exitCh +} diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go b/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go new file mode 100644 index 000000000..2519380f5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go @@ -0,0 +1,46 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package client + +import ( + "os/exec" + "syscall" + + "github.com/containerd/cgroups" + "github.com/pkg/errors" +) + +func getSysProcAttr() *syscall.SysProcAttr { + return &syscall.SysProcAttr{ + Setpgid: true, + } +} + +func setCgroup(cgroupPath string, cmd *exec.Cmd) error { + cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(cgroupPath)) + if err != nil { + return errors.Wrapf(err, "failed to load cgroup %s", cgroupPath) + } + if err := cg.Add(cgroups.Process{ + Pid: cmd.Process.Pid, + }); err != nil { + return errors.Wrapf(err, "failed to join cgroup %s", cgroupPath) + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go b/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go new file mode 100644 index 000000000..8a5b22fb7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go @@ -0,0 +1,34 @@ +// +build !linux,!windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package client + +import ( + "os/exec" + "syscall" +) + +func getSysProcAttr() *syscall.SysProcAttr { + return &syscall.SysProcAttr{ + Setpgid: true, + } +} + +func setCgroup(cgroupPath string, cmd *exec.Cmd) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/linux/shim/local.go b/vendor/github.com/containerd/containerd/linux/shim/local.go new file mode 100644 index 000000000..1600ef6f1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/shim/local.go @@ -0,0 +1,107 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shim + +import ( + "context" + "path/filepath" + + shimapi "github.com/containerd/containerd/linux/shim/v1" + "github.com/containerd/containerd/mount" + ptypes "github.com/gogo/protobuf/types" +) + +// NewLocal returns a shim client implementation for issuing commands to a shim +func NewLocal(s *Service) shimapi.ShimService { + return &local{ + s: s, + } +} + +type local struct { + s *Service +} + +func (c *local) Create(ctx context.Context, in *shimapi.CreateTaskRequest) (*shimapi.CreateTaskResponse, error) { + return c.s.Create(ctx, in) +} + +func (c *local) Start(ctx context.Context, in *shimapi.StartRequest) (*shimapi.StartResponse, error) { + return c.s.Start(ctx, in) +} + +func (c *local) Delete(ctx context.Context, in *ptypes.Empty) (*shimapi.DeleteResponse, error) { + // make sure we unmount the container's rootfs for this local + if err := mount.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil { + return nil, err + } + return c.s.Delete(ctx, in) +} + +func (c *local) DeleteProcess(ctx context.Context, in *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) { + return c.s.DeleteProcess(ctx, in) +} + +func (c *local) Exec(ctx context.Context, in *shimapi.ExecProcessRequest) (*ptypes.Empty, error) { + return c.s.Exec(ctx, in) +} + +func (c *local) ResizePty(ctx context.Context, in *shimapi.ResizePtyRequest) (*ptypes.Empty, error) { + return c.s.ResizePty(ctx, in) +} + +func (c *local) State(ctx context.Context, in *shimapi.StateRequest) (*shimapi.StateResponse, error) { + return c.s.State(ctx, in) +} + +func (c *local) Pause(ctx context.Context, in *ptypes.Empty) (*ptypes.Empty, error) { + return c.s.Pause(ctx, in) +} + +func (c *local) Resume(ctx context.Context, in *ptypes.Empty) (*ptypes.Empty, error) { + return c.s.Resume(ctx, in) +} + +func (c *local) Kill(ctx context.Context, in *shimapi.KillRequest) (*ptypes.Empty, error) { + return c.s.Kill(ctx, in) +} + +func (c *local) ListPids(ctx context.Context, in *shimapi.ListPidsRequest)
(*shimapi.ListPidsResponse, error) { + return c.s.ListPids(ctx, in) +} + +func (c *local) CloseIO(ctx context.Context, in *shimapi.CloseIORequest) (*ptypes.Empty, error) { + return c.s.CloseIO(ctx, in) +} + +func (c *local) Checkpoint(ctx context.Context, in *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) { + return c.s.Checkpoint(ctx, in) +} + +func (c *local) ShimInfo(ctx context.Context, in *ptypes.Empty) (*shimapi.ShimInfoResponse, error) { + return c.s.ShimInfo(ctx, in) +} + +func (c *local) Update(ctx context.Context, in *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) { + return c.s.Update(ctx, in) +} + +func (c *local) Wait(ctx context.Context, in *shimapi.WaitRequest) (*shimapi.WaitResponse, error) { + return c.s.Wait(ctx, in) +} diff --git a/vendor/github.com/containerd/containerd/linux/shim/service.go b/vendor/github.com/containerd/containerd/linux/shim/service.go new file mode 100644 index 000000000..49d847e87 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/shim/service.go @@ -0,0 +1,547 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shim + +import ( + "context" + "fmt" + "os" + "sync" + + "github.com/containerd/console" + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/api/types/task" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/linux/proc" + "github.com/containerd/containerd/linux/runctypes" + shimapi "github.com/containerd/containerd/linux/shim/v1" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/reaper" + "github.com/containerd/containerd/runtime" + runc "github.com/containerd/go-runc" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + empty = &ptypes.Empty{} + bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32<<10) + return &buffer + }, + } +) + +// Config contains shim specific configuration +type Config struct { + Path string + Namespace string + WorkDir string + Criu string + RuntimeRoot string + SystemdCgroup bool +} + +// NewService returns a new shim service that can be used via GRPC +func NewService(config Config, publisher events.Publisher) (*Service, error) { + if config.Namespace == "" { + return nil, fmt.Errorf("shim namespace cannot be empty") + } + ctx := namespaces.WithNamespace(context.Background(), config.Namespace) + ctx = log.WithLogger(ctx, logrus.WithFields(logrus.Fields{ + "namespace": config.Namespace, + "path": config.Path, + "pid": os.Getpid(), + })) + s := &Service{ + config: config, + context: ctx, + processes: make(map[string]proc.Process), + events: make(chan interface{}, 128), + ec: reaper.Default.Subscribe(), + } + go s.processExits() + if err := s.initPlatform(); err 
!= nil { + return nil, errors.Wrap(err, "failed to initialize platform behavior") + } + go s.forward(publisher) + return s, nil +} + +// Service is the shim implementation of a remote shim over GRPC +type Service struct { + mu sync.Mutex + + config Config + context context.Context + processes map[string]proc.Process + events chan interface{} + platform proc.Platform + ec chan runc.Exit + + // Filled by Create() + id string + bundle string +} + +// Create a new initial process and container with the underlying OCI runtime +func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (*shimapi.CreateTaskResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + var mounts []proc.Mount + for _, m := range r.Rootfs { + mounts = append(mounts, proc.Mount{ + Type: m.Type, + Source: m.Source, + Target: m.Target, + Options: m.Options, + }) + } + process, err := proc.New( + ctx, + s.config.Path, + s.config.WorkDir, + s.config.RuntimeRoot, + s.config.Namespace, + s.config.Criu, + s.config.SystemdCgroup, + s.platform, + &proc.CreateConfig{ + ID: r.ID, + Bundle: r.Bundle, + Runtime: r.Runtime, + Rootfs: mounts, + Terminal: r.Terminal, + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Checkpoint: r.Checkpoint, + ParentCheckpoint: r.ParentCheckpoint, + Options: r.Options, + }, + ) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + // save the main task id and bundle to the shim for additional requests + s.id = r.ID + s.bundle = r.Bundle + pid := process.Pid() + s.processes[r.ID] = process + return &shimapi.CreateTaskResponse{ + Pid: uint32(pid), + }, nil +} + +// Start a process +func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.StartResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[r.ID] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s", r.ID) + } + if err := p.Start(ctx); err != nil { + return nil, err + } + return &shimapi.StartResponse{ + ID: p.ID(), + Pid: uint32(p.Pid()), + }, nil +} + +// Delete the initial process and container +func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[s.id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + if err := p.Delete(ctx); err != nil { + return nil, err + } + delete(s.processes, s.id) + s.platform.Close() + return &shimapi.DeleteResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + Pid: uint32(p.Pid()), + }, nil +} + +// DeleteProcess deletes an exec'd process +func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if r.ID == s.id { + return nil, status.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess") + } + p := s.processes[r.ID] + if p == nil { + return nil, errors.Wrapf(errdefs.ErrNotFound, "process %s", r.ID) + } + if err := p.Delete(ctx); err != nil { + return nil, err + } + delete(s.processes, r.ID) + return &shimapi.DeleteResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + Pid: uint32(p.Pid()), + }, nil +} + +// Exec an additional process inside the container +func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*ptypes.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if p := s.processes[r.ID]; p != nil { + return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s",
r.ID) + } + + p := s.processes[s.id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + + process, err := p.(*proc.Init).Exec(ctx, s.config.Path, &proc.ExecConfig{ + ID: r.ID, + Terminal: r.Terminal, + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Spec: r.Spec, + }) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + s.processes[r.ID] = process + return empty, nil +} + +// ResizePty of a process +func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*ptypes.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + if r.ID == "" { + return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "id not provided") + } + ws := console.WinSize{ + Width: uint16(r.Width), + Height: uint16(r.Height), + } + p := s.processes[r.ID] + if p == nil { + return nil, errors.Errorf("process does not exist %s", r.ID) + } + if err := p.Resize(ws); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// State returns runtime state information for a process +func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.StateResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[r.ID] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process id %s", r.ID) + } + st, err := p.Status(ctx) + if err != nil { + return nil, err + } + status := task.StatusUnknown + switch st { + case "created": + status = task.StatusCreated + case "running": + status = task.StatusRunning + case "stopped": + status = task.StatusStopped + case "paused": + status = task.StatusPaused + case "pausing": + status = task.StatusPausing + } + sio := p.Stdio() + return &shimapi.StateResponse{ + ID: p.ID(), + Bundle: s.bundle, + Pid: uint32(p.Pid()), + Status: status, + Stdin: sio.Stdin, + Stdout: sio.Stdout, + Stderr: sio.Stderr, + Terminal: sio.Terminal, + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }, nil +} + +// Pause the container +func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[s.id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + if err := p.(*proc.Init).Pause(ctx); err != nil { + return nil, err + } + return empty, nil +} + +// Resume the container +func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[s.id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + if err := p.(*proc.Init).Resume(ctx); err != nil { + return nil, err + } + return empty, nil +} + +// Kill a process with the provided signal +func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + if r.ID == "" { + p := s.processes[s.id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + if err := p.Kill(ctx, r.Signal, r.All); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil + } + + p := s.processes[r.ID] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process id %s not found", r.ID) + } + if err := p.Kill(ctx, r.Signal, r.All); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// ListPids returns all pids inside the container +func (s *Service) ListPids(ctx context.Context, r 
*shimapi.ListPidsRequest) (*shimapi.ListPidsResponse, error) { + pids, err := s.getContainerPids(ctx, r.ID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + var processes []*task.ProcessInfo + for _, pid := range pids { + pInfo := task.ProcessInfo{ + Pid: pid, + } + for _, p := range s.processes { + if p.Pid() == int(pid) { + d := &runctypes.ProcessDetails{ + ExecID: p.ID(), + } + a, err := typeurl.MarshalAny(d) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + } + pInfo.Info = a + break + } + } + processes = append(processes, &pInfo) + } + return &shimapi.ListPidsResponse{ + Processes: processes, + }, nil +} + +// CloseIO of a process +func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptypes.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[r.ID] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process does not exist %s", r.ID) + } + if stdin := p.Stdin(); stdin != nil { + if err := stdin.Close(); err != nil { + return nil, errors.Wrap(err, "close stdin") + } + } + return empty, nil +} + +// Checkpoint the container +func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[s.id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + if err := p.(*proc.Init).Checkpoint(ctx, &proc.CheckpointConfig{ + Path: r.Path, + Options: r.Options, + }); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// ShimInfo returns shim information such as the shim's pid +func (s *Service) ShimInfo(ctx context.Context, r *ptypes.Empty) (*shimapi.ShimInfoResponse, error) { + return &shimapi.ShimInfoResponse{ + ShimPid: uint32(os.Getpid()), + }, nil +} + +// Update a running container +func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[s.id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + if err := p.(*proc.Init).Update(ctx, r.Resources); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// Wait for a process to exit +func (s *Service) Wait(ctx context.Context, r *shimapi.WaitRequest) (*shimapi.WaitResponse, error) { + s.mu.Lock() + p := s.processes[r.ID] + s.mu.Unlock() + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + p.Wait() + + return &shimapi.WaitResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }, nil +} + +func (s *Service) processExits() { + for e := range s.ec { + s.checkProcesses(e) + } +} + +func (s *Service) checkProcesses(e runc.Exit) { + s.mu.Lock() + defer s.mu.Unlock() + for _, p := range s.processes { + if p.Pid() == e.Pid { + if ip, ok := p.(*proc.Init); ok { + // Ensure all children are killed + if err := ip.KillAll(s.context); err != nil { + log.G(s.context).WithError(err).WithField("id", ip.ID()). 
+ Error("failed to kill init's children") + } + } + p.SetExited(e.Status) + s.events <- &eventstypes.TaskExit{ + ContainerID: s.id, + ID: p.ID(), + Pid: uint32(e.Pid), + ExitStatus: uint32(e.Status), + ExitedAt: p.ExitedAt(), + } + return + } + } +} + +func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[s.id] + if p == nil { + return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "container must be created") + } + + ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) + if err != nil { + return nil, err + } + pids := make([]uint32, 0, len(ps)) + for _, pid := range ps { + pids = append(pids, uint32(pid)) + } + return pids, nil +} + +func (s *Service) forward(publisher events.Publisher) { + for e := range s.events { + if err := publisher.Publish(s.context, getTopic(s.context, e), e); err != nil { + log.G(s.context).WithError(err).Error("post event") + } + } +} + +func getTopic(ctx context.Context, e interface{}) string { + switch e.(type) { + case *eventstypes.TaskCreate: + return runtime.TaskCreateEventTopic + case *eventstypes.TaskStart: + return runtime.TaskStartEventTopic + case *eventstypes.TaskOOM: + return runtime.TaskOOMEventTopic + case *eventstypes.TaskExit: + return runtime.TaskExitEventTopic + case *eventstypes.TaskDelete: + return runtime.TaskDeleteEventTopic + case *eventstypes.TaskExecAdded: + return runtime.TaskExecAddedEventTopic + case *eventstypes.TaskExecStarted: + return runtime.TaskExecStartedEventTopic + case *eventstypes.TaskPaused: + return runtime.TaskPausedEventTopic + case *eventstypes.TaskResumed: + return runtime.TaskResumedEventTopic + case *eventstypes.TaskCheckpointed: + return runtime.TaskCheckpointedEventTopic + default: + logrus.Warnf("no topic for type %#v", e) + } + return runtime.TaskUnknownTopic +} diff --git a/vendor/github.com/containerd/containerd/linux/shim/service_linux.go b/vendor/github.com/containerd/containerd/linux/shim/service_linux.go new file mode 100644 index 000000000..18ae6503b --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/shim/service_linux.go @@ -0,0 +1,111 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package shim + +import ( + "context" + "io" + "sync" + "syscall" + + "github.com/containerd/console" + "github.com/containerd/fifo" + "github.com/pkg/errors" +) + +type linuxPlatform struct { + epoller *console.Epoller +} + +func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { + if p.epoller == nil { + return nil, errors.New("uninitialized epoller") + } + + epollConsole, err := p.epoller.Add(console) + if err != nil { + return nil, err + } + + if stdin != "" { + in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0) + if err != nil { + return nil, err + } + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(epollConsole, in, *p) + }() + } + + outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0) + if err != nil { + return nil, err + } + outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0) + if err != nil { + return nil, err + } + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(outw, epollConsole, *p) + epollConsole.Close() + outr.Close() + outw.Close() + wg.Done() + }() + return epollConsole, nil +} + +func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { + if p.epoller == nil { + return errors.New("uninitialized epoller") + } + epollConsole, ok := cons.(*console.EpollConsole) + if !ok { + return errors.Errorf("expected EpollConsole, got %#v", cons) + } + return epollConsole.Shutdown(p.epoller.CloseConsole) +} + +func (p *linuxPlatform) Close() error { + return p.epoller.Close() +} + +// initialize a single epoll fd to manage our consoles. `initPlatform` should +// only be called once. +func (s *Service) initPlatform() error { + if s.platform != nil { + return nil + } + epoller, err := console.NewEpoller() + if err != nil { + return errors.Wrap(err, "failed to initialize epoller") + } + s.platform = &linuxPlatform{ + epoller: epoller, + } + go epoller.Wait() + return nil +} diff --git a/vendor/github.com/containerd/containerd/linux/shim/service_unix.go b/vendor/github.com/containerd/containerd/linux/shim/service_unix.go new file mode 100644 index 000000000..708e45c29 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/shim/service_unix.go @@ -0,0 +1,84 @@ +// +build !windows,!linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package shim + +import ( + "context" + "io" + "sync" + "syscall" + + "github.com/containerd/console" + "github.com/containerd/fifo" +) + +type unixPlatform struct { +} + +func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { + if stdin != "" { + in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0) + if err != nil { + return nil, err + } + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(console, in, *p) + }() + } + outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0) + if err != nil { + return nil, err + } + outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0) + if err != nil { + return nil, err + } + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(outw, console, *p) + console.Close() + outr.Close() + outw.Close() + wg.Done() + }() + return console, nil +} + +func (p *unixPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { + return nil +} + +func (p *unixPlatform) Close() error { + return nil +} + +func (s *Service) initPlatform() error { + s.platform = &unixPlatform{} + return nil +} diff --git a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go new file mode 100644 index 000000000..fd4e32e88 --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go @@ -0,0 +1,4431 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/linux/shim/v1/shim.proto + +/* + Package shim is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/containerd/linux/shim/v1/shim.proto + + It has these top-level messages: + CreateTaskRequest + CreateTaskResponse + DeleteResponse + DeleteProcessRequest + ExecProcessRequest + ExecProcessResponse + ResizePtyRequest + StateRequest + StateResponse + KillRequest + CloseIORequest + ListPidsRequest + ListPidsResponse + CheckpointTaskRequest + ShimInfoResponse + UpdateTaskRequest + StartRequest + StartResponse + WaitRequest + WaitResponse +*/ +package shim + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/types" +import google_protobuf1 "github.com/gogo/protobuf/types" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +import _ "github.com/gogo/protobuf/types" +import containerd_types "github.com/containerd/containerd/api/types" +import containerd_v1_types "github.com/containerd/containerd/api/types/task" + +import time "time" + +import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" + +import context "context" +import github_com_stevvooe_ttrpc "github.com/stevvooe/ttrpc" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type CreateTaskRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Runtime string `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"` + Rootfs []*containerd_types.Mount `protobuf:"bytes,4,rep,name=rootfs" json:"rootfs,omitempty"` + Terminal bool `protobuf:"varint,5,opt,name=terminal,proto3" json:"terminal,omitempty"` + Stdin string `protobuf:"bytes,6,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,7,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,8,opt,name=stderr,proto3" json:"stderr,omitempty"` + Checkpoint string `protobuf:"bytes,9,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + ParentCheckpoint string `protobuf:"bytes,10,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` + Options *google_protobuf.Any `protobuf:"bytes,11,opt,name=options" json:"options,omitempty"` +} + +func (m *CreateTaskRequest) Reset() { *m = CreateTaskRequest{} } +func (*CreateTaskRequest) ProtoMessage() {} +func (*CreateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{0} } + +type CreateTaskResponse struct { + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (m *CreateTaskResponse) Reset() { *m = CreateTaskResponse{} } +func (*CreateTaskResponse) ProtoMessage() {} +func (*CreateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{1} } + +type DeleteResponse struct { + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (*DeleteResponse) ProtoMessage() {} +func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{2} } + +type DeleteProcessRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *DeleteProcessRequest) Reset() { *m = DeleteProcessRequest{} } +func (*DeleteProcessRequest) ProtoMessage() {} +func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{3} } + +type ExecProcessRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Terminal bool `protobuf:"varint,2,opt,name=terminal,proto3" json:"terminal,omitempty"` + Stdin string `protobuf:"bytes,3,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,4,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,5,opt,name=stderr,proto3" json:"stderr,omitempty"` + Spec *google_protobuf.Any `protobuf:"bytes,6,opt,name=spec" json:"spec,omitempty"` +} + +func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} } +func (*ExecProcessRequest) ProtoMessage() {} +func (*ExecProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{4} } + +type ExecProcessResponse struct { +} + +func (m *ExecProcessResponse) Reset() { *m = ExecProcessResponse{} } +func (*ExecProcessResponse) ProtoMessage() {} +func (*ExecProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{5} } + +type ResizePtyRequest struct 
{ + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Width uint32 `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"` + Height uint32 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ResizePtyRequest) Reset() { *m = ResizePtyRequest{} } +func (*ResizePtyRequest) ProtoMessage() {} +func (*ResizePtyRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{6} } + +type StateRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *StateRequest) Reset() { *m = StateRequest{} } +func (*StateRequest) ProtoMessage() {} +func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{7} } + +type StateResponse struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Status containerd_v1_types.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` + ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *StateResponse) Reset() { *m = StateResponse{} } +func (*StateResponse) ProtoMessage() {} +func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{8} } + +type KillRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Signal uint32 `protobuf:"varint,2,opt,name=signal,proto3" json:"signal,omitempty"` + All bool `protobuf:"varint,3,opt,name=all,proto3" json:"all,omitempty"` +} + +func (m *KillRequest) Reset() { *m = KillRequest{} } +func (*KillRequest) ProtoMessage() {} +func (*KillRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{9} } + +type CloseIORequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Stdin bool `protobuf:"varint,2,opt,name=stdin,proto3" json:"stdin,omitempty"` +} + +func (m *CloseIORequest) Reset() { *m = CloseIORequest{} } +func (*CloseIORequest) ProtoMessage() {} +func (*CloseIORequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{10} } + +type ListPidsRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *ListPidsRequest) Reset() { *m = ListPidsRequest{} } +func (*ListPidsRequest) ProtoMessage() {} +func (*ListPidsRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{11} } + +type ListPidsResponse struct { + Processes []*containerd_v1_types.ProcessInfo `protobuf:"bytes,1,rep,name=processes" json:"processes,omitempty"` +} + +func (m *ListPidsResponse) Reset() { *m = ListPidsResponse{} } +func (*ListPidsResponse) ProtoMessage() {} +func (*ListPidsResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{12} } + +type CheckpointTaskRequest struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Options *google_protobuf.Any 
`protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (m *CheckpointTaskRequest) Reset() { *m = CheckpointTaskRequest{} } +func (*CheckpointTaskRequest) ProtoMessage() {} +func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{13} } + +type ShimInfoResponse struct { + ShimPid uint32 `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"` +} + +func (m *ShimInfoResponse) Reset() { *m = ShimInfoResponse{} } +func (*ShimInfoResponse) ProtoMessage() {} +func (*ShimInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{14} } + +type UpdateTaskRequest struct { + Resources *google_protobuf.Any `protobuf:"bytes,1,opt,name=resources" json:"resources,omitempty"` +} + +func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } +func (*UpdateTaskRequest) ProtoMessage() {} +func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{15} } + +type StartRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *StartRequest) Reset() { *m = StartRequest{} } +func (*StartRequest) ProtoMessage() {} +func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{16} } + +type StartResponse struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (m *StartResponse) Reset() { *m = StartResponse{} } +func (*StartResponse) ProtoMessage() {} +func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{17} } + +type WaitRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *WaitRequest) Reset() { *m = WaitRequest{} } +func (*WaitRequest) ProtoMessage() {} +func (*WaitRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{18} } + +type WaitResponse struct { + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *WaitResponse) Reset() { *m = WaitResponse{} } +func (*WaitResponse) ProtoMessage() {} +func (*WaitResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{19} } + +func init() { + proto.RegisterType((*CreateTaskRequest)(nil), "containerd.runtime.linux.shim.v1.CreateTaskRequest") + proto.RegisterType((*CreateTaskResponse)(nil), "containerd.runtime.linux.shim.v1.CreateTaskResponse") + proto.RegisterType((*DeleteResponse)(nil), "containerd.runtime.linux.shim.v1.DeleteResponse") + proto.RegisterType((*DeleteProcessRequest)(nil), "containerd.runtime.linux.shim.v1.DeleteProcessRequest") + proto.RegisterType((*ExecProcessRequest)(nil), "containerd.runtime.linux.shim.v1.ExecProcessRequest") + proto.RegisterType((*ExecProcessResponse)(nil), "containerd.runtime.linux.shim.v1.ExecProcessResponse") + proto.RegisterType((*ResizePtyRequest)(nil), "containerd.runtime.linux.shim.v1.ResizePtyRequest") + proto.RegisterType((*StateRequest)(nil), "containerd.runtime.linux.shim.v1.StateRequest") + proto.RegisterType((*StateResponse)(nil), "containerd.runtime.linux.shim.v1.StateResponse") + proto.RegisterType((*KillRequest)(nil), "containerd.runtime.linux.shim.v1.KillRequest") + proto.RegisterType((*CloseIORequest)(nil), "containerd.runtime.linux.shim.v1.CloseIORequest") + proto.RegisterType((*ListPidsRequest)(nil), 
"containerd.runtime.linux.shim.v1.ListPidsRequest") + proto.RegisterType((*ListPidsResponse)(nil), "containerd.runtime.linux.shim.v1.ListPidsResponse") + proto.RegisterType((*CheckpointTaskRequest)(nil), "containerd.runtime.linux.shim.v1.CheckpointTaskRequest") + proto.RegisterType((*ShimInfoResponse)(nil), "containerd.runtime.linux.shim.v1.ShimInfoResponse") + proto.RegisterType((*UpdateTaskRequest)(nil), "containerd.runtime.linux.shim.v1.UpdateTaskRequest") + proto.RegisterType((*StartRequest)(nil), "containerd.runtime.linux.shim.v1.StartRequest") + proto.RegisterType((*StartResponse)(nil), "containerd.runtime.linux.shim.v1.StartResponse") + proto.RegisterType((*WaitRequest)(nil), "containerd.runtime.linux.shim.v1.WaitRequest") + proto.RegisterType((*WaitResponse)(nil), "containerd.runtime.linux.shim.v1.WaitResponse") +} +func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Bundle) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle))) + i += copy(dAtA[i:], m.Bundle) + } + if len(m.Runtime) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Runtime))) + i += copy(dAtA[i:], m.Runtime) + } + if len(m.Rootfs) > 0 { + for _, msg := range m.Rootfs { + dAtA[i] = 0x22 + i++ + i = encodeVarintShim(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Terminal { + dAtA[i] = 0x28 + i++ + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Stdin) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) + i += copy(dAtA[i:], m.Stdin) + } + if len(m.Stdout) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) + i += copy(dAtA[i:], m.Stdout) + } + if len(m.Stderr) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) + i += copy(dAtA[i:], m.Stderr) + } + if len(m.Checkpoint) > 0 { + dAtA[i] = 0x4a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Checkpoint))) + i += copy(dAtA[i:], m.Checkpoint) + } + if len(m.ParentCheckpoint) > 0 { + dAtA[i] = 0x52 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ParentCheckpoint))) + i += copy(dAtA[i:], m.ParentCheckpoint) + } + if m.Options != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Options.Size())) + n1, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Pid != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Pid)) + } + return i, nil +} + +func (m *DeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) { 
+ var i int + _ = i + var l int + _ = l + if m.Pid != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Pid)) + } + if m.ExitStatus != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt))) + n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *DeleteProcessRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteProcessRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Terminal { + dAtA[i] = 0x10 + i++ + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Stdin) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) + i += copy(dAtA[i:], m.Stdin) + } + if len(m.Stdout) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) + i += copy(dAtA[i:], m.Stdout) + } + if len(m.Stderr) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) + i += copy(dAtA[i:], m.Stderr) + } + if m.Spec != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Spec.Size())) + n3, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Width != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Width)) + } + if m.Height != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Height)) + } + return i, nil +} + +func (m *StateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, 
uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *StateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Bundle) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle))) + i += copy(dAtA[i:], m.Bundle) + } + if m.Pid != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Pid)) + } + if m.Status != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Status)) + } + if len(m.Stdin) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) + i += copy(dAtA[i:], m.Stdin) + } + if len(m.Stdout) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) + i += copy(dAtA[i:], m.Stdout) + } + if len(m.Stderr) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) + i += copy(dAtA[i:], m.Stderr) + } + if m.Terminal { + dAtA[i] = 0x40 + i++ + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ExitStatus != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x52 + i++ + i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt))) + n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *KillRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Signal != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Signal)) + } + if m.All { + dAtA[i] = 0x18 + i++ + if m.All { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *CloseIORequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Stdin { + dAtA[i] = 0x10 + i++ + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ListPidsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPidsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *ListPidsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPidsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Processes) > 0 { + for _, msg := range m.Processes { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Path) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if m.Options != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Options.Size())) + n5, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *ShimInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShimInfoResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ShimPid != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.ShimPid)) + } + return i, nil +} + +func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Resources != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Resources.Size())) + n6, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *StartRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *StartResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Pid != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Pid)) + } + return i, nil +} + +func (m *WaitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil 
+} + +func (m *WaitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ExitStatus != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt))) + n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func encodeVarintShim(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CreateTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Bundle) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Runtime) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if len(m.Rootfs) > 0 { + for _, e := range m.Rootfs { + l = e.Size() + n += 1 + l + sovShim(uint64(l)) + } + } + if m.Terminal { + n += 2 + } + l = len(m.Stdin) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Checkpoint) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.ParentCheckpoint) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *CreateTaskResponse) Size() (n int) { + var l int + _ = l + if m.Pid != 0 { + n += 1 + sovShim(uint64(m.Pid)) + } + return n +} + +func (m *DeleteResponse) Size() (n int) { + var l int + _ = l + if m.Pid != 0 { + n += 1 + sovShim(uint64(m.Pid)) + } + if m.ExitStatus != 0 { + n += 1 + sovShim(uint64(m.ExitStatus)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovShim(uint64(l)) + return n +} + +func (m *DeleteProcessRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *ExecProcessRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Terminal { + n += 2 + } + l = len(m.Stdin) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *ExecProcessResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ResizePtyRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Width != 0 { + n += 1 + sovShim(uint64(m.Width)) + } + if m.Height != 0 { + n += 1 + sovShim(uint64(m.Height)) + } + return n +} + +func (m *StateRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *StateResponse) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Bundle) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Pid != 0 { 
+ n += 1 + sovShim(uint64(m.Pid)) + } + if m.Status != 0 { + n += 1 + sovShim(uint64(m.Status)) + } + l = len(m.Stdin) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Terminal { + n += 2 + } + if m.ExitStatus != 0 { + n += 1 + sovShim(uint64(m.ExitStatus)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovShim(uint64(l)) + return n +} + +func (m *KillRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Signal != 0 { + n += 1 + sovShim(uint64(m.Signal)) + } + if m.All { + n += 2 + } + return n +} + +func (m *CloseIORequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Stdin { + n += 2 + } + return n +} + +func (m *ListPidsRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *ListPidsResponse) Size() (n int) { + var l int + _ = l + if len(m.Processes) > 0 { + for _, e := range m.Processes { + l = e.Size() + n += 1 + l + sovShim(uint64(l)) + } + } + return n +} + +func (m *CheckpointTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *ShimInfoResponse) Size() (n int) { + var l int + _ = l + if m.ShimPid != 0 { + n += 1 + sovShim(uint64(m.ShimPid)) + } + return n +} + +func (m *UpdateTaskRequest) Size() (n int) { + var l int + _ = l + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *StartRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *StartResponse) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sovShim(uint64(m.Pid)) + } + return n +} + +func (m *WaitRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *WaitResponse) Size() (n int) { + var l int + _ = l + if m.ExitStatus != 0 { + n += 1 + sovShim(uint64(m.ExitStatus)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovShim(uint64(l)) + return n +} + +func sovShim(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozShim(x uint64) (n int) { + return sovShim(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CreateTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateTaskRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, + `Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`, + `Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`, + `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, + `ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", 
this.Options), "Any", "google_protobuf.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateTaskResponse{`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteResponse{`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteProcessRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteProcessRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *ExecProcessRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecProcessRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExecProcessResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecProcessResponse{`, + `}`, + }, "") + return s +} +func (this *ResizePtyRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResizePtyRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Width:` + fmt.Sprintf("%v", this.Width) + `,`, + `Height:` + fmt.Sprintf("%v", this.Height) + `,`, + `}`, + }, "") + return s +} +func (this *StateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StateRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *StateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StateResponse{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *KillRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KillRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `All:` + fmt.Sprintf("%v", this.All) + `,`, + `}`, + }, "") + return s +} +func (this *CloseIORequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CloseIORequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `}`, + }, "") + return s +} +func (this *ListPidsRequest) String() string { + if this == nil { + return "nil" + } 
+ s := strings.Join([]string{`&ListPidsRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *ListPidsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListPidsResponse{`, + `Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "containerd_v1_types.ProcessInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CheckpointTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckpointTaskRequest{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ShimInfoResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ShimInfoResponse{`, + `ShimPid:` + fmt.Sprintf("%v", this.ShimPid) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskRequest{`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "google_protobuf.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StartRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *StartResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartResponse{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `}`, + }, "") + return s +} +func (this *WaitRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WaitRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *WaitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WaitResponse{`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringShim(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} + +type ShimService interface { + State(ctx context.Context, req *StateRequest) (*StateResponse, error) + Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) + Start(ctx context.Context, req *StartRequest) (*StartResponse, error) + Delete(ctx context.Context, req *google_protobuf1.Empty) (*DeleteResponse, error) + DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) + ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) + Pause(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) + Resume(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) + Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error) + Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error) + Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error) + ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error) + CloseIO(ctx context.Context, req *CloseIORequest) 
(*google_protobuf1.Empty, error) + ShimInfo(ctx context.Context, req *google_protobuf1.Empty) (*ShimInfoResponse, error) + Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error) + Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) +} + +func RegisterShimService(srv *github_com_stevvooe_ttrpc.Server, svc ShimService) { + srv.Register("containerd.runtime.linux.shim.v1.Shim", map[string]github_com_stevvooe_ttrpc.Method{ + "State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StateRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.State(ctx, &req) + }, + "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CreateTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Create(ctx, &req) + }, + "Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StartRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Start(ctx, &req) + }, + "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req google_protobuf1.Empty + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Delete(ctx, &req) + }, + "DeleteProcess": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req DeleteProcessRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.DeleteProcess(ctx, &req) + }, + "ListPids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ListPidsRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.ListPids(ctx, &req) + }, + "Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req google_protobuf1.Empty + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Pause(ctx, &req) + }, + "Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req google_protobuf1.Empty + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Resume(ctx, &req) + }, + "Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CheckpointTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Checkpoint(ctx, &req) + }, + "Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req KillRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Kill(ctx, &req) + }, + "Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ExecProcessRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Exec(ctx, &req) + }, + "ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ResizePtyRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.ResizePty(ctx, &req) + }, + "CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CloseIORequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.CloseIO(ctx, &req) + }, + "ShimInfo": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req google_protobuf1.Empty + if err := 
unmarshal(&req); err != nil { + return nil, err + } + return svc.ShimInfo(ctx, &req) + }, + "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req UpdateTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Update(ctx, &req) + }, + "Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req WaitRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Wait(ctx, &req) + }, + }) +} + +type shimClient struct { + client *github_com_stevvooe_ttrpc.Client +} + +func NewShimClient(client *github_com_stevvooe_ttrpc.Client) ShimService { + return &shimClient{ + client: client, + } +} + +func (c *shimClient) State(ctx context.Context, req *StateRequest) (*StateResponse, error) { + var resp StateResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "State", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) { + var resp CreateTaskResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Create", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { + var resp StartResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Start", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Delete(ctx context.Context, req *google_protobuf1.Empty) (*DeleteResponse, error) { + var resp DeleteResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Delete", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) { + var resp DeleteResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "DeleteProcess", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) { + var resp ListPidsResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ListPids", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Pause(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Pause", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Resume(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Resume", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Checkpoint", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, 
"containerd.runtime.linux.shim.v1.Shim", "Kill", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Exec", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ResizePty", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "CloseIO", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) ShimInfo(ctx context.Context, req *google_protobuf1.Empty) (*ShimInfoResponse, error) { + var resp ShimInfoResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ShimInfo", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Update", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *shimClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) { + var resp WaitResponse + if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Wait", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} +func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bundle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtime = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rootfs = append(m.Rootfs, &containerd_types.Mount{}) + if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminal = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checkpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentCheckpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ParentCheckpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &google_protobuf.Any{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteProcessRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteProcessRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) 
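+ // (Wire type 2 is the length-delimited encoding used for strings, bytes
+ // and embedded messages, while wire type 0 is a bare varint; the decoder
+ // rejects any field whose wire type does not match its declared encoding.)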
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecProcessRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminal = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &google_protobuf.Any{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecProcessResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResizePtyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResizePtyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Width", wireType) + } + m.Width = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Width |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
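+ // This loop accumulates the varint field key; once it completes,
+ // wire >> 3 yields the field number and wire & 0x7 the wire type, so a
+ // key byte of 0x12, say, selects field 2 with wire type 2.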
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bundle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= (containerd_v1_types.Status(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } 
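+ // With the length checked against the remaining buffer, the field value is
+ // copied out as a Go string; truncated input therefore fails with
+ // io.ErrUnexpectedEOF above instead of reading past the end of dAtA.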
+ m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminal = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KillRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KillRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KillRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + m.Signal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Signal |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.All = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloseIORequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloseIORequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloseIORequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPidsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPidsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPidsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPidsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Processes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Processes = append(m.Processes, &containerd_v1_types.ProcessInfo{}) + if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckpointTaskRequest: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckpointTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &google_protobuf.Any{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShimInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShimInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShimInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShimPid", wireType) + } + m.ShimPid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ShimPid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &google_protobuf.Any{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire) 
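+ // (Field numbers start at 1, so a non-positive tag means malformed input;
+ // wire type 4, end-group, was rejected just above because none of these
+ // messages contain proto2 groups.)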
+ } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WaitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WaitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitResponse: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipShim(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShim + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShim + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShim + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthShim + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShim + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipShim(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowShim = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/linux/shim/v1/shim.proto", fileDescriptorShim) +} + +var fileDescriptorShim = []byte{ + // 1133 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x02, 0xff, 0x9c, 0x57, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x36, 0x69, 0xfd, 0x8e, 0x22, 0xd7, 0xde, 0x3a, 0x2e, 0xa3, 0x00, 0xb2, 0x40, 0xa0, 0x81, + 0x8b, 0x22, 0x54, 0x2d, 0x17, 0x49, 0xd3, 0x02, 0x01, 0x6c, 0x27, 0x28, 0x8c, 0xd6, 0x88, 0x41, + 0x3b, 0x4d, 0xd0, 0xa2, 0x30, 0x68, 0x71, 0x2d, 0x2d, 0x2c, 0x91, 0x0c, 0x77, 0xe9, 0xda, 0x3d, + 0xf5, 0xd4, 0x73, 0x1f, 0xa7, 0x8f, 0xe0, 0x43, 0x0e, 0x3d, 0xf6, 0x94, 0x36, 0xba, 0xf7, 0x1d, + 0x8a, 0xfd, 0x91, 0x49, 0x49, 0x66, 0x48, 0xf9, 0x62, 0x71, 0x76, 0xbf, 0xd9, 0x9d, 0x9d, 0xef, + 0xdb, 0x99, 0x35, 0x3c, 0xe9, 0x11, 0xd6, 0x8f, 0x4e, 0xac, 0xae, 0x3f, 0x6c, 0x77, 0x7d, 0x8f, + 0x39, 0xc4, 0xc3, 0xa1, 0x9b, 0xfc, 0x1c, 0x10, 0x2f, 0xba, 0x68, 0xd3, 0x3e, 0x19, 0xb6, 0xcf, + 0x37, 0xc5, 0xaf, 0x15, 0x84, 0x3e, 0xf3, 0x51, 0x2b, 0x06, 0x59, 0x61, 0xe4, 0x31, 0x32, 0xc4, + 0x96, 0x00, 0x5b, 0x02, 0x74, 0xbe, 0xd9, 0xb8, 0xd7, 0xf3, 0xfd, 0xde, 0x00, 0xb7, 0x05, 0xfe, + 0x24, 0x3a, 0x6d, 0x3b, 0xde, 0xa5, 0x74, 0x6e, 0xdc, 0x9f, 0x9e, 0xc2, 0xc3, 0x80, 0x8d, 0x27, + 0x57, 0x7b, 0x7e, 0xcf, 0x17, 0x9f, 0x6d, 0xfe, 0xa5, 0x46, 0xd7, 0xa7, 0x5d, 0xf8, 0x8e, 0x94, + 0x39, 0xc3, 0x40, 0x01, 0x1e, 0x65, 0x9e, 0xc5, 0x09, 0x48, 0x9b, 0x5d, 0x06, 0x98, 0xb6, 0x87, + 0x7e, 0xe4, 0x31, 0xe5, 0xf7, 0xf5, 0x1c, 0x7e, 0xcc, 0xa1, 0x67, 0xe2, 0x8f, 0xf4, 0x35, 0xff, + 0xd3, 0x61, 0x65, 0x37, 0xc4, 0x0e, 0xc3, 0x47, 0x0e, 0x3d, 0xb3, 0xf1, 0x9b, 0x08, 0x53, 0x86, + 0xd6, 0x40, 0x27, 0xae, 0xa1, 0xb5, 0xb4, 0x8d, 0xea, 0x4e, 0x69, 0xf4, 0x6e, 0x5d, 0xdf, 0x7b, + 0x66, 0xeb, 0xc4, 0x45, 0x6b, 0x50, 0x3a, 0x89, 0x3c, 0x77, 0x80, 0x0d, 0x9d, 0xcf, 0xd9, 0xca, + 0x42, 0x06, 0x94, 0x55, 0x06, 0x8d, 0x45, 0x31, 0x31, 0x36, 0x51, 0x1b, 0x4a, 0xa1, 0xef, 0xb3, + 0x53, 0x6a, 0x14, 0x5a, 0x8b, 0x1b, 0xb5, 0xce, 0x27, 0x56, 0x22, 0xeb, 0x22, 0x24, 0x6b, 0x9f, + 0x1f, 0xc5, 0x56, 0x30, 0xd4, 0x80, 0x0a, 0xc3, 0xe1, 0x90, 0x78, 0xce, 0xc0, 0x28, 0xb6, 0xb4, + 0x8d, 0x8a, 0x7d, 0x6d, 0xa3, 0x55, 0x28, 0x52, 0xe6, 0x12, 0xcf, 0x28, 0x89, 0x4d, 0xa4, 0xc1, + 0x83, 0xa2, 0xcc, 0xf5, 0x23, 0x66, 0x94, 0x65, 0x50, 0xd2, 0x52, 0xe3, 0x38, 0x0c, 0x8d, 0xca, + 0xf5, 0x38, 0x0e, 0x43, 0xd4, 0x04, 0xe8, 0xf6, 0x71, 0xf7, 0x2c, 0xf0, 0x89, 0xc7, 0x8c, 0xaa, + 0x98, 0x4b, 0x8c, 0xa0, 0xcf, 0x61, 0x25, 0x70, 0x42, 0xec, 0xb1, 0xe3, 0x04, 0x0c, 0x04, 0x6c, + 0x59, 0x4e, 0xec, 0xc6, 0x60, 0x0b, 0xca, 0x7e, 0xc0, 0x88, 0xef, 0x51, 0xa3, 0xd6, 0xd2, 0x36, + 0x6a, 0x9d, 0x55, 0x4b, 0xd2, 0x6c, 0x8d, 0x69, 0xb6, 0xb6, 0xbd, 0x4b, 0x7b, 0x0c, 0x32, 0x1f, + 0x00, 0x4a, 0xa6, 0x9b, 0x06, 0xbe, 0x47, 0x31, 0x5a, 0x86, 0xc5, 0x40, 0x25, 0xbc, 0x6e, 0xf3, + 0x4f, 0xf3, 0x77, 0x0d, 0x96, 0x9e, 0xe1, 0x01, 0x66, 0x38, 0x1d, 0x84, 0xd6, 0xa1, 0x86, 0x2f, + 0x08, 0x3b, 0xa6, 0xcc, 0x61, 0x11, 0x15, 0x9c, 0xd4, 0x6d, 0xe0, 0x43, 0x87, 0x62, 0x04, 0x6d, + 0x43, 0x95, 0x5b, 0xd8, 0x3d, 0x76, 0x98, 0x60, 0xa6, 0xd6, 0x69, 0xcc, 0xc4, 0x77, 0x34, 0x96, + 0xe1, 0x4e, 0xe5, 0xea, 0xdd, 0xfa, 0xc2, 0x1f, 0xff, 0xac, 0x6b, 0x76, 0x45, 0xba, 0x6d, 0x33, + 0xd3, 0x82, 0x55, 0x19, 0xc7, 0x41, 0xe8, 0x77, 0x31, 0xa5, 0x19, 0x12, 0x31, 0xff, 0xd4, 0x00, + 0x3d, 0xbf, 0xc0, 0xdd, 0x7c, 0xf0, 0x09, 0xba, 0xf5, 0x34, 0xba, 0x17, 0x6f, 0xa6, 0xbb, 0x90, + 0x42, 0x77, 0x71, 0x82, 0xee, 0x0d, 0x28, 0xd0, 0x00, 0x77, 0x85, 0x66, 0xd2, 0xe8, 0x11, 0x08, + 0xf3, 0x2e, 0x7c, 0x3c, 0x11, 0xb9, 0xcc, 0xbb, 0xf9, 0x1a, 0x96, 0x6d, 0x4c, 0xc9, 0xaf, 0xf8, + 0x80, 0x5d, 0x66, 0x1d, 0x67, 0x15, 0x8a, 0xbf, 0x10, 0x97, 0xf5, 0x15, 0x17, 0xd2, 0xe0, 0xa1, + 0xf5, 0x31, 0xe9, 0xf5, 0x25, 0x07, 0x75, 0x5b, 0x59, 0xe6, 0x03, 
0xb8, 0xc3, 0x89, 0xc2, 0x59, + 0x39, 0x7d, 0xab, 0x43, 0x5d, 0x01, 0x95, 0x16, 0xe6, 0xbd, 0xa0, 0x4a, 0x3b, 0x8b, 0xb1, 0x76, + 0xb6, 0x78, 0xba, 0x84, 0x6c, 0x78, 0x1a, 0x97, 0x3a, 0xf7, 0x93, 0x17, 0xf3, 0x7c, 0x53, 0xdd, + 0x4d, 0xa9, 0x23, 0x5b, 0x41, 0x63, 0x46, 0x8a, 0x37, 0x33, 0x52, 0x4a, 0x61, 0xa4, 0x3c, 0xc1, + 0x48, 0x92, 0xf3, 0xca, 0x14, 0xe7, 0x53, 0x92, 0xae, 0x7e, 0x58, 0xd2, 0x70, 0x2b, 0x49, 0xbf, + 0x80, 0xda, 0x77, 0x64, 0x30, 0xc8, 0x51, 0xec, 0x28, 0xe9, 0x8d, 0x85, 0x59, 0xb7, 0x95, 0xc5, + 0x73, 0xe9, 0x0c, 0x06, 0x22, 0x97, 0x15, 0x9b, 0x7f, 0x9a, 0x4f, 0x61, 0x69, 0x77, 0xe0, 0x53, + 0xbc, 0xf7, 0x22, 0x87, 0x3e, 0x64, 0x02, 0xa5, 0xd6, 0xa5, 0x61, 0x7e, 0x06, 0x1f, 0x7d, 0x4f, + 0x28, 0x3b, 0x20, 0x6e, 0xe6, 0xf5, 0xb2, 0x61, 0x39, 0x86, 0x2a, 0x31, 0x3c, 0x85, 0x6a, 0x20, + 0x35, 0x8b, 0xa9, 0xa1, 0x89, 0x32, 0xdb, 0xba, 0x91, 0x4d, 0xa5, 0xec, 0x3d, 0xef, 0xd4, 0xb7, + 0x63, 0x17, 0xf3, 0x27, 0xb8, 0x1b, 0x57, 0xb4, 0x64, 0x1b, 0x40, 0x50, 0x08, 0x1c, 0xd6, 0x97, + 0x61, 0xd8, 0xe2, 0x3b, 0x59, 0xf0, 0xf4, 0x3c, 0x05, 0xef, 0x21, 0x2c, 0x1f, 0xf6, 0xc9, 0x50, + 0xec, 0x39, 0x0e, 0xf8, 0x1e, 0x54, 0x78, 0x8b, 0x3d, 0x8e, 0xcb, 0x59, 0x99, 0xdb, 0x07, 0xc4, + 0x35, 0xbf, 0x85, 0x95, 0x97, 0x81, 0x3b, 0xd5, 0x8e, 0x3a, 0x50, 0x0d, 0x31, 0xf5, 0xa3, 0xb0, + 0x2b, 0x0e, 0x98, 0xbe, 0x6b, 0x0c, 0x53, 0x77, 0x2b, 0x64, 0x59, 0x09, 0x7d, 0x22, 0xae, 0x16, + 0xc7, 0x65, 0x5c, 0x2d, 0x75, 0x85, 0xf4, 0xb8, 0x46, 0x7f, 0x0a, 0xb5, 0x57, 0x0e, 0xc9, 0xdc, + 0x21, 0x84, 0x3b, 0x12, 0xa6, 0x36, 0x98, 0x92, 0xb8, 0xf6, 0x61, 0x89, 0xeb, 0xb7, 0x91, 0x78, + 0xe7, 0x6d, 0x0d, 0x0a, 0x3c, 0xed, 0xa8, 0x0f, 0x45, 0x51, 0x39, 0x90, 0x65, 0x65, 0x3d, 0x77, + 0xac, 0x64, 0x2d, 0x6a, 0xb4, 0x73, 0xe3, 0xd5, 0xb1, 0x28, 0x94, 0x64, 0x67, 0x43, 0x5b, 0xd9, + 0xae, 0x33, 0x4f, 0x8e, 0xc6, 0x97, 0xf3, 0x39, 0xa9, 0x4d, 0xe5, 0xf1, 0x42, 0x96, 0xf3, 0x78, + 0xd7, 0x72, 0xc8, 0x79, 0xbc, 0x84, 0x2c, 0x6c, 0x28, 0xc9, 0x3e, 0x88, 0xd6, 0x66, 0xb8, 0x78, + 0xce, 0xdf, 0x7e, 0x8d, 0x2f, 0xb2, 0x97, 0x9c, 0xea, 0xe8, 0x97, 0x50, 0x9f, 0xe8, 0xad, 0xe8, + 0x51, 0xde, 0x25, 0x26, 0xbb, 0xeb, 0x2d, 0xb6, 0x7e, 0x03, 0x95, 0x71, 0x1d, 0x41, 0x9b, 0xd9, + 0xde, 0x53, 0xe5, 0xa9, 0xd1, 0x99, 0xc7, 0x45, 0x6d, 0xf9, 0x18, 0x8a, 0x07, 0x4e, 0x44, 0xd3, + 0x13, 0x98, 0x32, 0x8e, 0xbe, 0x82, 0x92, 0x8d, 0x69, 0x34, 0x9c, 0xdf, 0xf3, 0x67, 0x80, 0xc4, + 0x5b, 0xed, 0x71, 0x0e, 0x89, 0xdd, 0x54, 0x07, 0x53, 0x97, 0xdf, 0x87, 0x02, 0x6f, 0x24, 0xe8, + 0x61, 0xf6, 0xc2, 0x89, 0x86, 0x93, 0xba, 0xdc, 0x11, 0x14, 0xf8, 0xfb, 0x03, 0xe5, 0xb8, 0x0a, + 0xb3, 0x2f, 0xac, 0xd4, 0x55, 0x5f, 0x41, 0xf5, 0xfa, 0xf9, 0x82, 0x72, 0xf0, 0x36, 0xfd, 0xd6, + 0x49, 0x5d, 0xf8, 0x10, 0xca, 0xaa, 0xeb, 0xa1, 0x1c, 0xfa, 0x9b, 0x6c, 0x90, 0xa9, 0x8b, 0xfe, + 0x00, 0x95, 0x71, 0xbb, 0x48, 0x65, 0x3b, 0xc7, 0x21, 0x66, 0x5a, 0xce, 0x4b, 0x28, 0xc9, 0xbe, + 0x92, 0xa7, 0x3a, 0xcd, 0x74, 0xa0, 0xd4, 0x70, 0x31, 0x14, 0x78, 0x6d, 0xcf, 0xa3, 0x80, 0x44, + 0xab, 0x68, 0x58, 0x79, 0xe1, 0x32, 0xfa, 0x9d, 0xfd, 0xab, 0xf7, 0xcd, 0x85, 0xbf, 0xdf, 0x37, + 0x17, 0x7e, 0x1b, 0x35, 0xb5, 0xab, 0x51, 0x53, 0xfb, 0x6b, 0xd4, 0xd4, 0xfe, 0x1d, 0x35, 0xb5, + 0x1f, 0xb7, 0xe6, 0xfb, 0xff, 0xf7, 0x1b, 0xfe, 0xfb, 0x5a, 0x3f, 0x29, 0x89, 0x73, 0x6c, 0xfd, + 0x1f, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xd0, 0xe6, 0x46, 0x3f, 0x0f, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto new file mode 100644 index 000000000..6de8f1382 --- /dev/null 
+++ b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto @@ -0,0 +1,165 @@ +syntax = "proto3"; + +package containerd.runtime.linux.shim.v1; + +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import weak "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "github.com/containerd/containerd/api/types/mount.proto"; +import "github.com/containerd/containerd/api/types/task/task.proto"; + +option go_package = "github.com/containerd/containerd/linux/shim/v1;shim"; + +// Shim service is launched for each container and is responsible for owning the IO +// for the container and its additional processes. The shim is also the parent of +// each container and allows reattaching to the IO and receiving the exit status +// for the container processes. +service Shim { + // State returns shim and task state information. + rpc State(StateRequest) returns (StateResponse); + + rpc Create(CreateTaskRequest) returns (CreateTaskResponse); + + rpc Start(StartRequest) returns (StartResponse); + + rpc Delete(google.protobuf.Empty) returns (DeleteResponse); + + rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse); + + rpc ListPids(ListPidsRequest) returns (ListPidsResponse); + + rpc Pause(google.protobuf.Empty) returns (google.protobuf.Empty); + + rpc Resume(google.protobuf.Empty) returns (google.protobuf.Empty); + + rpc Checkpoint(CheckpointTaskRequest) returns (google.protobuf.Empty); + + rpc Kill(KillRequest) returns (google.protobuf.Empty); + + rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty); + + rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty); + + rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty); + + // ShimInfo returns information about the shim. + rpc ShimInfo(google.protobuf.Empty) returns (ShimInfoResponse); + + rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty); + + rpc Wait(WaitRequest) returns (WaitResponse); +} + +message CreateTaskRequest { + string id = 1; + string bundle = 2; + string runtime = 3; + repeated containerd.types.Mount rootfs = 4; + bool terminal = 5; + string stdin = 6; + string stdout = 7; + string stderr = 8; + string checkpoint = 9; + string parent_checkpoint = 10; + google.protobuf.Any options = 11; +} + +message CreateTaskResponse { + uint32 pid = 1; +} + +message DeleteResponse { + uint32 pid = 1; + uint32 exit_status = 2; + google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message DeleteProcessRequest { + string id = 1; +} + +message ExecProcessRequest { + string id = 1; + bool terminal = 2; + string stdin = 3; + string stdout = 4; + string stderr = 5; + google.protobuf.Any spec = 6; +} + +message ExecProcessResponse { +} + +message ResizePtyRequest { + string id = 1; + uint32 width = 2; + uint32 height = 3; +} + +message StateRequest { + string id = 1; +} + +message StateResponse { + string id = 1; + string bundle = 2; + uint32 pid = 3; + containerd.v1.types.Status status = 4; + string stdin = 5; + string stdout = 6; + string stderr = 7; + bool terminal = 8; + uint32 exit_status = 9; + google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message KillRequest { + string id = 1; + uint32 signal = 2; + bool all = 3; +} + +message CloseIORequest { + string id = 1; + bool stdin = 2; +} + +message ListPidsRequest { + string id = 1; +} + +message ListPidsResponse { + repeated containerd.v1.types.ProcessInfo processes = 1; +} + +message 
CheckpointTaskRequest { + string path = 1; + google.protobuf.Any options = 2; +} + +message ShimInfoResponse { + uint32 shim_pid = 1; +} + +message UpdateTaskRequest { + google.protobuf.Any resources = 1; +} + +message StartRequest { + string id = 1; +} + +message StartResponse { + string id = 1; + uint32 pid = 2; +} + +message WaitRequest { + string id = 1; +} + +message WaitResponse { + uint32 exit_status = 1; + google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/containerd/containerd/linux/task.go b/vendor/github.com/containerd/containerd/linux/task.go new file mode 100644 index 000000000..eca82fbbd --- /dev/null +++ b/vendor/github.com/containerd/containerd/linux/task.go @@ -0,0 +1,346 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package linux + +import ( + "context" + "sync" + + "github.com/containerd/cgroups" + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/api/types/task" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events/exchange" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/linux/shim/client" + shim "github.com/containerd/containerd/linux/shim/v1" + "github.com/containerd/containerd/runtime" + runc "github.com/containerd/go-runc" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/stevvooe/ttrpc" +) + +// Task on a linux based system +type Task struct { + mu sync.Mutex + id string + pid int + shim *client.Client + namespace string + cg cgroups.Cgroup + monitor runtime.TaskMonitor + events *exchange.Exchange + runtime *runc.Runc +} + +func newTask(id, namespace string, pid int, shim *client.Client, monitor runtime.TaskMonitor, events *exchange.Exchange, runtime *runc.Runc) (*Task, error) { + var ( + err error + cg cgroups.Cgroup + ) + if pid > 0 { + cg, err = cgroups.Load(cgroups.V1, cgroups.PidPath(pid)) + if err != nil && err != cgroups.ErrCgroupDeleted { + return nil, err + } + } + return &Task{ + id: id, + pid: pid, + shim: shim, + namespace: namespace, + cg: cg, + monitor: monitor, + events: events, + runtime: runtime, + }, nil +} + +// ID of the task +func (t *Task) ID() string { + return t.id +} + +// Info returns task information about the runtime and namespace +func (t *Task) Info() runtime.TaskInfo { + return runtime.TaskInfo{ + ID: t.id, + Runtime: pluginID, + Namespace: t.namespace, + } +} + +// Start the task +func (t *Task) Start(ctx context.Context) error { + t.mu.Lock() + hasCgroup := t.cg != nil + t.mu.Unlock() + r, err := t.shim.Start(ctx, &shim.StartRequest{ + ID: t.id, + }) + if err != nil { + return errdefs.FromGRPC(err) + } + t.pid = int(r.Pid) + if !hasCgroup { + cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(t.pid)) + if err != nil { + return err + } + t.mu.Lock() + t.cg = cg + t.mu.Unlock() + if err := t.monitor.Monitor(t); err != nil { + return err + } + } + 
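+ // Publish the start event only after the cgroup has been loaded and the
+ // task registered with the monitor, so subscribers never observe a start
+ // event for an unmonitored task.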
t.events.Publish(ctx, runtime.TaskStartEventTopic, &eventstypes.TaskStart{ + ContainerID: t.id, + Pid: uint32(t.pid), + }) + return nil +} + +// State returns runtime information for the task +func (t *Task) State(ctx context.Context) (runtime.State, error) { + response, err := t.shim.State(ctx, &shim.StateRequest{ + ID: t.id, + }) + if err != nil { + if errors.Cause(err) != ttrpc.ErrClosed { + return runtime.State{}, errdefs.FromGRPC(err) + } + return runtime.State{}, errdefs.ErrNotFound + } + var status runtime.Status + switch response.Status { + case task.StatusCreated: + status = runtime.CreatedStatus + case task.StatusRunning: + status = runtime.RunningStatus + case task.StatusStopped: + status = runtime.StoppedStatus + case task.StatusPaused: + status = runtime.PausedStatus + case task.StatusPausing: + status = runtime.PausingStatus + } + return runtime.State{ + Pid: response.Pid, + Status: status, + Stdin: response.Stdin, + Stdout: response.Stdout, + Stderr: response.Stderr, + Terminal: response.Terminal, + ExitStatus: response.ExitStatus, + ExitedAt: response.ExitedAt, + }, nil +} + +// Pause the task and all processes +func (t *Task) Pause(ctx context.Context) error { + if _, err := t.shim.Pause(ctx, empty); err != nil { + return errdefs.FromGRPC(err) + } + t.events.Publish(ctx, runtime.TaskPausedEventTopic, &eventstypes.TaskPaused{ + ContainerID: t.id, + }) + return nil +} + +// Resume the task and all processes +func (t *Task) Resume(ctx context.Context) error { + if _, err := t.shim.Resume(ctx, empty); err != nil { + return errdefs.FromGRPC(err) + } + t.events.Publish(ctx, runtime.TaskResumedEventTopic, &eventstypes.TaskResumed{ + ContainerID: t.id, + }) + return nil +} + +// Kill the task using the provided signal +// +// Optionally send the signal to all processes that are a child of the task +func (t *Task) Kill(ctx context.Context, signal uint32, all bool) error { + if _, err := t.shim.Kill(ctx, &shim.KillRequest{ + ID: t.id, + Signal: signal, + All: all, + }); err != nil { + return errdefs.FromGRPC(err) + } + return nil +} + +// Exec creates a new process inside the task +func (t *Task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) { + if err := identifiers.Validate(id); err != nil { + return nil, errors.Wrapf(err, "invalid exec id") + } + request := &shim.ExecProcessRequest{ + ID: id, + Stdin: opts.IO.Stdin, + Stdout: opts.IO.Stdout, + Stderr: opts.IO.Stderr, + Terminal: opts.IO.Terminal, + Spec: opts.Spec, + } + if _, err := t.shim.Exec(ctx, request); err != nil { + return nil, errdefs.FromGRPC(err) + } + return &Process{ + id: id, + t: t, + }, nil +} + +// Pids returns all system level process ids running inside the task +func (t *Task) Pids(ctx context.Context) ([]runtime.ProcessInfo, error) { + resp, err := t.shim.ListPids(ctx, &shim.ListPidsRequest{ + ID: t.id, + }) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + var processList []runtime.ProcessInfo + for _, p := range resp.Processes { + processList = append(processList, runtime.ProcessInfo{ + Pid: p.Pid, + Info: p.Info, + }) + } + return processList, nil +} + +// ResizePty changes the size of the task's PTY to the provided width and height +func (t *Task) ResizePty(ctx context.Context, size runtime.ConsoleSize) error { + _, err := t.shim.ResizePty(ctx, &shim.ResizePtyRequest{ + ID: t.id, + Width: size.Width, + Height: size.Height, + }) + if err != nil { + err = errdefs.FromGRPC(err) + } + return err +} + +// CloseIO closes the provided IO on the task +func (t 
*Task) CloseIO(ctx context.Context) error { + _, err := t.shim.CloseIO(ctx, &shim.CloseIORequest{ + ID: t.id, + Stdin: true, + }) + if err != nil { + err = errdefs.FromGRPC(err) + } + return err +} + +// Checkpoint creates a system level dump of the task and process information that can be later restored +func (t *Task) Checkpoint(ctx context.Context, path string, options *types.Any) error { + r := &shim.CheckpointTaskRequest{ + Path: path, + Options: options, + } + if _, err := t.shim.Checkpoint(ctx, r); err != nil { + return errdefs.FromGRPC(err) + } + t.events.Publish(ctx, runtime.TaskCheckpointedEventTopic, &eventstypes.TaskCheckpointed{ + ContainerID: t.id, + }) + return nil +} + +// DeleteProcess removes the provided process from the task and deletes all on disk state +func (t *Task) DeleteProcess(ctx context.Context, id string) (*runtime.Exit, error) { + r, err := t.shim.DeleteProcess(ctx, &shim.DeleteProcessRequest{ + ID: id, + }) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + return &runtime.Exit{ + Status: r.ExitStatus, + Timestamp: r.ExitedAt, + Pid: r.Pid, + }, nil +} + +// Update changes runtime information of a running task +func (t *Task) Update(ctx context.Context, resources *types.Any) error { + if _, err := t.shim.Update(ctx, &shim.UpdateTaskRequest{ + Resources: resources, + }); err != nil { + return errdefs.FromGRPC(err) + } + return nil +} + +// Process returns a specific process inside the task by the process id +func (t *Task) Process(ctx context.Context, id string) (runtime.Process, error) { + p := &Process{ + id: id, + t: t, + } + if _, err := p.State(ctx); err != nil { + return nil, err + } + return p, nil +} + +// Metrics returns runtime specific system level metric information for the task +func (t *Task) Metrics(ctx context.Context) (interface{}, error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.cg == nil { + return nil, errors.Wrap(errdefs.ErrNotFound, "cgroup does not exist") + } + stats, err := t.cg.Stat(cgroups.IgnoreNotExist) + if err != nil { + return nil, err + } + return stats, nil +} + +// Cgroup returns the underlying cgroup for a linux task +func (t *Task) Cgroup() (cgroups.Cgroup, error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.cg == nil { + return nil, errors.Wrap(errdefs.ErrNotFound, "cgroup does not exist") + } + return t.cg, nil +} + +// Wait for the task to exit returning the status and timestamp +func (t *Task) Wait(ctx context.Context) (*runtime.Exit, error) { + r, err := t.shim.Wait(ctx, &shim.WaitRequest{ + ID: t.id, + }) + if err != nil { + return nil, err + } + return &runtime.Exit{ + Timestamp: r.ExitedAt, + Status: r.ExitStatus, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 8ec69bc00..f40603b17 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package log import ( diff --git a/vendor/github.com/containerd/containerd/metadata/adaptors.go b/vendor/github.com/containerd/containerd/metadata/adaptors.go new file mode 100644 index 000000000..8145f9ace --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/adaptors.go @@ -0,0 +1,129 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "strings" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/images" +) + +func adaptImage(o interface{}) filters.Adaptor { + obj := o.(images.Image) + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "name": + return obj.Name, len(obj.Name) > 0 + case "target": + if len(fieldpath) < 2 { + return "", false + } + + switch fieldpath[1] { + case "digest": + return obj.Target.Digest.String(), len(obj.Target.Digest) > 0 + case "mediatype": + return obj.Target.MediaType, len(obj.Target.MediaType) > 0 + } + case "labels": + return checkMap(fieldpath[1:], obj.Labels) + // TODO(stevvooe): Greater/Less than filters would be awesome for + // size. Let's do it! 
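+ // Illustrative example (hedged, not from the original source): a filter
+ // expression such as
+ //	target.mediatype=="application/vnd.oci.image.manifest.v1+json"
+ // arrives at this adaptor as the fieldpath ["target", "mediatype"].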
+ } + + return "", false + }) +} +func adaptContainer(o interface{}) filters.Adaptor { + obj := o.(containers.Container) + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return obj.ID, len(obj.ID) > 0 + case "runtime": + if len(fieldpath) <= 1 { + return "", false + } + + switch fieldpath[1] { + case "name": + return obj.Runtime.Name, len(obj.Runtime.Name) > 0 + default: + return "", false + } + case "image": + return obj.Image, len(obj.Image) > 0 + case "labels": + return checkMap(fieldpath[1:], obj.Labels) + } + + return "", false + }) +} + +func adaptContentInfo(info content.Info) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "digest": + return info.Digest.String(), true + case "size": + // TODO: support size based filtering + case "labels": + return checkMap(fieldpath[1:], info.Labels) + } + + return "", false + }) +} + +func adaptContentStatus(status content.Status) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "ref": + return status.Ref, true + } + + return "", false + }) +} + +func checkMap(fieldpath []string, m map[string]string) (string, bool) { + if len(m) == 0 { + return "", false + } + + value, ok := m[strings.Join(fieldpath, ".")] + return value, ok +} diff --git a/vendor/github.com/containerd/containerd/metadata/bolt.go b/vendor/github.com/containerd/containerd/metadata/bolt.go new file mode 100644 index 000000000..156a33593 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/bolt.go @@ -0,0 +1,61 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "context" + + "github.com/boltdb/bolt" + "github.com/pkg/errors" +) + +type transactionKey struct{} + +// WithTransactionContext returns a new context holding the provided +// bolt transaction. Functions which require a bolt transaction will +// first check to see if a transaction is already created on the +// context before creating their own. +func WithTransactionContext(ctx context.Context, tx *bolt.Tx) context.Context { + return context.WithValue(ctx, transactionKey{}, tx) +} + +type transactor interface { + View(fn func(*bolt.Tx) error) error + Update(fn func(*bolt.Tx) error) error +} + +// view gets a bolt db transaction either from the context +// or starts a new one with the provided bolt database. +func view(ctx context.Context, db transactor, fn func(*bolt.Tx) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return db.View(fn) + } + return fn(tx) +} + +// update gets a writable bolt db transaction either from the context +// or starts a new one with the provided bolt database. 
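+// If the context transaction exists but is read-only, update fails with a
+// wrapped bolt.ErrTxNotWritable rather than silently starting a second,
+// writable transaction.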
+func update(ctx context.Context, db transactor, fn func(*bolt.Tx) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return db.Update(fn) + } else if !tx.Writable() { + return errors.Wrap(bolt.ErrTxNotWritable, "unable to use transaction from context") + } + return fn(tx) +} diff --git a/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go new file mode 100644 index 000000000..4b2ede876 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go @@ -0,0 +1,127 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package boltutil + +import ( + "time" + + "github.com/boltdb/bolt" + "github.com/pkg/errors" +) + +var ( + bucketKeyLabels = []byte("labels") + bucketKeyCreatedAt = []byte("createdat") + bucketKeyUpdatedAt = []byte("updatedat") +) + +// ReadLabels reads the labels key from the bucket +// Uses the key "labels" +func ReadLabels(bkt *bolt.Bucket) (map[string]string, error) { + lbkt := bkt.Bucket(bucketKeyLabels) + if lbkt == nil { + return nil, nil + } + labels := map[string]string{} + if err := lbkt.ForEach(func(k, v []byte) error { + labels[string(k)] = string(v) + return nil + }); err != nil { + return nil, err + } + return labels, nil +} + +// WriteLabels will write a new labels bucket to the provided bucket at key +// bucketKeyLabels, replacing the contents of the bucket with the provided map. +// +// The provided map labels will be modified to have the final contents of the +// bucket. Typically, this removes zero-value entries. +// Uses the key "labels" +func WriteLabels(bkt *bolt.Bucket, labels map[string]string) error { + // Remove existing labels to keep from merging + if lbkt := bkt.Bucket(bucketKeyLabels); lbkt != nil { + if err := bkt.DeleteBucket(bucketKeyLabels); err != nil { + return err + } + } + + if len(labels) == 0 { + return nil + } + + lbkt, err := bkt.CreateBucket(bucketKeyLabels) + if err != nil { + return err + } + + for k, v := range labels { + if v == "" { + delete(labels, k) // remove since we don't actually set it + continue + } + + if err := lbkt.Put([]byte(k), []byte(v)); err != nil { + return errors.Wrapf(err, "failed to set label %q=%q", k, v) + } + } + + return nil +} + +// ReadTimestamps reads created and updated timestamps from a bucket. +// Uses keys "createdat" and "updatedat" +func ReadTimestamps(bkt *bolt.Bucket, created, updated *time.Time) error { + for _, f := range []struct { + b []byte + t *time.Time + }{ + {bucketKeyCreatedAt, created}, + {bucketKeyUpdatedAt, updated}, + } { + v := bkt.Get(f.b) + if v != nil { + if err := f.t.UnmarshalBinary(v); err != nil { + return err + } + } + } + return nil +} + +// WriteTimestamps writes created and updated timestamps to a bucket. 
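+// Values are encoded with time.Time.MarshalBinary and decoded again by
+// ReadTimestamps above.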
+// Uses keys "createdat" and "updatedat" +func WriteTimestamps(bkt *bolt.Bucket, created, updated time.Time) error { + createdAt, err := created.MarshalBinary() + if err != nil { + return err + } + updatedAt, err := updated.MarshalBinary() + if err != nil { + return err + } + for _, v := range [][2][]byte{ + {bucketKeyCreatedAt, createdAt}, + {bucketKeyUpdatedAt, updatedAt}, + } { + if err := bkt.Put(v[0], v[1]); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/buckets.go b/vendor/github.com/containerd/containerd/metadata/buckets.go new file mode 100644 index 000000000..cade7eea3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/buckets.go @@ -0,0 +1,191 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "github.com/boltdb/bolt" + digest "github.com/opencontainers/go-digest" +) + +// The layout where a "/" delineates a bucket is desribed in the following +// section. Please try to follow this as closely as possible when adding +// functionality. We can bolster this with helpers and more structure if that +// becomes an issue. +// +// Generically, we try to do the following: +// +// /// -> +// +// version: Currently, this is "v1". Additions can be made to v1 in a backwards +// compatible way. If the layout changes, a new version must be made, along +// with a migration. +// +// namespace: the namespace to which this object belongs. +// +// object: defines which object set is stored in the bucket. There are two +// special objects, "labels" and "indexes". The "labels" bucket stores the +// labels for the parent namespace. The "indexes" object is reserved for +// indexing objects, if we require in the future. +// +// key: object-specific key identifying the storage bucket for the objects +// contents. +var ( + bucketKeyVersion = []byte(schemaVersion) + bucketKeyDBVersion = []byte("version") // stores the version of the schema + bucketKeyObjectLabels = []byte("labels") // stores the labels for a namespace. 
+ bucketKeyObjectImages = []byte("images") // stores image objects + bucketKeyObjectContainers = []byte("containers") // stores container objects + bucketKeyObjectSnapshots = []byte("snapshots") // stores snapshot references + bucketKeyObjectContent = []byte("content") // stores content references + bucketKeyObjectBlob = []byte("blob") // stores content links + bucketKeyObjectIngest = []byte("ingest") // stores ingest links + bucketKeyObjectLeases = []byte("leases") // stores leases + + bucketKeyDigest = []byte("digest") + bucketKeyMediaType = []byte("mediatype") + bucketKeySize = []byte("size") + bucketKeyImage = []byte("image") + bucketKeyRuntime = []byte("runtime") + bucketKeyName = []byte("name") + bucketKeyParent = []byte("parent") + bucketKeyChildren = []byte("children") + bucketKeyOptions = []byte("options") + bucketKeySpec = []byte("spec") + bucketKeySnapshotKey = []byte("snapshotKey") + bucketKeySnapshotter = []byte("snapshotter") + bucketKeyTarget = []byte("target") + bucketKeyExtensions = []byte("extensions") + bucketKeyCreatedAt = []byte("createdat") +) + +func getBucket(tx *bolt.Tx, keys ...[]byte) *bolt.Bucket { + bkt := tx.Bucket(keys[0]) + + for _, key := range keys[1:] { + if bkt == nil { + break + } + bkt = bkt.Bucket(key) + } + + return bkt +} + +func createBucketIfNotExists(tx *bolt.Tx, keys ...[]byte) (*bolt.Bucket, error) { + bkt, err := tx.CreateBucketIfNotExists(keys[0]) + if err != nil { + return nil, err + } + + for _, key := range keys[1:] { + bkt, err = bkt.CreateBucketIfNotExists(key) + if err != nil { + return nil, err + } + } + + return bkt, nil +} + +func namespaceLabelsBucketPath(namespace string) [][]byte { + return [][]byte{bucketKeyVersion, []byte(namespace), bucketKeyObjectLabels} +} + +func withNamespacesLabelsBucket(tx *bolt.Tx, namespace string, fn func(bkt *bolt.Bucket) error) error { + bkt, err := createBucketIfNotExists(tx, namespaceLabelsBucketPath(namespace)...) + if err != nil { + return err + } + + return fn(bkt) +} + +func getNamespaceLabelsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, namespaceLabelsBucketPath(namespace)...) +} + +func imagesBucketPath(namespace string) [][]byte { + return [][]byte{bucketKeyVersion, []byte(namespace), bucketKeyObjectImages} +} + +func createImagesBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) { + return createBucketIfNotExists(tx, imagesBucketPath(namespace)...) +} + +func getImagesBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, imagesBucketPath(namespace)...) 
+} + +func createContainersBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) { + bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers) + if err != nil { + return nil, err + } + return bkt, nil +} + +func getContainersBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers) +} + +func getContainerBucket(tx *bolt.Tx, namespace, id string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers, []byte(id)) +} + +func createSnapshotterBucket(tx *bolt.Tx, namespace, snapshotter string) (*bolt.Bucket, error) { + bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots, []byte(snapshotter)) + if err != nil { + return nil, err + } + return bkt, nil +} + +func getSnapshottersBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots) +} + +func getSnapshotterBucket(tx *bolt.Tx, namespace, snapshotter string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots, []byte(snapshotter)) +} + +func createBlobBucket(tx *bolt.Tx, namespace string, dgst digest.Digest) (*bolt.Bucket, error) { + bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(dgst.String())) + if err != nil { + return nil, err + } + return bkt, nil +} + +func getBlobsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob) +} + +func getBlobBucket(tx *bolt.Tx, namespace string, dgst digest.Digest) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(dgst.String())) +} + +func createIngestBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) { + bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngest) + if err != nil { + return nil, err + } + return bkt, nil +} + +func getIngestBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngest) +} diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go new file mode 100644 index 000000000..2ba45c72f --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/containers.go @@ -0,0 +1,430 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package metadata + +import ( + "context" + "strings" + "time" + + "github.com/boltdb/bolt" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/namespaces" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +type containerStore struct { + tx *bolt.Tx +} + +// NewContainerStore returns a Store backed by an underlying bolt DB +func NewContainerStore(tx *bolt.Tx) containers.Store { + return &containerStore{ + tx: tx, + } +} + +func (s *containerStore) Get(ctx context.Context, id string) (containers.Container, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return containers.Container{}, err + } + + bkt := getContainerBucket(s.tx, namespace, id) + if bkt == nil { + return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace) + } + + container := containers.Container{ID: id} + if err := readContainer(&container, bkt); err != nil { + return containers.Container{}, errors.Wrapf(err, "failed to read container %q", id) + } + + return container, nil +} + +func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.Container, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fs...) + if err != nil { + return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error()) + } + + bkt := getContainersBucket(s.tx, namespace) + if bkt == nil { + return nil, nil // empty store + } + + var m []containers.Container + if err := bkt.ForEach(func(k, v []byte) error { + cbkt := bkt.Bucket(k) + if cbkt == nil { + return nil + } + container := containers.Container{ID: string(k)} + + if err := readContainer(&container, cbkt); err != nil { + return errors.Wrapf(err, "failed to read container %q", string(k)) + } + + if filter.Match(adaptContainer(container)) { + m = append(m, container) + } + return nil + }); err != nil { + return nil, err + } + + return m, nil +} + +func (s *containerStore) Create(ctx context.Context, container containers.Container) (containers.Container, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return containers.Container{}, err + } + + if err := validateContainer(&container); err != nil { + return containers.Container{}, errors.Wrap(err, "create container failed validation") + } + + bkt, err := createContainersBucket(s.tx, namespace) + if err != nil { + return containers.Container{}, err + } + + cbkt, err := bkt.CreateBucket([]byte(container.ID)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID) + } + return containers.Container{}, err + } + + container.CreatedAt = time.Now().UTC() + container.UpdatedAt = container.CreatedAt + if err := writeContainer(cbkt, &container); err != nil { + return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID) + } + + return container, nil +} + +func (s *containerStore) Update(ctx context.Context, container containers.Container, fieldpaths ...string) (containers.Container, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return containers.Container{}, err + } + + if 
container.ID == "" { + return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "must specify a container id") + } + + bkt := getContainersBucket(s.tx, namespace) + if bkt == nil { + return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace) + } + + cbkt := bkt.Bucket([]byte(container.ID)) + if cbkt == nil { + return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID) + } + + var updated containers.Container + if err := readContainer(&updated, cbkt); err != nil { + return updated, errors.Wrapf(err, "failed to read container %q", container.ID) + } + createdat := updated.CreatedAt + updated.ID = container.ID + + if len(fieldpaths) == 0 { + // only allow updates to these field on full replace. + fieldpaths = []string{"labels", "spec", "extensions"} + + // Fields that are immutable must cause an error when no field paths + // are provided. This allows these fields to become mutable in the + // future. + if updated.Image != container.Image { + return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Image field is immutable") + } + + if updated.SnapshotKey != container.SnapshotKey { + return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.SnapshotKey field is immutable") + } + + if updated.Snapshotter != container.Snapshotter { + return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter field is immutable") + } + + if updated.Runtime.Name != container.Runtime.Name { + return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name field is immutable") + } + } + + // apply the field mask. If you update this code, you better follow the + // field mask rules in field_mask.proto. If you don't know what this + // is, do not update this code. 
+ for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = container.Labels[key] + continue + } + + if strings.HasPrefix(path, "extensions.") { + if updated.Extensions == nil { + updated.Extensions = map[string]types.Any{} + } + key := strings.TrimPrefix(path, "extensions.") + updated.Extensions[key] = container.Extensions[key] + continue + } + + switch path { + case "labels": + updated.Labels = container.Labels + case "spec": + updated.Spec = container.Spec + case "extensions": + updated.Extensions = container.Extensions + default: + return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID) + } + } + + if err := validateContainer(&updated); err != nil { + return containers.Container{}, errors.Wrap(err, "update failed validation") + } + + updated.CreatedAt = createdat + updated.UpdatedAt = time.Now().UTC() + if err := writeContainer(cbkt, &updated); err != nil { + return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID) + } + + return updated, nil +} + +func (s *containerStore) Delete(ctx context.Context, id string) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + bkt := getContainersBucket(s.tx, namespace) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace) + } + + if err := bkt.DeleteBucket([]byte(id)); err == bolt.ErrBucketNotFound { + return errors.Wrapf(errdefs.ErrNotFound, "container %v", id) + } + return err +} + +func validateContainer(container *containers.Container) error { + if err := identifiers.Validate(container.ID); err != nil { + return errors.Wrap(err, "container.ID") + } + + for k := range container.Extensions { + if k == "" { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Extension keys must not be zero-length") + } + } + + // image has no validation + for k, v := range container.Labels { + if err := labels.Validate(k, v); err != nil { + return errors.Wrapf(err, "containers.Labels") + } + } + + if container.Runtime.Name == "" { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name must be set") + } + + if container.Spec == nil { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Spec must be set") + } + + if container.SnapshotKey != "" && container.Snapshotter == "" { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set if container.SnapshotKey is set") + } + + return nil +} + +func readContainer(container *containers.Container, bkt *bolt.Bucket) error { + labels, err := boltutil.ReadLabels(bkt) + if err != nil { + return err + } + container.Labels = labels + + if err := boltutil.ReadTimestamps(bkt, &container.CreatedAt, &container.UpdatedAt); err != nil { + return err + } + + return bkt.ForEach(func(k, v []byte) error { + switch string(k) { + case string(bucketKeyImage): + container.Image = string(v) + case string(bucketKeyRuntime): + rbkt := bkt.Bucket(bucketKeyRuntime) + if rbkt == nil { + return nil // skip runtime. should be an error? 
+ } + + n := rbkt.Get(bucketKeyName) + if n != nil { + container.Runtime.Name = string(n) + } + + obkt := rbkt.Get(bucketKeyOptions) + if obkt == nil { + return nil + } + + var any types.Any + if err := proto.Unmarshal(obkt, &any); err != nil { + return err + } + container.Runtime.Options = &any + case string(bucketKeySpec): + var any types.Any + if err := proto.Unmarshal(v, &any); err != nil { + return err + } + container.Spec = &any + case string(bucketKeySnapshotKey): + container.SnapshotKey = string(v) + case string(bucketKeySnapshotter): + container.Snapshotter = string(v) + case string(bucketKeyExtensions): + ebkt := bkt.Bucket(bucketKeyExtensions) + if ebkt == nil { + return nil + } + + extensions := make(map[string]types.Any) + if err := ebkt.ForEach(func(k, v []byte) error { + var a types.Any + if err := proto.Unmarshal(v, &a); err != nil { + return err + } + + extensions[string(k)] = a + return nil + }); err != nil { + + return err + } + + container.Extensions = extensions + } + + return nil + }) +} + +func writeContainer(bkt *bolt.Bucket, container *containers.Container) error { + if err := boltutil.WriteTimestamps(bkt, container.CreatedAt, container.UpdatedAt); err != nil { + return err + } + + if container.Spec != nil { + spec, err := container.Spec.Marshal() + if err != nil { + return err + } + + if err := bkt.Put(bucketKeySpec, spec); err != nil { + return err + } + } + + for _, v := range [][2][]byte{ + {bucketKeyImage, []byte(container.Image)}, + {bucketKeySnapshotter, []byte(container.Snapshotter)}, + {bucketKeySnapshotKey, []byte(container.SnapshotKey)}, + } { + if err := bkt.Put(v[0], v[1]); err != nil { + return err + } + } + + if rbkt := bkt.Bucket(bucketKeyRuntime); rbkt != nil { + if err := bkt.DeleteBucket(bucketKeyRuntime); err != nil { + return err + } + } + + rbkt, err := bkt.CreateBucket(bucketKeyRuntime) + if err != nil { + return err + } + + if err := rbkt.Put(bucketKeyName, []byte(container.Runtime.Name)); err != nil { + return err + } + + if len(container.Extensions) > 0 { + ebkt, err := bkt.CreateBucketIfNotExists(bucketKeyExtensions) + if err != nil { + return err + } + + for name, ext := range container.Extensions { + p, err := proto.Marshal(&ext) + if err != nil { + return err + } + + if err := ebkt.Put([]byte(name), p); err != nil { + return err + } + } + } + + if container.Runtime.Options != nil { + data, err := proto.Marshal(container.Runtime.Options) + if err != nil { + return err + } + + if err := rbkt.Put(bucketKeyOptions, data); err != nil { + return err + } + } + + return boltutil.WriteLabels(bkt, container.Labels) +} diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go new file mode 100644 index 000000000..3a746c016 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/content.go @@ -0,0 +1,618 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package metadata + +import ( + "context" + "encoding/binary" + "strings" + "sync" + "time" + + "github.com/boltdb/bolt" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/namespaces" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type contentStore struct { + content.Store + db *DB + l sync.RWMutex +} + +// newContentStore returns a namespaced content store using an existing +// content store interface. +func newContentStore(db *DB, cs content.Store) *contentStore { + return &contentStore{ + Store: cs, + db: db, + } +} + +func (cs *contentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return content.Info{}, err + } + + var info content.Info + if err := view(ctx, cs.db, func(tx *bolt.Tx) error { + bkt := getBlobBucket(tx, ns, dgst) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst) + } + + info.Digest = dgst + return readInfo(&info, bkt) + }); err != nil { + return content.Info{}, err + } + + return info, nil +} + +func (cs *contentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return content.Info{}, err + } + + cs.l.RLock() + defer cs.l.RUnlock() + + updated := content.Info{ + Digest: info.Digest, + } + if err := update(ctx, cs.db, func(tx *bolt.Tx) error { + bkt := getBlobBucket(tx, ns, info.Digest) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", info.Digest) + } + + if err := readInfo(&updated, bkt); err != nil { + return errors.Wrapf(err, "info %q", info.Digest) + } + + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + updated.Labels = info.Labels + default: + return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest) + } + } + } else { + // Set mutable fields + updated.Labels = info.Labels + } + if err := validateInfo(&updated); err != nil { + return err + } + + updated.UpdatedAt = time.Now().UTC() + return writeInfo(&updated, bkt) + }); err != nil { + return content.Info{}, err + } + return updated, nil +} + +func (cs *contentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + filter, err := filters.ParseAll(fs...) 
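+ // Note: filters.ParseAll with no expressions returns a filter that
+ // matches everything, so a bare Walk visits every blob in the namespace.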
+ if err != nil { + return err + } + + // TODO: Batch results to keep from reading all info into memory + var infos []content.Info + if err := view(ctx, cs.db, func(tx *bolt.Tx) error { + bkt := getBlobsBucket(tx, ns) + if bkt == nil { + return nil + } + + return bkt.ForEach(func(k, v []byte) error { + dgst, err := digest.Parse(string(k)) + if err != nil { + // Not a digest, skip + return nil + } + bbkt := bkt.Bucket(k) + if bbkt == nil { + return nil + } + info := content.Info{ + Digest: dgst, + } + if err := readInfo(&info, bkt.Bucket(k)); err != nil { + return err + } + if filter.Match(adaptContentInfo(info)) { + infos = append(infos, info) + } + return nil + }) + }); err != nil { + return err + } + + for _, info := range infos { + if err := fn(info); err != nil { + return err + } + } + + return nil +} + +func (cs *contentStore) Delete(ctx context.Context, dgst digest.Digest) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + cs.l.RLock() + defer cs.l.RUnlock() + + return update(ctx, cs.db, func(tx *bolt.Tx) error { + bkt := getBlobBucket(tx, ns, dgst) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst) + } + + if err := getBlobsBucket(tx, ns).DeleteBucket([]byte(dgst.String())); err != nil { + return err + } + if err := removeContentLease(ctx, tx, dgst); err != nil { + return err + } + + // Mark content store as dirty for triggering garbage collection + cs.db.dirtyL.Lock() + cs.db.dirtyCS = true + cs.db.dirtyL.Unlock() + + return nil + }) +} + +func (cs *contentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fs...) 
+	if err != nil {
+		return nil, err
+	}
+
+	brefs := map[string]string{}
+	if err := view(ctx, cs.db, func(tx *bolt.Tx) error {
+		bkt := getIngestBucket(tx, ns)
+		if bkt == nil {
+			return nil
+		}
+
+		return bkt.ForEach(func(k, v []byte) error {
+			// TODO(dmcgowan): match name and potentially labels here
+			brefs[string(k)] = string(v)
+			return nil
+		})
+	}); err != nil {
+		return nil, err
+	}
+
+	statuses := make([]content.Status, 0, len(brefs))
+	for k, bref := range brefs {
+		status, err := cs.Store.Status(ctx, bref)
+		if err != nil {
+			if errdefs.IsNotFound(err) {
+				continue
+			}
+			return nil, err
+		}
+		status.Ref = k
+
+		if filter.Match(adaptContentStatus(status)) {
+			statuses = append(statuses, status)
+		}
+	}
+
+	return statuses, nil
+
+}
+
+func getRef(tx *bolt.Tx, ns, ref string) string {
+	bkt := getIngestBucket(tx, ns)
+	if bkt == nil {
+		return ""
+	}
+	v := bkt.Get([]byte(ref))
+	if len(v) == 0 {
+		return ""
+	}
+	return string(v)
+}
+
+func (cs *contentStore) Status(ctx context.Context, ref string) (content.Status, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return content.Status{}, err
+	}
+
+	var bref string
+	if err := view(ctx, cs.db, func(tx *bolt.Tx) error {
+		bref = getRef(tx, ns, ref)
+		if bref == "" {
+			return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref)
+		}
+
+		return nil
+	}); err != nil {
+		return content.Status{}, err
+	}
+
+	st, err := cs.Store.Status(ctx, bref)
+	if err != nil {
+		return content.Status{}, err
+	}
+	st.Ref = ref
+	return st, nil
+}
+
+func (cs *contentStore) Abort(ctx context.Context, ref string) error {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	cs.l.RLock()
+	defer cs.l.RUnlock()
+
+	return update(ctx, cs.db, func(tx *bolt.Tx) error {
+		bkt := getIngestBucket(tx, ns)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref)
+		}
+		bref := string(bkt.Get([]byte(ref)))
+		if bref == "" {
+			return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref)
+		}
+		if err := bkt.Delete([]byte(ref)); err != nil {
+			return err
+		}
+
+		return cs.Store.Abort(ctx, bref)
+	})
+
+}
+
+func (cs *contentStore) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	cs.l.RLock()
+	defer cs.l.RUnlock()
+
+	var (
+		w      content.Writer
+		exists bool
+	)
+	if err := update(ctx, cs.db, func(tx *bolt.Tx) error {
+		if expected != "" {
+			cbkt := getBlobBucket(tx, ns, expected)
+			if cbkt != nil {
+				// Add content to lease to prevent other reference removals
+				// from affecting this object during a provided lease
+				if err := addContentLease(ctx, tx, expected); err != nil {
+					return errors.Wrap(err, "unable to lease content")
+				}
+				// Return error outside of transaction to ensure
+				// commit succeeds with the lease.
+				exists = true
+				return nil
+			}
+		}
+
+		bkt, err := createIngestBucket(tx, ns)
+		if err != nil {
+			return err
+		}
+
+		var (
+			bref  string
+			brefb = bkt.Get([]byte(ref))
+		)
+
+		if brefb == nil {
+			sid, err := bkt.NextSequence()
+			if err != nil {
+				return err
+			}
+
+			bref = createKey(sid, ns, ref)
+			if err := bkt.Put([]byte(ref), []byte(bref)); err != nil {
+				return err
+			}
+		} else {
+			bref = string(brefb)
+		}
+
+		// Do not use the passed in expected value here since it was
+		// already checked against the user metadata. If the content
+		// store has the content, it must still be written before
+		// linked into the given namespace. It is possible in the future
+		// to allow content which exists in content store but not
+		// namespace to be linked here and returned an exist error, but
+		// this would require more configuration to make secure.
+		w, err = cs.Store.Writer(ctx, bref, size, "")
+		return err
+	}); err != nil {
+		return nil, err
+	}
+	if exists {
+		return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
+	}
+
+	// TODO: keep the expected in the writer to use on commit
+	// when no expected is provided there.
+	return &namespacedWriter{
+		Writer:    w,
+		ref:       ref,
+		namespace: ns,
+		db:        cs.db,
+		l:         &cs.l,
+	}, nil
+}
+
+type namespacedWriter struct {
+	content.Writer
+	ref       string
+	namespace string
+	db        transactor
+	l         *sync.RWMutex
+}
+
+func (nw *namespacedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	nw.l.RLock()
+	defer nw.l.RUnlock()
+
+	return update(ctx, nw.db, func(tx *bolt.Tx) error {
+		bkt := getIngestBucket(tx, nw.namespace)
+		if bkt != nil {
+			if err := bkt.Delete([]byte(nw.ref)); err != nil {
+				return err
+			}
+		}
+		dgst, err := nw.commit(ctx, tx, size, expected, opts...)
+		if err != nil {
+			return err
+		}
+		return addContentLease(ctx, tx, dgst)
+	})
+}
+
+func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, expected digest.Digest, opts ...content.Opt) (digest.Digest, error) {
+	var base content.Info
+	for _, opt := range opts {
+		if err := opt(&base); err != nil {
+			return "", err
+		}
+	}
+	if err := validateInfo(&base); err != nil {
+		return "", err
+	}
+
+	status, err := nw.Writer.Status()
+	if err != nil {
+		return "", err
+	}
+	if size != 0 && size != status.Offset {
+		return "", errors.Errorf("%q failed size validation: %v != %v", nw.ref, status.Offset, size)
+	}
+	size = status.Offset
+
+	actual := nw.Writer.Digest()
+
+	if err := nw.Writer.Commit(ctx, size, expected); err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return "", err
+		}
+		if getBlobBucket(tx, nw.namespace, actual) != nil {
+			return "", errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", actual)
+		}
+	}
+
+	bkt, err := createBlobBucket(tx, nw.namespace, actual)
+	if err != nil {
+		return "", err
+	}
+
+	commitTime := time.Now().UTC()
+
+	sizeEncoded, err := encodeInt(size)
+	if err != nil {
+		return "", err
+	}
+
+	if err := boltutil.WriteTimestamps(bkt, commitTime, commitTime); err != nil {
+		return "", err
+	}
+	if err := boltutil.WriteLabels(bkt, base.Labels); err != nil {
+		return "", err
+	}
+	return actual, bkt.Put(bucketKeySize, sizeEncoded)
+}
+
+func (nw *namespacedWriter) Status() (content.Status, error) {
+	st, err := nw.Writer.Status()
+	if err == nil {
+		st.Ref = nw.ref
+	}
+	return st, err
+}
+
+func (cs *contentStore) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) {
+	if err := cs.checkAccess(ctx, dgst); err != nil {
+		return nil, err
+	}
+	return cs.Store.ReaderAt(ctx, dgst)
+}
+
+func (cs *contentStore) checkAccess(ctx context.Context, dgst digest.Digest) error {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	return view(ctx, cs.db, func(tx *bolt.Tx) error {
+		bkt := getBlobBucket(tx, ns, dgst)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst)
+		}
+		return nil
+	})
+}
+
+func validateInfo(info *content.Info) error {
+	for k, v := range info.Labels {
+		if err := labels.Validate(k, v); err != nil {
+			return errors.Wrapf(err, "info.Labels")
+		}
+	}
+
+	return nil
+}
+
+func readInfo(info *content.Info, bkt *bolt.Bucket) error {
+	if err := boltutil.ReadTimestamps(bkt, &info.CreatedAt, &info.UpdatedAt); err != nil {
+		return err
+	}
+
+	labels, err := boltutil.ReadLabels(bkt)
+	if err != nil {
+		return err
+	}
+	info.Labels = labels
+
+	if v := bkt.Get(bucketKeySize); len(v) > 0 {
+		info.Size, _ = binary.Varint(v)
+	}
+
+	return nil
+}
+
+func writeInfo(info *content.Info, bkt *bolt.Bucket) error {
+	if err := boltutil.WriteTimestamps(bkt, info.CreatedAt, info.UpdatedAt); err != nil {
+		return err
+	}
+
+	if err := boltutil.WriteLabels(bkt, info.Labels); err != nil {
+		return errors.Wrapf(err, "writing labels for info %v", info.Digest)
+	}
+
+	// Write size
+	sizeEncoded, err := encodeInt(info.Size)
+	if err != nil {
+		return err
+	}
+
+	return bkt.Put(bucketKeySize, sizeEncoded)
+}
+
+func (cs *contentStore) garbageCollect(ctx context.Context) (d time.Duration, err error) {
+	cs.l.Lock()
+	t1 := time.Now()
+	defer func() {
+		if err == nil {
+			d = time.Now().Sub(t1)
+		}
+		cs.l.Unlock()
+	}()
+
+	seen := map[string]struct{}{}
+	if err := cs.db.View(func(tx *bolt.Tx) error {
+		v1bkt := tx.Bucket(bucketKeyVersion)
+		if v1bkt == nil {
+			return nil
+		}
+
+		// iterate through each namespace
+		v1c := v1bkt.Cursor()
+
+		for k, v := v1c.First(); k != nil; k, v = v1c.Next() {
+			if v != nil {
+				continue
+			}
+
+			cbkt := v1bkt.Bucket(k).Bucket(bucketKeyObjectContent)
+			if cbkt == nil {
+				continue
+			}
+			bbkt := cbkt.Bucket(bucketKeyObjectBlob)
+			if err := bbkt.ForEach(func(ck, cv []byte) error {
+				if cv == nil {
+					seen[string(ck)] = struct{}{}
+				}
+				return nil
+			}); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	}); err != nil {
+		return 0, err
+	}
+
+	err = cs.Store.Walk(ctx, func(info content.Info) error {
+		if _, ok := seen[info.Digest.String()]; !ok {
+			if err := cs.Store.Delete(ctx, info.Digest); err != nil {
+				return err
+			}
+			log.G(ctx).WithField("digest", info.Digest).Debug("removed content")
+		}
+		return nil
+	})
+	return
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go
new file mode 100644
index 000000000..f08e1d46c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/db.go
@@ -0,0 +1,410 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package metadata
+
+import (
+	"context"
+	"encoding/binary"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/gc"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/snapshots"
+	"github.com/pkg/errors"
+)
+
+const (
+	// schemaVersion represents the schema version of
+	// the database. This schema version represents the
+	// structure of the data in the database. The schema
+	// can evolve at any time but any backwards
+	// incompatible changes or structural changes require
+	// bumping the schema version.
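+	//
+	// As a rough sketch of the on-disk layout (assuming the bucket key
+	// constants defined elsewhere in this package), the pair is
+	// persisted as:
+	//
+	//	bucket "v1" (schemaVersion)
+	//	  key "version" -> varint(dbVersion)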
+	schemaVersion = "v1"
+
+	// dbVersion represents updates to the schema
+	// version which are additions and compatible with
+	// prior version of the same schema.
+	dbVersion = 1
+)
+
+// DB represents a metadata database backed by a bolt
+// database. The database is fully namespaced and stores
+// image, container, namespace, snapshot, and content data
+// while proxying data shared across namespaces to backend
+// datastores for content and snapshots.
+type DB struct {
+	db *bolt.DB
+	ss map[string]*snapshotter
+	cs *contentStore
+
+	// wlock is used to protect access to the data structures during garbage
+	// collection. While the wlock is held no writable transactions can be
+	// opened, preventing changes from occurring between the mark and
+	// sweep phases without preventing read transactions.
+	wlock sync.RWMutex
+
+	// dirty flags and lock keep track of datastores which have had deletions
+	// since the last garbage collection. These datastores will be garbage
+	// collected during the next garbage collection.
+	dirtyL  sync.Mutex
+	dirtySS map[string]struct{}
+	dirtyCS bool
+
+	// mutationCallbacks are called after each mutation with the flag
+	// set indicating whether any dirty flags are set
+	mutationCallbacks []func(bool)
+}
+
+// NewDB creates a new metadata database using the provided
+// bolt database, content store, and snapshotters.
+func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshots.Snapshotter) *DB {
+	m := &DB{
+		db:      db,
+		ss:      make(map[string]*snapshotter, len(ss)),
+		dirtySS: map[string]struct{}{},
+	}
+
+	// Initialize data stores
+	m.cs = newContentStore(m, cs)
+	for name, sn := range ss {
+		m.ss[name] = newSnapshotter(m, name, sn)
+	}
+
+	return m
+}
+
+// Init ensures the database is at the correct version
+// and performs any needed migrations.
+func (m *DB) Init(ctx context.Context) error {
+	// errSkip is used when no migration or version needs to be written
+	// to the database and the transaction can be immediately rolled
+	// back rather than performing a much slower and unnecessary commit.
+	var errSkip = errors.New("skip update")
+
+	err := m.db.Update(func(tx *bolt.Tx) error {
+		var (
+			// current schema and version
+			schema  = "v0"
+			version = 0
+		)
+
+		// i represents the index of the first migration
+		// which must be run to get the database up to date.
+		// The migration's version will be checked in reverse
+		// order, decrementing i for each migration which
+		// represents a version newer than the current
+		// database version
+		i := len(migrations)
+
+		for ; i > 0; i-- {
+			migration := migrations[i-1]
+
+			bkt := tx.Bucket([]byte(migration.schema))
+			if bkt == nil {
+				// Hasn't encountered another schema, go to next migration
+				if schema == "v0" {
+					continue
+				}
+				break
+			}
+			if schema == "v0" {
+				schema = migration.schema
+				vb := bkt.Get(bucketKeyDBVersion)
+				if vb != nil {
+					v, _ := binary.Varint(vb)
+					version = int(v)
+				}
+			}
+
+			if version >= migration.version {
+				break
+			}
+		}
+
+		// Previous version of database found
+		if schema != "v0" {
+			updates := migrations[i:]
+
+			// No migration updates, return immediately
+			if len(updates) == 0 {
+				return errSkip
+			}
+
+			for _, m := range updates {
+				t0 := time.Now()
+				if err := m.migrate(tx); err != nil {
+					return errors.Wrapf(err, "failed to migrate to %s.%d", m.schema, m.version)
+				}
+				log.G(ctx).WithField("d", time.Now().Sub(t0)).Debugf("finished database migration to %s.%d", m.schema, m.version)
+			}
+		}
+
+		bkt, err := tx.CreateBucketIfNotExists(bucketKeyVersion)
+		if err != nil {
+			return err
+		}
+
+		versionEncoded, err := encodeInt(dbVersion)
+		if err != nil {
+			return err
+		}
+
+		return bkt.Put(bucketKeyDBVersion, versionEncoded)
+	})
+	if err == errSkip {
+		err = nil
+	}
+	return err
+}
+
+// ContentStore returns a namespaced content store
+// proxied to a content store.
+func (m *DB) ContentStore() content.Store {
+	if m.cs == nil {
+		return nil
+	}
+	return m.cs
+}
+
+// Snapshotter returns a namespaced content store for
+// the requested snapshotter name proxied to a snapshotter.
+func (m *DB) Snapshotter(name string) snapshots.Snapshotter {
+	sn, ok := m.ss[name]
+	if !ok {
+		return nil
+	}
+	return sn
+}
+
+// View runs a readonly transaction on the metadata store.
+func (m *DB) View(fn func(*bolt.Tx) error) error {
+	return m.db.View(fn)
+}
+
+// Update runs a writable transaction on the metadata store.
+func (m *DB) Update(fn func(*bolt.Tx) error) error {
+	m.wlock.RLock()
+	defer m.wlock.RUnlock()
+	err := m.db.Update(fn)
+	if err == nil {
+		m.dirtyL.Lock()
+		dirty := m.dirtyCS || len(m.dirtySS) > 0
+		for _, fn := range m.mutationCallbacks {
+			fn(dirty)
+		}
+		m.dirtyL.Unlock()
+	}
+
+	return err
+}
+
+// RegisterMutationCallback registers a function to be called after a metadata
+// mutation has been performed.
+//
+// The callback function is passed a boolean argument indicating whether a
+// deletion has occurred since the last garbage collection.
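+//
+// A minimal sketch of a caller (hypothetical; not part of this file):
+//
+//	db.RegisterMutationCallback(func(dirty bool) {
+//		if dirty {
+//			// a deletion occurred since the last garbage collection,
+//			// so a GC pass could be scheduled here
+//		}
+//	})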
+func (m *DB) RegisterMutationCallback(fn func(bool)) { + m.dirtyL.Lock() + m.mutationCallbacks = append(m.mutationCallbacks, fn) + m.dirtyL.Unlock() +} + +// GCStats holds the duration for the different phases of the garbage collector +type GCStats struct { + MetaD time.Duration + ContentD time.Duration + SnapshotD map[string]time.Duration +} + +// Elapsed returns the duration which elapsed during a collection +func (s GCStats) Elapsed() time.Duration { + return s.MetaD +} + +// GarbageCollect starts garbage collection +func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { + m.wlock.Lock() + t1 := time.Now() + + marked, err := m.getMarked(ctx) + if err != nil { + m.wlock.Unlock() + return nil, err + } + + m.dirtyL.Lock() + + if err := m.db.Update(func(tx *bolt.Tx) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + rm := func(ctx context.Context, n gc.Node) error { + if _, ok := marked[n]; ok { + return nil + } + + if n.Type == ResourceSnapshot { + if idx := strings.IndexRune(n.Key, '/'); idx > 0 { + m.dirtySS[n.Key[:idx]] = struct{}{} + } + } else if n.Type == ResourceContent { + m.dirtyCS = true + } + return remove(ctx, tx, n) + } + + if err := scanAll(ctx, tx, rm); err != nil { + return errors.Wrap(err, "failed to scan and remove") + } + + return nil + }); err != nil { + m.dirtyL.Unlock() + m.wlock.Unlock() + return nil, err + } + + var stats GCStats + var wg sync.WaitGroup + + if len(m.dirtySS) > 0 { + var sl sync.Mutex + stats.SnapshotD = map[string]time.Duration{} + wg.Add(len(m.dirtySS)) + for snapshotterName := range m.dirtySS { + log.G(ctx).WithField("snapshotter", snapshotterName).Debug("schedule snapshotter cleanup") + go func(snapshotterName string) { + st1 := time.Now() + m.cleanupSnapshotter(snapshotterName) + + sl.Lock() + stats.SnapshotD[snapshotterName] = time.Now().Sub(st1) + sl.Unlock() + + wg.Done() + }(snapshotterName) + } + m.dirtySS = map[string]struct{}{} + } + + if m.dirtyCS { + wg.Add(1) + log.G(ctx).Debug("schedule content cleanup") + go func() { + ct1 := time.Now() + m.cleanupContent() + stats.ContentD = time.Now().Sub(ct1) + wg.Done() + }() + m.dirtyCS = false + } + + m.dirtyL.Unlock() + + stats.MetaD = time.Now().Sub(t1) + m.wlock.Unlock() + + wg.Wait() + + return stats, err +} + +func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { + var marked map[gc.Node]struct{} + if err := m.db.View(func(tx *bolt.Tx) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var ( + nodes []gc.Node + wg sync.WaitGroup + roots = make(chan gc.Node) + ) + wg.Add(1) + go func() { + defer wg.Done() + for n := range roots { + nodes = append(nodes, n) + } + }() + // Call roots + if err := scanRoots(ctx, tx, roots); err != nil { + cancel() + return err + } + close(roots) + wg.Wait() + + refs := func(n gc.Node) ([]gc.Node, error) { + var sn []gc.Node + if err := references(ctx, tx, n, func(nn gc.Node) { + sn = append(sn, nn) + }); err != nil { + return nil, err + } + return sn, nil + } + + reachable, err := gc.Tricolor(nodes, refs) + if err != nil { + return err + } + marked = reachable + return nil + }); err != nil { + return nil, err + } + return marked, nil +} + +func (m *DB) cleanupSnapshotter(name string) (time.Duration, error) { + ctx := context.Background() + sn, ok := m.ss[name] + if !ok { + return 0, nil + } + + d, err := sn.garbageCollect(ctx) + logger := log.G(ctx).WithField("snapshotter", name) + if err != nil { + logger.WithError(err).Warn("snapshot garbage collection failed") + } else { + 
logger.WithField("d", d).Debugf("snapshot garbage collected") + } + return d, err +} + +func (m *DB) cleanupContent() (time.Duration, error) { + ctx := context.Background() + if m.cs == nil { + return 0, nil + } + + d, err := m.cs.garbageCollect(ctx) + if err != nil { + log.G(ctx).WithError(err).Warn("content garbage collection failed") + } else { + log.G(ctx).WithField("d", d).Debugf("content garbage collected") + } + + return d, err +} diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go new file mode 100644 index 000000000..d13047eb1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/gc.go @@ -0,0 +1,405 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "context" + "fmt" + "strings" + + "github.com/boltdb/bolt" + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +const ( + // ResourceUnknown specifies an unknown resource + ResourceUnknown gc.ResourceType = iota + // ResourceContent specifies a content resource + ResourceContent + // ResourceSnapshot specifies a snapshot resource + ResourceSnapshot + // ResourceContainer specifies a container resource + ResourceContainer + // ResourceTask specifies a task resource + ResourceTask +) + +var ( + labelGCRoot = []byte("containerd.io/gc.root") + labelGCSnapRef = []byte("containerd.io/gc.ref.snapshot.") + labelGCContentRef = []byte("containerd.io/gc.ref.content") +) + +func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + // iterate through each namespace + v1c := v1bkt.Cursor() + + for k, v := v1c.First(); k != nil; k, v = v1c.Next() { + if v != nil { + continue + } + nbkt := v1bkt.Bucket(k) + ns := string(k) + + lbkt := nbkt.Bucket(bucketKeyObjectLeases) + if lbkt != nil { + if err := lbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + libkt := lbkt.Bucket(k) + + cbkt := libkt.Bucket(bucketKeyObjectContent) + if cbkt != nil { + if err := cbkt.ForEach(func(k, v []byte) error { + select { + case nc <- gcnode(ResourceContent, ns, string(k)): + case <-ctx.Done(): + return ctx.Err() + } + return nil + }); err != nil { + return err + } + } + + sbkt := libkt.Bucket(bucketKeyObjectSnapshots) + if sbkt != nil { + if err := sbkt.ForEach(func(sk, sv []byte) error { + if sv != nil { + return nil + } + snbkt := sbkt.Bucket(sk) + + return snbkt.ForEach(func(k, v []byte) error { + select { + case nc <- gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k)): + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + }); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + } + + ibkt := nbkt.Bucket(bucketKeyObjectImages) + if ibkt != nil { + if err := ibkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + + target := ibkt.Bucket(k).Bucket(bucketKeyTarget) + if target 
!= nil { + contentKey := string(target.Get(bucketKeyDigest)) + select { + case nc <- gcnode(ResourceContent, ns, contentKey): + case <-ctx.Done(): + return ctx.Err() + } + } + return sendSnapshotRefs(ns, ibkt.Bucket(k), func(n gc.Node) { + select { + case nc <- n: + case <-ctx.Done(): + } + }) + }); err != nil { + return err + } + } + + cbkt := nbkt.Bucket(bucketKeyObjectContent) + if cbkt != nil { + cbkt = cbkt.Bucket(bucketKeyObjectBlob) + } + if cbkt != nil { + if err := cbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + return sendRootRef(ctx, nc, gcnode(ResourceContent, ns, string(k)), cbkt.Bucket(k)) + }); err != nil { + return err + } + } + + cbkt = nbkt.Bucket(bucketKeyObjectContainers) + if cbkt != nil { + if err := cbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + snapshotter := string(cbkt.Bucket(k).Get(bucketKeySnapshotter)) + if snapshotter != "" { + ss := string(cbkt.Bucket(k).Get(bucketKeySnapshotKey)) + select { + case nc <- gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, ss)): + case <-ctx.Done(): + return ctx.Err() + } + } + + // TODO: Send additional snapshot refs through labels + return sendSnapshotRefs(ns, cbkt.Bucket(k), func(n gc.Node) { + select { + case nc <- n: + case <-ctx.Done(): + } + }) + }); err != nil { + return err + } + } + + sbkt := nbkt.Bucket(bucketKeyObjectSnapshots) + if sbkt != nil { + if err := sbkt.ForEach(func(sk, sv []byte) error { + if sv != nil { + return nil + } + snbkt := sbkt.Bucket(sk) + + return snbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + + return sendRootRef(ctx, nc, gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k)), snbkt.Bucket(k)) + }) + }); err != nil { + return err + } + } + } + return nil +} + +func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error { + if node.Type == ResourceContent { + bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(node.Key)) + if bkt == nil { + // Node may be created from dead edge + return nil + } + + if err := sendSnapshotRefs(node.Namespace, bkt, fn); err != nil { + return err + } + return sendContentRefs(node.Namespace, bkt, fn) + } else if node.Type == ResourceSnapshot { + parts := strings.SplitN(node.Key, "/", 2) + if len(parts) != 2 { + return errors.Errorf("invalid snapshot gc key %s", node.Key) + } + ss := parts[0] + name := parts[1] + + bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectSnapshots, []byte(ss), []byte(name)) + if bkt == nil { + getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectSnapshots).ForEach(func(k, v []byte) error { + return nil + }) + + // Node may be created from dead edge + return nil + } + + if pv := bkt.Get(bucketKeyParent); len(pv) > 0 { + fn(gcnode(ResourceSnapshot, node.Namespace, fmt.Sprintf("%s/%s", ss, pv))) + } + + return sendSnapshotRefs(node.Namespace, bkt, fn) + } + + return nil +} + +func scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx context.Context, n gc.Node) error) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + // iterate through each namespace + v1c := v1bkt.Cursor() + + for k, v := v1c.First(); k != nil; k, v = v1c.Next() { + if v != nil { + continue + } + nbkt := v1bkt.Bucket(k) + ns := string(k) + + sbkt := nbkt.Bucket(bucketKeyObjectSnapshots) + if sbkt != nil { + if err := sbkt.ForEach(func(sk, sv []byte) error { + if sv != nil { + return nil + } + snbkt := 
sbkt.Bucket(sk) + return snbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + node := gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k)) + return fn(ctx, node) + }) + }); err != nil { + return err + } + } + + cbkt := nbkt.Bucket(bucketKeyObjectContent) + if cbkt != nil { + cbkt = cbkt.Bucket(bucketKeyObjectBlob) + } + if cbkt != nil { + if err := cbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + node := gcnode(ResourceContent, ns, string(k)) + return fn(ctx, node) + }); err != nil { + return err + } + } + } + + return nil +} + +func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + nsbkt := v1bkt.Bucket([]byte(node.Namespace)) + if nsbkt == nil { + return nil + } + + switch node.Type { + case ResourceContent: + cbkt := nsbkt.Bucket(bucketKeyObjectContent) + if cbkt != nil { + cbkt = cbkt.Bucket(bucketKeyObjectBlob) + } + if cbkt != nil { + log.G(ctx).WithField("key", node.Key).Debug("remove content") + return cbkt.DeleteBucket([]byte(node.Key)) + } + case ResourceSnapshot: + sbkt := nsbkt.Bucket(bucketKeyObjectSnapshots) + if sbkt != nil { + parts := strings.SplitN(node.Key, "/", 2) + if len(parts) != 2 { + return errors.Errorf("invalid snapshot gc key %s", node.Key) + } + ssbkt := sbkt.Bucket([]byte(parts[0])) + if ssbkt != nil { + log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("remove snapshot") + return ssbkt.DeleteBucket([]byte(parts[1])) + } + } + } + + return nil +} + +// sendSnapshotRefs sends all snapshot references referred to by the labels in the bkt +func sendSnapshotRefs(ns string, bkt *bolt.Bucket, fn func(gc.Node)) error { + lbkt := bkt.Bucket(bucketKeyObjectLabels) + if lbkt != nil { + lc := lbkt.Cursor() + + for k, v := lc.Seek(labelGCSnapRef); k != nil && strings.HasPrefix(string(k), string(labelGCSnapRef)); k, v = lc.Next() { + snapshotter := string(k[len(labelGCSnapRef):]) + fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, v))) + } + } + return nil +} + +// sendContentRefs sends all content references referred to by the labels in the bkt +func sendContentRefs(ns string, bkt *bolt.Bucket, fn func(gc.Node)) error { + lbkt := bkt.Bucket(bucketKeyObjectLabels) + if lbkt != nil { + lc := lbkt.Cursor() + + labelRef := string(labelGCContentRef) + for k, v := lc.Seek(labelGCContentRef); k != nil && strings.HasPrefix(string(k), labelRef); k, v = lc.Next() { + if ks := string(k); ks != labelRef { + // Allow reference naming, ignore names + if ks[len(labelRef)] != '.' 
{ + continue + } + } + + fn(gcnode(ResourceContent, ns, string(v))) + } + } + return nil +} + +func isRootRef(bkt *bolt.Bucket) bool { + lbkt := bkt.Bucket(bucketKeyObjectLabels) + if lbkt != nil { + rv := lbkt.Get(labelGCRoot) + if rv != nil { + // TODO: interpret rv as a timestamp and skip if expired + return true + } + } + return false +} + +func sendRootRef(ctx context.Context, nc chan<- gc.Node, n gc.Node, bkt *bolt.Bucket) error { + if isRootRef(bkt) { + select { + case nc <- n: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil +} + +func gcnode(t gc.ResourceType, ns, key string) gc.Node { + return gc.Node{ + Type: t, + Namespace: ns, + Key: key, + } +} diff --git a/vendor/github.com/containerd/containerd/metadata/images.go b/vendor/github.com/containerd/containerd/metadata/images.go new file mode 100644 index 000000000..62b1179cd --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/images.go @@ -0,0 +1,369 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "context" + "encoding/binary" + "fmt" + "strings" + "time" + + "github.com/boltdb/bolt" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/namespaces" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type imageStore struct { + db *DB +} + +// NewImageStore returns a store backed by a bolt DB +func NewImageStore(db *DB) images.Store { + return &imageStore{db: db} +} + +func (s *imageStore) Get(ctx context.Context, name string) (images.Image, error) { + var image images.Image + + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return images.Image{}, err + } + + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getImagesBucket(tx, namespace) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + } + + ibkt := bkt.Bucket([]byte(name)) + if ibkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + } + + image.Name = name + if err := readImage(&image, ibkt); err != nil { + return errors.Wrapf(err, "image %q", name) + } + + return nil + }); err != nil { + return images.Image{}, err + } + + return image, nil +} + +func (s *imageStore) List(ctx context.Context, fs ...string) ([]images.Image, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fs...) 
+ if err != nil { + return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error()) + } + + var m []images.Image + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getImagesBucket(tx, namespace) + if bkt == nil { + return nil // empty store + } + + return bkt.ForEach(func(k, v []byte) error { + var ( + image = images.Image{ + Name: string(k), + } + kbkt = bkt.Bucket(k) + ) + + if err := readImage(&image, kbkt); err != nil { + return err + } + + if filter.Match(adaptImage(image)) { + m = append(m, image) + } + return nil + }) + }); err != nil { + return nil, err + } + + return m, nil +} + +func (s *imageStore) Create(ctx context.Context, image images.Image) (images.Image, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return images.Image{}, err + } + + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + if err := validateImage(&image); err != nil { + return err + } + + bkt, err := createImagesBucket(tx, namespace) + if err != nil { + return err + } + + ibkt, err := bkt.CreateBucket([]byte(image.Name)) + if err != nil { + if err != bolt.ErrBucketExists { + return err + } + + return errors.Wrapf(errdefs.ErrAlreadyExists, "image %q", image.Name) + } + + image.CreatedAt = time.Now().UTC() + image.UpdatedAt = image.CreatedAt + return writeImage(ibkt, &image) + }); err != nil { + return images.Image{}, err + } + + return image, nil +} + +func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return images.Image{}, err + } + + if image.Name == "" { + return images.Image{}, errors.Wrapf(errdefs.ErrInvalidArgument, "image name is required for update") + } + + var updated images.Image + + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt, err := createImagesBucket(tx, namespace) + if err != nil { + return err + } + + ibkt := bkt.Bucket([]byte(image.Name)) + if ibkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "image %q", image.Name) + } + + if err := readImage(&updated, ibkt); err != nil { + return errors.Wrapf(err, "image %q", image.Name) + } + createdat := updated.CreatedAt + updated.Name = image.Name + + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = image.Labels[key] + continue + } + + switch path { + case "labels": + updated.Labels = image.Labels + case "target": + // NOTE(stevvooe): While we allow setting individual labels, we + // only support replacing the target as a unit, since that is + // commonly pulled as a unit from other sources. It often doesn't + // make sense to modify the size or digest without touching the + // mediatype, as well, for example. 
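+					// For example (illustrative only; img and newDesc
+					// are hypothetical), replacing just the digest
+					// still sends the whole descriptor:
+					//
+					//	img.Target = newDesc // digest, size and mediatype
+					//	store.Update(ctx, img, "target")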
+ updated.Target = image.Target + default: + return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on image %q", path, image.Name) + } + } + } else { + updated = image + } + + if err := validateImage(&updated); err != nil { + return err + } + + updated.CreatedAt = createdat + updated.UpdatedAt = time.Now().UTC() + return writeImage(ibkt, &updated) + }); err != nil { + return images.Image{}, err + } + + return updated, nil + +} + +func (s *imageStore) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + return update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getImagesBucket(tx, namespace) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + } + + err = bkt.DeleteBucket([]byte(name)) + if err == bolt.ErrBucketNotFound { + return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + } + + // A reference to a piece of content has been removed, + // mark content store as dirty for triggering garbage + // collection + s.db.dirtyL.Lock() + s.db.dirtyCS = true + s.db.dirtyL.Unlock() + + return err + }) +} + +func validateImage(image *images.Image) error { + if image.Name == "" { + return errors.Wrapf(errdefs.ErrInvalidArgument, "image name must not be empty") + } + + for k, v := range image.Labels { + if err := labels.Validate(k, v); err != nil { + return errors.Wrapf(err, "image.Labels") + } + } + + return validateTarget(&image.Target) +} + +func validateTarget(target *ocispec.Descriptor) error { + // NOTE(stevvooe): Only validate fields we actually store. + + if err := target.Digest.Validate(); err != nil { + return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.Digest %q invalid: %v", target.Digest, err) + } + + if target.Size <= 0 { + return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.Size must be greater than zero") + } + + if target.MediaType == "" { + return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.MediaType must be set") + } + + return nil +} + +func readImage(image *images.Image, bkt *bolt.Bucket) error { + if err := boltutil.ReadTimestamps(bkt, &image.CreatedAt, &image.UpdatedAt); err != nil { + return err + } + + labels, err := boltutil.ReadLabels(bkt) + if err != nil { + return err + } + image.Labels = labels + + tbkt := bkt.Bucket(bucketKeyTarget) + if tbkt == nil { + return errors.New("unable to read target bucket") + } + return tbkt.ForEach(func(k, v []byte) error { + if v == nil { + return nil // skip it? a bkt maybe? + } + + // TODO(stevvooe): This is why we need to use byte values for + // keys, rather than full arrays. 
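+		// As a sketch (assuming this package's bucket key constants),
+		// the layout written by writeImage below is:
+		//
+		//	target/
+		//	  "digest"    -> digest string
+		//	  "mediatype" -> media type string
+		//	  "size"      -> varint-encoded int64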
+		switch string(k) {
+		case string(bucketKeyDigest):
+			image.Target.Digest = digest.Digest(v)
+		case string(bucketKeyMediaType):
+			image.Target.MediaType = string(v)
+		case string(bucketKeySize):
+			image.Target.Size, _ = binary.Varint(v)
+		}
+
+		return nil
+	})
+}
+
+func writeImage(bkt *bolt.Bucket, image *images.Image) error {
+	if err := boltutil.WriteTimestamps(bkt, image.CreatedAt, image.UpdatedAt); err != nil {
+		return err
+	}
+
+	if err := boltutil.WriteLabels(bkt, image.Labels); err != nil {
+		return errors.Wrapf(err, "writing labels for image %v", image.Name)
+	}
+
+	// write the target bucket
+	tbkt, err := bkt.CreateBucketIfNotExists(bucketKeyTarget)
+	if err != nil {
+		return err
+	}
+
+	sizeEncoded, err := encodeInt(image.Target.Size)
+	if err != nil {
+		return err
+	}
+
+	for _, v := range [][2][]byte{
+		{bucketKeyDigest, []byte(image.Target.Digest)},
+		{bucketKeyMediaType, []byte(image.Target.MediaType)},
+		{bucketKeySize, sizeEncoded},
+	} {
+		if err := tbkt.Put(v[0], v[1]); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeInt(i int64) ([]byte, error) {
+	var (
+		buf      [binary.MaxVarintLen64]byte
+		iEncoded = buf[:]
+	)
+	iEncoded = iEncoded[:binary.PutVarint(iEncoded, i)]
+
+	if len(iEncoded) == 0 {
+		return nil, fmt.Errorf("failed encoding integer = %v", i)
+	}
+	return iEncoded, nil
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/leases.go b/vendor/github.com/containerd/containerd/metadata/leases.go
new file mode 100644
index 000000000..317f000ce
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/leases.go
@@ -0,0 +1,257 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package metadata
+
+import (
+	"context"
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/leases"
+	"github.com/containerd/containerd/metadata/boltutil"
+	"github.com/containerd/containerd/namespaces"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// Lease retains resources to prevent garbage collection before
+// the resources can be fully referenced.
+type Lease struct {
+	ID        string
+	CreatedAt time.Time
+	Labels    map[string]string
+
+	Content   []string
+	Snapshots map[string][]string
+}
+
+// LeaseManager manages the create/delete lifecycle of leases
+// and also returns existing leases
+type LeaseManager struct {
+	tx *bolt.Tx
+}
+
+// NewLeaseManager creates a new lease manager for managing leases using
+// the provided database transaction.
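+//
+// A minimal usage sketch (hypothetical caller; "db" is an open *bolt.DB
+// and ctx carries a namespace):
+//
+//	db.Update(func(tx *bolt.Tx) error {
+//		_, err := NewLeaseManager(tx).Create(ctx, "transient", nil)
+//		return err
+//	})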
+func NewLeaseManager(tx *bolt.Tx) *LeaseManager {
+	return &LeaseManager{
+		tx: tx,
+	}
+}
+
+// Create creates a new lease using the provided lease ID and labels
+func (lm *LeaseManager) Create(ctx context.Context, lid string, labels map[string]string) (Lease, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return Lease{}, err
+	}
+
+	topbkt, err := createBucketIfNotExists(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
+	if err != nil {
+		return Lease{}, err
+	}
+
+	txbkt, err := topbkt.CreateBucket([]byte(lid))
+	if err != nil {
+		if err == bolt.ErrBucketExists {
+			err = errdefs.ErrAlreadyExists
+		}
+		return Lease{}, errors.Wrapf(err, "lease %q", lid)
+	}
+
+	t := time.Now().UTC()
+	createdAt, err := t.MarshalBinary()
+	if err != nil {
+		return Lease{}, err
+	}
+	if err := txbkt.Put(bucketKeyCreatedAt, createdAt); err != nil {
+		return Lease{}, err
+	}
+
+	if labels != nil {
+		if err := boltutil.WriteLabels(txbkt, labels); err != nil {
+			return Lease{}, err
+		}
+	}
+
+	return Lease{
+		ID:        lid,
+		CreatedAt: t,
+		Labels:    labels,
+	}, nil
+}
+
+// Delete deletes the lease with the provided lease ID
+func (lm *LeaseManager) Delete(ctx context.Context, lid string) error {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	topbkt := getBucket(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
+	if topbkt == nil {
+		return nil
+	}
+	if err := topbkt.DeleteBucket([]byte(lid)); err != nil && err != bolt.ErrBucketNotFound {
+		return err
+	}
+	return nil
+}
+
+// List lists all active leases
+func (lm *LeaseManager) List(ctx context.Context, includeResources bool, filter ...string) ([]Lease, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	var leases []Lease
+
+	topbkt := getBucket(lm.tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
+	if topbkt == nil {
+		return leases, nil
+	}
+
+	if err := topbkt.ForEach(func(k, v []byte) error {
+		if v != nil {
+			return nil
+		}
+		txbkt := topbkt.Bucket(k)
+
+		l := Lease{
+			ID: string(k),
+		}
+
+		if v := txbkt.Get(bucketKeyCreatedAt); v != nil {
+			t := &l.CreatedAt
+			if err := t.UnmarshalBinary(v); err != nil {
+				return err
+			}
+		}
+
+		labels, err := boltutil.ReadLabels(txbkt)
+		if err != nil {
+			return err
+		}
+		l.Labels = labels
+
+		// TODO: Read Snapshots
+		// TODO: Read Content
+
+		leases = append(leases, l)
+
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return leases, nil
+}
+
+func addSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) error {
+	lid, ok := leases.Lease(ctx)
+	if !ok {
+		return nil
+	}
+
+	namespace, ok := namespaces.Namespace(ctx)
+	if !ok {
+		panic("namespace must already be checked")
+	}
+
+	bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid))
+	if bkt == nil {
+		return errors.Wrap(errdefs.ErrNotFound, "lease does not exist")
+	}
+
+	bkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectSnapshots)
+	if err != nil {
+		return err
+	}
+
+	bkt, err = bkt.CreateBucketIfNotExists([]byte(snapshotter))
+	if err != nil {
+		return err
+	}
+
+	return bkt.Put([]byte(key), nil)
+}
+
+func removeSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) error {
+	lid, ok := leases.Lease(ctx)
+	if !ok {
+		return nil
+	}
+
+	namespace, ok := namespaces.Namespace(ctx)
+	if !ok {
+		panic("namespace must already be checked")
+	}
+
+	bkt := getBucket(tx, bucketKeyVersion, []byte(namespace),
bucketKeyObjectLeases, []byte(lid), bucketKeyObjectSnapshots, []byte(snapshotter)) + if bkt == nil { + // Key does not exist so we return nil + return nil + } + + return bkt.Delete([]byte(key)) +} + +func addContentLease(ctx context.Context, tx *bolt.Tx, dgst digest.Digest) error { + lid, ok := leases.Lease(ctx) + if !ok { + return nil + } + + namespace, ok := namespaces.Namespace(ctx) + if !ok { + panic("namespace must already be required") + } + + bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid)) + if bkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "lease does not exist") + } + + bkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectContent) + if err != nil { + return err + } + + return bkt.Put([]byte(dgst.String()), nil) +} + +func removeContentLease(ctx context.Context, tx *bolt.Tx, dgst digest.Digest) error { + lid, ok := leases.Lease(ctx) + if !ok { + return nil + } + + namespace, ok := namespaces.Namespace(ctx) + if !ok { + panic("namespace must already be checked") + } + + bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid), bucketKeyObjectContent) + if bkt == nil { + // Key does not exist so we return nil + return nil + } + + return bkt.Delete([]byte(dgst.String())) +} diff --git a/vendor/github.com/containerd/containerd/metadata/migrations.go b/vendor/github.com/containerd/containerd/metadata/migrations.go new file mode 100644 index 000000000..5be5f8301 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/migrations.go @@ -0,0 +1,101 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import "github.com/boltdb/bolt" + +type migration struct { + schema string + version int + migrate func(*bolt.Tx) error +} + +// migrations stores the list of database migrations +// for each update to the database schema. The migrations +// array MUST be ordered by version from least to greatest. +// The last entry in the array should correspond to the +// schemaVersion and dbVersion constants. +// A migration test MUST be added for each migration in +// the array. +// The migrate function can safely assume the version +// of the data it is migrating from is the previous version +// of the database. 
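+//
+// For example, a hypothetical follow-up migration to this schema would be
+// appended as:
+//
+//	{schema: "v1", version: 2, migrate: someNewMigration}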
+var migrations = []migration{
+	{
+		schema:  "v1",
+		version: 1,
+		migrate: addChildLinks,
+	},
+}
+
+// addChildLinks adds a children key to the snapshotters so that snapshot
+// entries which have children cannot be removed
+func addChildLinks(tx *bolt.Tx) error {
+	v1bkt := tx.Bucket(bucketKeyVersion)
+	if v1bkt == nil {
+		return nil
+	}
+
+	// iterate through each namespace
+	v1c := v1bkt.Cursor()
+
+	for k, v := v1c.First(); k != nil; k, v = v1c.Next() {
+		if v != nil {
+			continue
+		}
+		nbkt := v1bkt.Bucket(k)
+
+		sbkt := nbkt.Bucket(bucketKeyObjectSnapshots)
+		if sbkt != nil {
+			// Iterate through each snapshotter
+			if err := sbkt.ForEach(func(sk, sv []byte) error {
+				if sv != nil {
+					return nil
+				}
+				snbkt := sbkt.Bucket(sk)
+
+				// Iterate through each snapshot
+				return snbkt.ForEach(func(k, v []byte) error {
+					if v != nil {
+						return nil
+					}
+					parent := snbkt.Bucket(k).Get(bucketKeyParent)
+					if len(parent) > 0 {
+						pbkt := snbkt.Bucket(parent)
+						if pbkt == nil {
+							// Not enforcing consistency during migration, skip
+							return nil
+						}
+						cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren)
+						if err != nil {
+							return err
+						}
+						if err := cbkt.Put(k, nil); err != nil {
+							return err
+						}
+					}
+
+					return nil
+				})
+			}); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/namespaces.go b/vendor/github.com/containerd/containerd/metadata/namespaces.go
new file mode 100644
index 000000000..000bef6c9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/namespaces.go
@@ -0,0 +1,186 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package metadata
+
+import (
+	"context"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/errdefs"
+	l "github.com/containerd/containerd/labels"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/pkg/errors"
+)
+
+type namespaceStore struct {
+	tx *bolt.Tx
+}
+
+// NewNamespaceStore returns a store backed by a bolt DB
+func NewNamespaceStore(tx *bolt.Tx) namespaces.Store {
+	return &namespaceStore{tx: tx}
+}
+
+func (s *namespaceStore) Create(ctx context.Context, namespace string, labels map[string]string) error {
+	topbkt, err := createBucketIfNotExists(s.tx, bucketKeyVersion)
+	if err != nil {
+		return err
+	}
+
+	if err := namespaces.Validate(namespace); err != nil {
+		return err
+	}
+
+	for k, v := range labels {
+		if err := l.Validate(k, v); err != nil {
+			return errors.Wrapf(err, "namespace.Labels")
+		}
+	}
+
+	// provides the already exists error.
+ bkt, err := topbkt.CreateBucket([]byte(namespace)) + if err != nil { + if err == bolt.ErrBucketExists { + return errors.Wrapf(errdefs.ErrAlreadyExists, "namespace %q", namespace) + } + + return err + } + + lbkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectLabels) + if err != nil { + return err + } + + for k, v := range labels { + if err := lbkt.Put([]byte(k), []byte(v)); err != nil { + return err + } + } + + return nil +} + +func (s *namespaceStore) Labels(ctx context.Context, namespace string) (map[string]string, error) { + labels := map[string]string{} + + bkt := getNamespaceLabelsBucket(s.tx, namespace) + if bkt == nil { + return labels, nil + } + + if err := bkt.ForEach(func(k, v []byte) error { + labels[string(k)] = string(v) + return nil + }); err != nil { + return nil, err + } + + return labels, nil +} + +func (s *namespaceStore) SetLabel(ctx context.Context, namespace, key, value string) error { + if err := l.Validate(key, value); err != nil { + return errors.Wrapf(err, "namespace.Labels") + } + + return withNamespacesLabelsBucket(s.tx, namespace, func(bkt *bolt.Bucket) error { + if value == "" { + return bkt.Delete([]byte(key)) + } + + return bkt.Put([]byte(key), []byte(value)) + }) + +} + +func (s *namespaceStore) List(ctx context.Context) ([]string, error) { + bkt := getBucket(s.tx, bucketKeyVersion) + if bkt == nil { + return nil, nil // no namespaces! + } + + var namespaces []string + if err := bkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil // not a bucket + } + + namespaces = append(namespaces, string(k)) + return nil + }); err != nil { + return nil, err + } + + return namespaces, nil +} + +func (s *namespaceStore) Delete(ctx context.Context, namespace string) error { + bkt := getBucket(s.tx, bucketKeyVersion) + if empty, err := s.namespaceEmpty(ctx, namespace); err != nil { + return err + } else if !empty { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace %q must be empty", namespace) + } + + if err := bkt.DeleteBucket([]byte(namespace)); err != nil { + if err == bolt.ErrBucketNotFound { + return errors.Wrapf(errdefs.ErrNotFound, "namespace %q", namespace) + } + + return err + } + + return nil +} + +func (s *namespaceStore) namespaceEmpty(ctx context.Context, namespace string) (bool, error) { + // Get all data buckets + buckets := []*bolt.Bucket{ + getImagesBucket(s.tx, namespace), + getBlobsBucket(s.tx, namespace), + getContainersBucket(s.tx, namespace), + } + if snbkt := getSnapshottersBucket(s.tx, namespace); snbkt != nil { + if err := snbkt.ForEach(func(k, v []byte) error { + if v == nil { + buckets = append(buckets, snbkt.Bucket(k)) + } + return nil + }); err != nil { + return false, err + } + } + + // Ensure data buckets are empty + for _, bkt := range buckets { + if !isBucketEmpty(bkt) { + return false, nil + } + } + + return true, nil +} + +func isBucketEmpty(bkt *bolt.Bucket) bool { + if bkt == nil { + return true + } + + k, _ := bkt.Cursor().First() + return k == nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go new file mode 100644 index 000000000..b126bc984 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go @@ -0,0 +1,754 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/boltdb/bolt" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/snapshots" + "github.com/pkg/errors" +) + +type snapshotter struct { + snapshots.Snapshotter + name string + db *DB + l sync.RWMutex +} + +// newSnapshotter returns a new Snapshotter which namespaces the given snapshot +// using the provided name and database. +func newSnapshotter(db *DB, name string, sn snapshots.Snapshotter) *snapshotter { + return &snapshotter{ + Snapshotter: sn, + name: name, + db: db, + } +} + +func createKey(id uint64, namespace, key string) string { + return fmt.Sprintf("%s/%d/%s", namespace, id, key) +} + +func getKey(tx *bolt.Tx, ns, name, key string) string { + bkt := getSnapshotterBucket(tx, ns, name) + if bkt == nil { + return "" + } + bkt = bkt.Bucket([]byte(key)) + if bkt == nil { + return "" + } + v := bkt.Get(bucketKeyName) + if len(v) == 0 { + return "" + } + return string(v) +} + +func (s *snapshotter) resolveKey(ctx context.Context, key string) (string, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return "", err + } + + var id string + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + id = getKey(tx, ns, s.name, key) + if id == "" { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + } + return nil + }); err != nil { + return "", err + } + + return id, nil +} + +func (s *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return snapshots.Info{}, err + } + + var ( + bkey string + local = snapshots.Info{ + Name: key, + } + ) + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + } + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + } + local.Labels, err = boltutil.ReadLabels(sbkt) + if err != nil { + return errors.Wrap(err, "failed to read labels") + } + if err := boltutil.ReadTimestamps(sbkt, &local.Created, &local.Updated); err != nil { + return errors.Wrap(err, "failed to read timestamps") + } + bkey = string(sbkt.Get(bucketKeyName)) + local.Parent = string(sbkt.Get(bucketKeyParent)) + + return nil + }); err != nil { + return snapshots.Info{}, err + } + + info, err := s.Snapshotter.Stat(ctx, bkey) + if err != nil { + return snapshots.Info{}, err + } + + return overlayInfo(info, local), nil +} + +func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { + s.l.RLock() + defer s.l.RUnlock() + + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return snapshots.Info{}, err + } + + if 
info.Name == "" { + return snapshots.Info{}, errors.Wrap(errdefs.ErrInvalidArgument, "") + } + + var ( + bkey string + local = snapshots.Info{ + Name: info.Name, + } + ) + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", info.Name) + } + sbkt := bkt.Bucket([]byte(info.Name)) + if sbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", info.Name) + } + + local.Labels, err = boltutil.ReadLabels(sbkt) + if err != nil { + return errors.Wrap(err, "failed to read labels") + } + if err := boltutil.ReadTimestamps(sbkt, &local.Created, &local.Updated); err != nil { + return errors.Wrap(err, "failed to read timestamps") + } + + // Handle field updates + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if local.Labels == nil { + local.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + local.Labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + local.Labels = info.Labels + default: + return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on snapshot %q", path, info.Name) + } + } + } else { + local.Labels = info.Labels + } + if err := validateSnapshot(&local); err != nil { + return err + } + local.Updated = time.Now().UTC() + + if err := boltutil.WriteTimestamps(sbkt, local.Created, local.Updated); err != nil { + return errors.Wrap(err, "failed to read timestamps") + } + if err := boltutil.WriteLabels(sbkt, local.Labels); err != nil { + return errors.Wrap(err, "failed to read labels") + } + bkey = string(sbkt.Get(bucketKeyName)) + local.Parent = string(sbkt.Get(bucketKeyParent)) + + return nil + }); err != nil { + return snapshots.Info{}, err + } + + info, err = s.Snapshotter.Stat(ctx, bkey) + if err != nil { + return snapshots.Info{}, err + } + + return overlayInfo(info, local), nil +} + +func overlayInfo(info, overlay snapshots.Info) snapshots.Info { + // Merge info + info.Name = overlay.Name + info.Created = overlay.Created + info.Updated = overlay.Updated + info.Parent = overlay.Parent + if info.Labels == nil { + info.Labels = overlay.Labels + } else { + for k, v := range overlay.Labels { + overlay.Labels[k] = v + } + } + return info +} + +func (s *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) { + bkey, err := s.resolveKey(ctx, key) + if err != nil { + return snapshots.Usage{}, err + } + return s.Snapshotter.Usage(ctx, bkey) +} + +func (s *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { + bkey, err := s.resolveKey(ctx, key) + if err != nil { + return nil, err + } + return s.Snapshotter.Mounts(ctx, bkey) +} + +func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + return s.createSnapshot(ctx, key, parent, false, opts) +} + +func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + return s.createSnapshot(ctx, key, parent, true, opts) +} + +func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, readonly bool, opts []snapshots.Opt) ([]mount.Mount, error) { + s.l.RLock() + defer s.l.RUnlock() + + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + var base snapshots.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return nil, err + } + 
} + + if err := validateSnapshot(&base); err != nil { + return nil, err + } + + var m []mount.Mount + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt, err := createSnapshotterBucket(tx, ns, s.name) + if err != nil { + return err + } + + bbkt, err := bkt.CreateBucket([]byte(key)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", key) + } + return err + } + if err := addSnapshotLease(ctx, tx, s.name, key); err != nil { + return err + } + + var bparent string + if parent != "" { + pbkt := bkt.Bucket([]byte(parent)) + if pbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", parent) + } + bparent = string(pbkt.Get(bucketKeyName)) + + cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren) + if err != nil { + return err + } + if err := cbkt.Put([]byte(key), nil); err != nil { + return err + } + + if err := bbkt.Put(bucketKeyParent, []byte(parent)); err != nil { + return err + } + } + + sid, err := bkt.NextSequence() + if err != nil { + return err + } + bkey := createKey(sid, ns, key) + if err := bbkt.Put(bucketKeyName, []byte(bkey)); err != nil { + return err + } + + ts := time.Now().UTC() + if err := boltutil.WriteTimestamps(bbkt, ts, ts); err != nil { + return err + } + if err := boltutil.WriteLabels(bbkt, base.Labels); err != nil { + return err + } + + // TODO: Consider doing this outside of transaction to lessen + // metadata lock time + if readonly { + m, err = s.Snapshotter.View(ctx, bkey, bparent) + } else { + m, err = s.Snapshotter.Prepare(ctx, bkey, bparent) + } + return err + }); err != nil { + return nil, err + } + return m, nil +} + +func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { + s.l.RLock() + defer s.l.RUnlock() + + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + var base snapshots.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return err + } + } + + if err := validateSnapshot(&base); err != nil { + return err + } + + return update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, + "can not find snapshotter %q", s.name) + } + + bbkt, err := bkt.CreateBucket([]byte(name)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", name) + } + return err + } + if err := addSnapshotLease(ctx, tx, s.name, name); err != nil { + return err + } + + obkt := bkt.Bucket([]byte(key)) + if obkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + } + + bkey := string(obkt.Get(bucketKeyName)) + + sid, err := bkt.NextSequence() + if err != nil { + return err + } + + nameKey := createKey(sid, ns, name) + + if err := bbkt.Put(bucketKeyName, []byte(nameKey)); err != nil { + return err + } + + parent := obkt.Get(bucketKeyParent) + if len(parent) > 0 { + pbkt := bkt.Bucket(parent) + if pbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", string(parent)) + } + + cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren) + if err != nil { + return err + } + if err := cbkt.Delete([]byte(key)); err != nil { + return err + } + if err := cbkt.Put([]byte(name), nil); err != nil { + return err + } + + if err := bbkt.Put(bucketKeyParent, parent); err != nil { + return err + } + } + ts := time.Now().UTC() + if err := boltutil.WriteTimestamps(bbkt, ts, 
ts); err != nil { + return err + } + if err := boltutil.WriteLabels(bbkt, base.Labels); err != nil { + return err + } + + if err := bkt.DeleteBucket([]byte(key)); err != nil { + return err + } + if err := removeSnapshotLease(ctx, tx, s.name, key); err != nil { + return err + } + + // TODO: Consider doing this outside of transaction to lessen + // metadata lock time + return s.Snapshotter.Commit(ctx, nameKey, bkey) + }) + +} + +func (s *snapshotter) Remove(ctx context.Context, key string) error { + s.l.RLock() + defer s.l.RUnlock() + + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + return update(ctx, s.db, func(tx *bolt.Tx) error { + var sbkt *bolt.Bucket + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt != nil { + sbkt = bkt.Bucket([]byte(key)) + } + if sbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + } + + cbkt := sbkt.Bucket(bucketKeyChildren) + if cbkt != nil { + if child, _ := cbkt.Cursor().First(); child != nil { + return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot remove snapshot with child") + } + } + + parent := sbkt.Get(bucketKeyParent) + if len(parent) > 0 { + pbkt := bkt.Bucket(parent) + if pbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", string(parent)) + } + cbkt := pbkt.Bucket(bucketKeyChildren) + if cbkt != nil { + if err := cbkt.Delete([]byte(key)); err != nil { + return errors.Wrap(err, "failed to remove child link") + } + } + } + + if err := bkt.DeleteBucket([]byte(key)); err != nil { + return err + } + if err := removeSnapshotLease(ctx, tx, s.name, key); err != nil { + return err + } + + // Mark snapshotter as dirty for triggering garbage collection + s.db.dirtyL.Lock() + s.db.dirtySS[s.name] = struct{}{} + s.db.dirtyL.Unlock() + + return nil + }) +} + +type infoPair struct { + bkey string + info snapshots.Info +} + +func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + var ( + batchSize = 100 + pairs = []infoPair{} + lastKey string + ) + + for { + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt == nil { + return nil + } + + c := bkt.Cursor() + + var k, v []byte + if lastKey == "" { + k, v = c.First() + } else { + k, v = c.Seek([]byte(lastKey)) + } + + for k != nil { + if v == nil { + if len(pairs) >= batchSize { + break + } + sbkt := bkt.Bucket(k) + + pair := infoPair{ + bkey: string(sbkt.Get(bucketKeyName)), + info: snapshots.Info{ + Name: string(k), + Parent: string(sbkt.Get(bucketKeyParent)), + }, + } + + err := boltutil.ReadTimestamps(sbkt, &pair.info.Created, &pair.info.Updated) + if err != nil { + return err + } + pair.info.Labels, err = boltutil.ReadLabels(sbkt) + if err != nil { + return err + } + + pairs = append(pairs, pair) + } + + k, v = c.Next() + } + + lastKey = string(k) + + return nil + }); err != nil { + return err + } + + for _, pair := range pairs { + info, err := s.Snapshotter.Stat(ctx, pair.bkey) + if err != nil { + if errdefs.IsNotFound(err) { + continue + } + return err + } + + if err := fn(ctx, overlayInfo(info, pair.info)); err != nil { + return err + } + } + + if lastKey == "" { + break + } + + pairs = pairs[:0] + + } + + return nil +} + +func validateSnapshot(info *snapshots.Info) error { + for k, v := range info.Labels { + if err := labels.Validate(k, v); err != nil { + return errors.Wrapf(err, "info.Labels") + } + 
} + + return nil +} + +func (s *snapshotter) garbageCollect(ctx context.Context) (d time.Duration, err error) { + s.l.Lock() + t1 := time.Now() + defer func() { + if err == nil { + d = time.Now().Sub(t1) + } + s.l.Unlock() + }() + + seen := map[string]struct{}{} + if err := s.db.View(func(tx *bolt.Tx) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + // iterate through each namespace + v1c := v1bkt.Cursor() + + for k, v := v1c.First(); k != nil; k, v = v1c.Next() { + if v != nil { + continue + } + + sbkt := v1bkt.Bucket(k).Bucket(bucketKeyObjectSnapshots) + if sbkt == nil { + continue + } + + // Load specific snapshotter + ssbkt := sbkt.Bucket([]byte(s.name)) + if ssbkt == nil { + continue + } + + if err := ssbkt.ForEach(func(sk, sv []byte) error { + if sv == nil { + bkey := ssbkt.Bucket(sk).Get(bucketKeyName) + if len(bkey) > 0 { + seen[string(bkey)] = struct{}{} + } + } + return nil + }); err != nil { + return err + } + } + + return nil + }); err != nil { + return 0, err + } + + roots, err := s.walkTree(ctx, seen) + if err != nil { + return 0, err + } + + // TODO: Unlock before removal (once nodes are fully unavailable). + // This could be achieved through doing prune inside the lock + // and having a cleanup method which actually performs the + // deletions on the snapshotters which support it. + + for _, node := range roots { + if err := s.pruneBranch(ctx, node); err != nil { + return 0, err + } + } + + return +} + +type treeNode struct { + info snapshots.Info + remove bool + children []*treeNode +} + +func (s *snapshotter) walkTree(ctx context.Context, seen map[string]struct{}) ([]*treeNode, error) { + roots := []*treeNode{} + nodes := map[string]*treeNode{} + + if err := s.Snapshotter.Walk(ctx, func(ctx context.Context, info snapshots.Info) error { + _, isSeen := seen[info.Name] + node, ok := nodes[info.Name] + if !ok { + node = &treeNode{} + nodes[info.Name] = node + } + + node.remove = !isSeen + node.info = info + + if info.Parent == "" { + roots = append(roots, node) + } else { + parent, ok := nodes[info.Parent] + if !ok { + parent = &treeNode{} + nodes[info.Parent] = parent + } + parent.children = append(parent.children, node) + } + + return nil + }); err != nil { + return nil, err + } + + return roots, nil +} + +func (s *snapshotter) pruneBranch(ctx context.Context, node *treeNode) error { + for _, child := range node.children { + if err := s.pruneBranch(ctx, child); err != nil { + return err + } + } + + if node.remove { + logger := log.G(ctx).WithField("snapshotter", s.name) + if err := s.Snapshotter.Remove(ctx, node.info.Name); err != nil { + if !errdefs.IsFailedPrecondition(err) { + return err + } + logger.WithError(err).WithField("key", node.info.Name).Warnf("failed to remove snapshot") + } else { + logger.WithField("key", node.info.Name).Debug("removed snapshot") + } + } + + return nil +} + +// Close closes s.Snapshotter but not db +func (s *snapshotter) Close() error { + return s.Snapshotter.Close() +} diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/blkio.go b/vendor/github.com/containerd/containerd/metrics/cgroups/blkio.go new file mode 100644 index 000000000..9d2adabe5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metrics/cgroups/blkio.go @@ -0,0 +1,132 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cgroups
+
+import (
+	"strconv"
+
+	"github.com/containerd/cgroups"
+	metrics "github.com/docker/go-metrics"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+var blkioMetrics = []*metric{
+	{
+		name:   "blkio_io_merged_recursive",
+		help:   "The blkio io merged recursive",
+		unit:   metrics.Total,
+		vt:     prometheus.GaugeValue,
+		labels: []string{"op", "device", "major", "minor"},
+		getValues: func(stats *cgroups.Metrics) []value {
+			if stats.Blkio == nil {
+				return nil
+			}
+			return blkioValues(stats.Blkio.IoMergedRecursive)
+		},
+	},
+	{
+		name:   "blkio_io_queued_recursive",
+		help:   "The blkio io queued recursive",
+		unit:   metrics.Total,
+		vt:     prometheus.GaugeValue,
+		labels: []string{"op", "device", "major", "minor"},
+		getValues: func(stats *cgroups.Metrics) []value {
+			if stats.Blkio == nil {
+				return nil
+			}
+			return blkioValues(stats.Blkio.IoQueuedRecursive)
+		},
+	},
+	{
+		name:   "blkio_io_service_bytes_recursive",
+		help:   "The blkio io service bytes recursive",
+		unit:   metrics.Bytes,
+		vt:     prometheus.GaugeValue,
+		labels: []string{"op", "device", "major", "minor"},
+		getValues: func(stats *cgroups.Metrics) []value {
+			if stats.Blkio == nil {
+				return nil
+			}
+			return blkioValues(stats.Blkio.IoServiceBytesRecursive)
+		},
+	},
+	{
+		name:   "blkio_io_service_time_recursive",
+		help:   "The blkio io service time recursive",
+		unit:   metrics.Total,
+		vt:     prometheus.GaugeValue,
+		labels: []string{"op", "device", "major", "minor"},
+		getValues: func(stats *cgroups.Metrics) []value {
+			if stats.Blkio == nil {
+				return nil
+			}
+			return blkioValues(stats.Blkio.IoServiceTimeRecursive)
+		},
+	},
+	{
+		name:   "blkio_io_serviced_recursive",
+		help:   "The blkio io serviced recursive",
+		unit:   metrics.Total,
+		vt:     prometheus.GaugeValue,
+		labels: []string{"op", "device", "major", "minor"},
+		getValues: func(stats *cgroups.Metrics) []value {
+			if stats.Blkio == nil {
+				return nil
+			}
+			return blkioValues(stats.Blkio.IoServicedRecursive)
+		},
+	},
+	{
+		name:   "blkio_io_time_recursive",
+		help:   "The blkio io time recursive",
+		unit:   metrics.Total,
+		vt:     prometheus.GaugeValue,
+		labels: []string{"op", "device", "major", "minor"},
+		getValues: func(stats *cgroups.Metrics) []value {
+			if stats.Blkio == nil {
+				return nil
+			}
+			return blkioValues(stats.Blkio.IoTimeRecursive)
+		},
+	},
+	{
+		name:   "blkio_sectors_recursive",
+		help:   "The blkio sectors recursive",
+		unit:   metrics.Total,
+		vt:     prometheus.GaugeValue,
+		labels: []string{"op", "device", "major", "minor"},
+		getValues: func(stats *cgroups.Metrics) []value {
+			if stats.Blkio == nil {
+				return nil
+			}
+			return blkioValues(stats.Blkio.SectorsRecursive)
+		},
+	},
+}
+
+func blkioValues(l []*cgroups.BlkIOEntry) []value {
+	var out []value
+	for _, e := range l {
+		out = append(out, value{
+			v: float64(e.Value),
+			l: []string{e.Op, e.Device, strconv.FormatUint(e.Major, 10), strconv.FormatUint(e.Minor, 10)},
+		})
+	}
+	return out
+}
diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/cgroups.go b/vendor/github.com/containerd/containerd/metrics/cgroups/cgroups.go
new file mode 100644
index 000000000..9fbfa756c
--- /dev/null
+++ 
b/vendor/github.com/containerd/containerd/metrics/cgroups/cgroups.go @@ -0,0 +1,121 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "github.com/containerd/cgroups" + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/linux" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/runtime" + metrics "github.com/docker/go-metrics" + "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +// Config for the cgroups monitor +type Config struct { + NoPrometheus bool `toml:"no_prometheus"` +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.TaskMonitorPlugin, + ID: "cgroups", + InitFn: New, + Config: &Config{}, + }) +} + +// New returns a new cgroups monitor +func New(ic *plugin.InitContext) (interface{}, error) { + var ns *metrics.Namespace + config := ic.Config.(*Config) + if !config.NoPrometheus { + ns = metrics.NewNamespace("container", "", nil) + } + collector := newCollector(ns) + oom, err := newOOMCollector(ns) + if err != nil { + return nil, err + } + if ns != nil { + metrics.Register(ns) + } + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + return &cgroupsMonitor{ + collector: collector, + oom: oom, + context: ic.Context, + publisher: ic.Events, + }, nil +} + +type cgroupsMonitor struct { + collector *collector + oom *oomCollector + context context.Context + publisher events.Publisher +} + +func (m *cgroupsMonitor) Monitor(c runtime.Task) error { + info := c.Info() + t := c.(*linux.Task) + cg, err := t.Cgroup() + if err != nil { + return err + } + if err := m.collector.Add(info.ID, info.Namespace, cg); err != nil { + return err + } + err = m.oom.Add(info.ID, info.Namespace, cg, m.trigger) + if err == cgroups.ErrMemoryNotSupported { + logrus.WithError(err).Warn("OOM monitoring failed") + return nil + } + return err +} + +func (m *cgroupsMonitor) Stop(c runtime.Task) error { + info := c.Info() + t := c.(*linux.Task) + + cgroup, err := t.Cgroup() + if err != nil { + log.G(m.context).WithError(err).Warnf("unable to retrieve cgroup on stop") + } else { + m.collector.collect(info.ID, info.Namespace, cgroup, m.collector.storedMetrics, false, nil) + } + + m.collector.Remove(info.ID, info.Namespace) + return nil +} + +func (m *cgroupsMonitor) trigger(id, namespace string, cg cgroups.Cgroup) { + ctx := namespaces.WithNamespace(m.context, namespace) + if err := m.publisher.Publish(ctx, runtime.TaskOOMEventTopic, &eventstypes.TaskOOM{ + ContainerID: id, + }); err != nil { + log.G(m.context).WithError(err).Error("post OOM event") + } +} diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/cpu.go b/vendor/github.com/containerd/containerd/metrics/cgroups/cpu.go new file mode 100644 index 000000000..1fe988ce1 
--- /dev/null +++ b/vendor/github.com/containerd/containerd/metrics/cgroups/cpu.go @@ -0,0 +1,146 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "strconv" + + "github.com/containerd/cgroups" + metrics "github.com/docker/go-metrics" + "github.com/prometheus/client_golang/prometheus" +) + +var cpuMetrics = []*metric{ + { + name: "cpu_total", + help: "The total cpu time", + unit: metrics.Nanoseconds, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.CPU == nil { + return nil + } + return []value{ + { + v: float64(stats.CPU.Usage.Total), + }, + } + }, + }, + { + name: "cpu_kernel", + help: "The total kernel cpu time", + unit: metrics.Nanoseconds, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.CPU == nil { + return nil + } + return []value{ + { + v: float64(stats.CPU.Usage.Kernel), + }, + } + }, + }, + { + name: "cpu_user", + help: "The total user cpu time", + unit: metrics.Nanoseconds, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.CPU == nil { + return nil + } + return []value{ + { + v: float64(stats.CPU.Usage.User), + }, + } + }, + }, + { + name: "per_cpu", + help: "The total cpu time per cpu", + unit: metrics.Nanoseconds, + vt: prometheus.GaugeValue, + labels: []string{"cpu"}, + getValues: func(stats *cgroups.Metrics) []value { + if stats.CPU == nil { + return nil + } + var out []value + for i, v := range stats.CPU.Usage.PerCPU { + out = append(out, value{ + v: float64(v), + l: []string{strconv.Itoa(i)}, + }) + } + return out + }, + }, + { + name: "cpu_throttle_periods", + help: "The total cpu throttle periods", + unit: metrics.Total, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.CPU == nil { + return nil + } + return []value{ + { + v: float64(stats.CPU.Throttling.Periods), + }, + } + }, + }, + { + name: "cpu_throttled_periods", + help: "The total cpu throttled periods", + unit: metrics.Total, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.CPU == nil { + return nil + } + return []value{ + { + v: float64(stats.CPU.Throttling.ThrottledPeriods), + }, + } + }, + }, + { + name: "cpu_throttled_time", + help: "The total cpu throttled time", + unit: metrics.Nanoseconds, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.CPU == nil { + return nil + } + return []value{ + { + v: float64(stats.CPU.Throttling.ThrottledTime), + }, + } + }, + }, +} diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/hugetlb.go b/vendor/github.com/containerd/containerd/metrics/cgroups/hugetlb.go new file mode 100644 index 000000000..ec173ae1c --- /dev/null +++ b/vendor/github.com/containerd/containerd/metrics/cgroups/hugetlb.go @@ -0,0 +1,88 @@ +// +build linux + +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "github.com/containerd/cgroups" + metrics "github.com/docker/go-metrics" + "github.com/prometheus/client_golang/prometheus" +) + +var hugetlbMetrics = []*metric{ + { + name: "hugetlb_usage", + help: "The hugetlb usage", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + labels: []string{"page"}, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Hugetlb == nil { + return nil + } + var out []value + for _, v := range stats.Hugetlb { + out = append(out, value{ + v: float64(v.Usage), + l: []string{v.Pagesize}, + }) + } + return out + }, + }, + { + name: "hugetlb_failcnt", + help: "The hugetlb failcnt", + unit: metrics.Total, + vt: prometheus.GaugeValue, + labels: []string{"page"}, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Hugetlb == nil { + return nil + } + var out []value + for _, v := range stats.Hugetlb { + out = append(out, value{ + v: float64(v.Failcnt), + l: []string{v.Pagesize}, + }) + } + return out + }, + }, + { + name: "hugetlb_max", + help: "The hugetlb maximum usage", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + labels: []string{"page"}, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Hugetlb == nil { + return nil + } + var out []value + for _, v := range stats.Hugetlb { + out = append(out, value{ + v: float64(v.Max), + l: []string{v.Pagesize}, + }) + } + return out + }, + }, +} diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/memory.go b/vendor/github.com/containerd/containerd/metrics/cgroups/memory.go new file mode 100644 index 000000000..e90295fb3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metrics/cgroups/memory.go @@ -0,0 +1,796 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cgroups + +import ( + "github.com/containerd/cgroups" + metrics "github.com/docker/go-metrics" + "github.com/prometheus/client_golang/prometheus" +) + +var memoryMetrics = []*metric{ + { + name: "memory_cache", + help: "The cache amount used", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Cache), + }, + } + }, + }, + { + name: "memory_rss", + help: "The rss amount used", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.RSS), + }, + } + }, + }, + { + name: "memory_rss_huge", + help: "The rss_huge amount used", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.RSSHuge), + }, + } + }, + }, + { + name: "memory_mapped_file", + help: "The mapped_file amount used", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.MappedFile), + }, + } + }, + }, + { + name: "memory_dirty", + help: "The dirty amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Dirty), + }, + } + }, + }, + { + name: "memory_writeback", + help: "The writeback amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Writeback), + }, + } + }, + }, + { + name: "memory_pgpgin", + help: "The pgpgin amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.PgPgIn), + }, + } + }, + }, + { + name: "memory_pgpgout", + help: "The pgpgout amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.PgPgOut), + }, + } + }, + }, + { + name: "memory_pgfault", + help: "The pgfault amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.PgFault), + }, + } + }, + }, + { + name: "memory_pgmajfault", + help: "The pgmajfault amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.PgMajFault), + }, + } + }, + }, + { + name: "memory_inactive_anon", + help: "The inactive_anon amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.InactiveAnon), + }, + } + }, + }, + { + name: "memory_active_anon", + help: "The active_anon amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if 
stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.ActiveAnon), + }, + } + }, + }, + { + name: "memory_inactive_file", + help: "The inactive_file amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.InactiveFile), + }, + } + }, + }, + { + name: "memory_active_file", + help: "The active_file amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.ActiveFile), + }, + } + }, + }, + { + name: "memory_unevictable", + help: "The unevictable amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Unevictable), + }, + } + }, + }, + { + name: "memory_hierarchical_memory_limit", + help: "The hierarchical_memory_limit amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.HierarchicalMemoryLimit), + }, + } + }, + }, + { + name: "memory_hierarchical_memsw_limit", + help: "The hierarchical_memsw_limit amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.HierarchicalSwapLimit), + }, + } + }, + }, + { + name: "memory_total_cache", + help: "The total_cache amount used", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalCache), + }, + } + }, + }, + { + name: "memory_total_rss", + help: "The total_rss amount used", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalRSS), + }, + } + }, + }, + { + name: "memory_total_rss_huge", + help: "The total_rss_huge amount used", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalRSSHuge), + }, + } + }, + }, + { + name: "memory_total_mapped_file", + help: "The total_mapped_file amount used", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalMappedFile), + }, + } + }, + }, + { + name: "memory_total_dirty", + help: "The total_dirty amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalDirty), + }, + } + }, + }, + { + name: "memory_total_writeback", + help: "The total_writeback amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalWriteback), + }, + } + }, + }, + { + name: "memory_total_pgpgin", + help: 
"The total_pgpgin amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalPgPgIn), + }, + } + }, + }, + { + name: "memory_total_pgpgout", + help: "The total_pgpgout amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalPgPgOut), + }, + } + }, + }, + { + name: "memory_total_pgfault", + help: "The total_pgfault amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalPgFault), + }, + } + }, + }, + { + name: "memory_total_pgmajfault", + help: "The total_pgmajfault amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalPgMajFault), + }, + } + }, + }, + { + name: "memory_total_inactive_anon", + help: "The total_inactive_anon amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalInactiveAnon), + }, + } + }, + }, + { + name: "memory_total_active_anon", + help: "The total_active_anon amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalActiveAnon), + }, + } + }, + }, + { + name: "memory_total_inactive_file", + help: "The total_inactive_file amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalInactiveFile), + }, + } + }, + }, + { + name: "memory_total_active_file", + help: "The total_active_file amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalActiveFile), + }, + } + }, + }, + { + name: "memory_total_unevictable", + help: "The total_unevictable amount", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.TotalUnevictable), + }, + } + }, + }, + { + name: "memory_usage_failcnt", + help: "The usage failcnt", + unit: metrics.Total, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Usage.Failcnt), + }, + } + }, + }, + { + name: "memory_usage_limit", + help: "The memory limit", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Usage.Limit), + }, + } + }, + }, + { + name: "memory_usage_max", + help: "The memory maximum usage", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + 
return []value{ + { + v: float64(stats.Memory.Usage.Max), + }, + } + }, + }, + { + name: "memory_usage_usage", + help: "The memory usage", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Usage.Usage), + }, + } + }, + }, + { + name: "memory_swap_failcnt", + help: "The swap failcnt", + unit: metrics.Total, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Swap.Failcnt), + }, + } + }, + }, + { + name: "memory_swap_limit", + help: "The swap limit", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Swap.Limit), + }, + } + }, + }, + { + name: "memory_swap_max", + help: "The swap maximum usage", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Swap.Max), + }, + } + }, + }, + { + name: "memory_swap_usage", + help: "The swap usage", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Swap.Usage), + }, + } + }, + }, + { + name: "memory_kernel_failcnt", + help: "The kernel failcnt", + unit: metrics.Total, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Kernel.Failcnt), + }, + } + }, + }, + { + name: "memory_kernel_limit", + help: "The kernel limit", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Kernel.Limit), + }, + } + }, + }, + { + name: "memory_kernel_max", + help: "The kernel maximum usage", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Kernel.Max), + }, + } + }, + }, + { + name: "memory_kernel_usage", + help: "The kernel usage", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.Kernel.Usage), + }, + } + }, + }, + { + name: "memory_kerneltcp_failcnt", + help: "The kerneltcp failcnt", + unit: metrics.Total, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.KernelTCP.Failcnt), + }, + } + }, + }, + { + name: "memory_kerneltcp_limit", + help: "The kerneltcp limit", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.KernelTCP.Limit), + }, + } + }, + }, + { + name: "memory_kerneltcp_max", + help: "The kerneltcp maximum usage", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return 
[]value{ + { + v: float64(stats.Memory.KernelTCP.Max), + }, + } + }, + }, + { + name: "memory_kerneltcp_usage", + help: "The kerneltcp usage", + unit: metrics.Bytes, + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Memory == nil { + return nil + } + return []value{ + { + v: float64(stats.Memory.KernelTCP.Usage), + }, + } + }, + }, +} diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/metric.go b/vendor/github.com/containerd/containerd/metrics/cgroups/metric.go new file mode 100644 index 000000000..02ad17776 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metrics/cgroups/metric.go @@ -0,0 +1,61 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "github.com/containerd/cgroups" + metrics "github.com/docker/go-metrics" + "github.com/prometheus/client_golang/prometheus" +) + +type value struct { + v float64 + l []string +} + +type metric struct { + name string + help string + unit metrics.Unit + vt prometheus.ValueType + labels []string + // getValues returns the value and labels for the data + getValues func(stats *cgroups.Metrics) []value +} + +func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc { + // the namespace label is for containerd namespaces + return ns.NewDesc(m.name, m.help, m.unit, append([]string{"container_id", "namespace"}, m.labels...)...) +} + +func (m *metric) collect(id, namespace string, stats *cgroups.Metrics, ns *metrics.Namespace, ch chan<- prometheus.Metric, block bool) { + values := m.getValues(stats) + for _, v := range values { + // block signals to block on the sending the metrics so none are missed + if block { + ch <- prometheus.MustNewConstMetric(m.desc(ns), m.vt, v.v, append([]string{id, namespace}, v.l...)...) + continue + } + // non-blocking metrics can be dropped if the chan is full + select { + case ch <- prometheus.MustNewConstMetric(m.desc(ns), m.vt, v.v, append([]string{id, namespace}, v.l...)...): + default: + } + } +} diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/metrics.go b/vendor/github.com/containerd/containerd/metrics/cgroups/metrics.go new file mode 100644 index 000000000..544025162 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metrics/cgroups/metrics.go @@ -0,0 +1,153 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package cgroups
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/containerd/cgroups"
+	"github.com/containerd/containerd/log"
+	metrics "github.com/docker/go-metrics"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	// ErrAlreadyCollected is returned when a cgroup is already being monitored
+	ErrAlreadyCollected = errors.New("cgroup is already being collected")
+	// ErrCgroupNotExists is returned when a cgroup no longer exists
+	ErrCgroupNotExists = errors.New("cgroup does not exist in the collector")
+)
+
+// Trigger will be called when an event happens and provides the cgroup
+// where the event originated from
+type Trigger func(string, string, cgroups.Cgroup)
+
+// newCollector registers the collector with the provided namespace and returns it so
+// that cgroups can be added for collection
+func newCollector(ns *metrics.Namespace) *collector {
+	if ns == nil {
+		return &collector{}
+	}
+	// add machine cpus and memory info
+	c := &collector{
+		ns:      ns,
+		cgroups: make(map[string]*task),
+	}
+	c.metrics = append(c.metrics, pidMetrics...)
+	c.metrics = append(c.metrics, cpuMetrics...)
+	c.metrics = append(c.metrics, memoryMetrics...)
+	c.metrics = append(c.metrics, hugetlbMetrics...)
+	c.metrics = append(c.metrics, blkioMetrics...)
+	c.storedMetrics = make(chan prometheus.Metric, 100*len(c.metrics))
+	ns.Add(c)
+	return c
+}
+
+type task struct {
+	id        string
+	namespace string
+	cgroup    cgroups.Cgroup
+}
+
+func taskID(id, namespace string) string {
+	return fmt.Sprintf("%s-%s", id, namespace)
+}
+
+// collector provides the ability to collect container stats and export
+// them in the prometheus format
+type collector struct {
+	mu sync.RWMutex
+
+	cgroups       map[string]*task
+	ns            *metrics.Namespace
+	metrics       []*metric
+	storedMetrics chan prometheus.Metric
+}
+
+func (c *collector) Describe(ch chan<- *prometheus.Desc) {
+	for _, m := range c.metrics {
+		ch <- m.desc(c.ns)
+	}
+}
+
+func (c *collector) Collect(ch chan<- prometheus.Metric) {
+	c.mu.RLock()
+	wg := &sync.WaitGroup{}
+	for _, t := range c.cgroups {
+		wg.Add(1)
+		go c.collect(t.id, t.namespace, t.cgroup, ch, true, wg)
+	}
+storedLoop:
+	for {
+		// read stored metrics until the channel is flushed
+		select {
+		case m := <-c.storedMetrics:
+			ch <- m
+		default:
+			break storedLoop
+		}
+	}
+	c.mu.RUnlock()
+	wg.Wait()
+}
+
+func (c *collector) collect(id, namespace string, cg cgroups.Cgroup, ch chan<- prometheus.Metric, block bool, wg *sync.WaitGroup) {
+	if wg != nil {
+		defer wg.Done()
+	}
+
+	stats, err := cg.Stat(cgroups.IgnoreNotExist)
+	if err != nil {
+		log.L.WithError(err).Errorf("stat cgroup %s", id)
+		return
+	}
+	for _, m := range c.metrics {
+		m.collect(id, namespace, stats, c.ns, ch, block)
+	}
+}
+
+// Add adds the provided cgroup and id so that metrics are collected and exported
+func (c *collector) Add(id, namespace string, cg cgroups.Cgroup) error {
+	if c.ns == nil {
+		return nil
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if _, ok := c.cgroups[taskID(id, namespace)]; ok {
+		return ErrAlreadyCollected
+	}
+	c.cgroups[taskID(id, namespace)] = &task{
+		id:        id,
+		namespace: namespace,
+		cgroup:    cg,
+	}
+	return nil
+}
+
+// Remove removes the provided cgroup by id from the collector
+func (c *collector) Remove(id, namespace string) {
+	if c.ns == nil {
+		return
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	delete(c.cgroups, taskID(id, namespace))
+}
diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/oom.go b/vendor/github.com/containerd/containerd/metrics/cgroups/oom.go
new 
file mode 100644 index 000000000..1aff17113 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metrics/cgroups/oom.go @@ -0,0 +1,157 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "sync" + "sync/atomic" + + "golang.org/x/sys/unix" + + "github.com/containerd/cgroups" + metrics "github.com/docker/go-metrics" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" +) + +func newOOMCollector(ns *metrics.Namespace) (*oomCollector, error) { + fd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if err != nil { + return nil, err + } + var desc *prometheus.Desc + if ns != nil { + desc = ns.NewDesc("memory_oom", "The number of times a container has received an oom event", metrics.Total, "container_id", "namespace") + } + c := &oomCollector{ + fd: fd, + desc: desc, + set: make(map[uintptr]*oom), + } + if ns != nil { + ns.Add(c) + } + go c.start() + return c, nil +} + +type oomCollector struct { + mu sync.Mutex + + desc *prometheus.Desc + fd int + set map[uintptr]*oom +} + +type oom struct { + // count needs to stay the first member of this struct to ensure 64bits + // alignment on a 32bits machine (e.g. arm32). This is necessary as we use + // the sync/atomic operations on this field. 
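+	// (Per the sync/atomic documentation, only the first word in an
+	// allocated struct, array, or slice is guaranteed to be 64-bit
+	// aligned on 32-bit platforms.)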
+ count int64 + id string + namespace string + c cgroups.Cgroup + triggers []Trigger +} + +func (o *oomCollector) Add(id, namespace string, cg cgroups.Cgroup, triggers ...Trigger) error { + o.mu.Lock() + defer o.mu.Unlock() + fd, err := cg.OOMEventFD() + if err != nil { + return err + } + o.set[fd] = &oom{ + id: id, + c: cg, + triggers: triggers, + namespace: namespace, + } + event := unix.EpollEvent{ + Fd: int32(fd), + Events: unix.EPOLLHUP | unix.EPOLLIN | unix.EPOLLERR, + } + return unix.EpollCtl(o.fd, unix.EPOLL_CTL_ADD, int(fd), &event) +} + +func (o *oomCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- o.desc +} + +func (o *oomCollector) Collect(ch chan<- prometheus.Metric) { + o.mu.Lock() + defer o.mu.Unlock() + for _, t := range o.set { + c := atomic.LoadInt64(&t.count) + ch <- prometheus.MustNewConstMetric(o.desc, prometheus.CounterValue, float64(c), t.id, t.namespace) + } +} + +// Close closes the epoll fd +func (o *oomCollector) Close() error { + return unix.Close(o.fd) +} + +func (o *oomCollector) start() { + var events [128]unix.EpollEvent + for { + n, err := unix.EpollWait(o.fd, events[:], -1) + if err != nil { + if err == unix.EINTR { + continue + } + logrus.WithField("error", err).Fatal("cgroups: epoll wait") + } + for i := 0; i < n; i++ { + o.process(uintptr(events[i].Fd), events[i].Events) + } + } +} + +func (o *oomCollector) process(fd uintptr, event uint32) { + // make sure to always flush the fd + flush(fd) + + o.mu.Lock() + info, ok := o.set[fd] + if !ok { + o.mu.Unlock() + return + } + o.mu.Unlock() + // if we received an event but it was caused by the cgroup being deleted and the fd + // being closed make sure we close our copy and remove the container from the set + if info.c.State() == cgroups.Deleted { + o.mu.Lock() + delete(o.set, fd) + o.mu.Unlock() + unix.Close(int(fd)) + return + } + atomic.AddInt64(&info.count, 1) + for _, t := range info.triggers { + t(info.id, info.namespace, info.c) + } +} + +func flush(fd uintptr) error { + var buf [8]byte + _, err := unix.Read(int(fd), buf[:]) + return err +} diff --git a/vendor/github.com/containerd/containerd/metrics/cgroups/pids.go b/vendor/github.com/containerd/containerd/metrics/cgroups/pids.go new file mode 100644 index 000000000..82c294e25 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metrics/cgroups/pids.go @@ -0,0 +1,60 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cgroups + +import ( + "github.com/containerd/cgroups" + metrics "github.com/docker/go-metrics" + "github.com/prometheus/client_golang/prometheus" +) + +var pidMetrics = []*metric{ + { + name: "pids", + help: "The limit to the number of pids allowed", + unit: metrics.Unit("limit"), + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Pids == nil { + return nil + } + return []value{ + { + v: float64(stats.Pids.Limit), + }, + } + }, + }, + { + name: "pids", + help: "The current number of pids", + unit: metrics.Unit("current"), + vt: prometheus.GaugeValue, + getValues: func(stats *cgroups.Metrics) []value { + if stats.Pids == nil { + return nil + } + return []value{ + { + v: float64(stats.Pids.Current), + }, + } + }, + }, +} diff --git a/vendor/github.com/containerd/containerd/mount/lookup_unix.go b/vendor/github.com/containerd/containerd/mount/lookup_unix.go index 6e4fd0d24..e8b0a0b48 100644 --- a/vendor/github.com/containerd/containerd/mount/lookup_unix.go +++ b/vendor/github.com/containerd/containerd/mount/lookup_unix.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount import ( diff --git a/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go b/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go index e5f84e7f2..46ec66a90 100644 --- a/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go +++ b/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go @@ -1,5 +1,21 @@ // +build windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount import ( diff --git a/vendor/github.com/containerd/containerd/mount/mount.go b/vendor/github.com/containerd/containerd/mount/mount.go index 323e6e2fb..9a5b429ea 100644 --- a/vendor/github.com/containerd/containerd/mount/mount.go +++ b/vendor/github.com/containerd/containerd/mount/mount.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount import ( diff --git a/vendor/github.com/containerd/containerd/mount/mount_linux.go b/vendor/github.com/containerd/containerd/mount/mount_linux.go index de2e8bb7d..82fc0b279 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_linux.go +++ b/vendor/github.com/containerd/containerd/mount/mount_linux.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount import ( diff --git a/vendor/github.com/containerd/containerd/mount/mount_unix.go b/vendor/github.com/containerd/containerd/mount/mount_unix.go index edb0e8dd1..6741293f8 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_unix.go +++ b/vendor/github.com/containerd/containerd/mount/mount_unix.go @@ -1,5 +1,21 @@ // +build darwin freebsd +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount import "github.com/pkg/errors" diff --git a/vendor/github.com/containerd/containerd/mount/mount_windows.go b/vendor/github.com/containerd/containerd/mount/mount_windows.go index 6d37fda2a..f7c97894b 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_windows.go +++ b/vendor/github.com/containerd/containerd/mount/mount_windows.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount import ( diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo.go b/vendor/github.com/containerd/containerd/mount/mountinfo.go index 2192cce20..e7a68402f 100644 --- a/vendor/github.com/containerd/containerd/mount/mountinfo.go +++ b/vendor/github.com/containerd/containerd/mount/mountinfo.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount // Info reveals information about a particular mounted filesystem. This diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go b/vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go index d2bc075a6..bbe79767e 100644 --- a/vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go +++ b/vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount /* diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go b/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go index 023772264..9c442c8ed 100644 --- a/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go +++ b/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go @@ -1,5 +1,21 @@ // +build linux +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package mount import ( diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go b/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go index aa659d899..eba602f1a 100644 --- a/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go +++ b/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go @@ -1,5 +1,21 @@ // +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package mount import ( diff --git a/vendor/github.com/containerd/containerd/namespaces.go b/vendor/github.com/containerd/containerd/namespaces.go index 36fc50cab..eea70ca33 100644 --- a/vendor/github.com/containerd/containerd/namespaces.go +++ b/vendor/github.com/containerd/containerd/namespaces.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go index 708711487..afcd9d1b0 100644 --- a/vendor/github.com/containerd/containerd/namespaces/context.go +++ b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package namespaces import ( diff --git a/vendor/github.com/containerd/containerd/namespaces/grpc.go b/vendor/github.com/containerd/containerd/namespaces/grpc.go index c18fc933d..fe1ca1d7c 100644 --- a/vendor/github.com/containerd/containerd/namespaces/grpc.go +++ b/vendor/github.com/containerd/containerd/namespaces/grpc.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package namespaces import ( diff --git a/vendor/github.com/containerd/containerd/namespaces/store.go b/vendor/github.com/containerd/containerd/namespaces/store.go index 68ff714bb..0b5c98569 100644 --- a/vendor/github.com/containerd/containerd/namespaces/store.go +++ b/vendor/github.com/containerd/containerd/namespaces/store.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package namespaces import "context" diff --git a/vendor/github.com/containerd/containerd/namespaces/validate.go b/vendor/github.com/containerd/containerd/namespaces/validate.go index ff97a8cc8..222da3ea4 100644 --- a/vendor/github.com/containerd/containerd/namespaces/validate.go +++ b/vendor/github.com/containerd/containerd/namespaces/validate.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + // Package namespaces provides tools for working with namespaces across // containerd. // diff --git a/vendor/github.com/containerd/containerd/oci/client.go b/vendor/github.com/containerd/containerd/oci/client.go index d2cd355c5..9923101bf 100644 --- a/vendor/github.com/containerd/containerd/oci/client.go +++ b/vendor/github.com/containerd/containerd/oci/client.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package oci import ( diff --git a/vendor/github.com/containerd/containerd/oci/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go index 558a3570f..78284187a 100644 --- a/vendor/github.com/containerd/containerd/oci/spec.go +++ b/vendor/github.com/containerd/containerd/oci/spec.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package oci import ( diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go index 19126b22f..53741ea80 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package oci import ( @@ -36,10 +52,10 @@ func WithHostname(name string) SpecOpts { } // WithEnv appends environment variables -func WithEnv(environmnetVariables []string) SpecOpts { +func WithEnv(environmentVariables []string) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error { - if len(environmnetVariables) > 0 { - s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, environmnetVariables) + if len(environmentVariables) > 0 { + s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, environmentVariables) } return nil } diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go index 8bbdd78b1..87d313add 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package oci import ( diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go index 796ad5598..5b8ebba13 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go @@ -1,5 +1,21 @@ // +build windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package oci import ( diff --git a/vendor/github.com/containerd/containerd/oci/spec_unix.go b/vendor/github.com/containerd/containerd/oci/spec_unix.go index c8f3b37af..e52e42228 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_unix.go +++ b/vendor/github.com/containerd/containerd/oci/spec_unix.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package oci import ( diff --git a/vendor/github.com/containerd/containerd/oci/spec_windows.go b/vendor/github.com/containerd/containerd/oci/spec_windows.go index 64c228883..f8cdb8a9b 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_windows.go +++ b/vendor/github.com/containerd/containerd/oci/spec_windows.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package oci import ( diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index b7c23cc19..a5c5ab42b 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package platforms import ( diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go index 362b005a6..df9cc2392 100644 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package platforms import ( diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go index ed493ab2c..dee59abad 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package platforms import ( diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 56c6ddc51..77b6d8410 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + // Package platforms provides a toolkit for normalizing, matching and // specifying container platforms. // diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go index eca4a0204..1211c907e 100644 --- a/vendor/github.com/containerd/containerd/plugin/context.go +++ b/vendor/github.com/containerd/containerd/plugin/context.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package plugin import ( diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go index c4528471c..470429a0c 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package plugin import ( diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go index eee0e3fdb..5b82db868 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go @@ -1,5 +1,21 @@ // +build go1.8,!windows,amd64,!static_build +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package plugin import ( diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_other.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go index 180917af8..2978f60fd 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin_other.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go @@ -1,5 +1,21 @@ // +build !go1.8 windows !amd64 static_build +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package plugin func loadPlugins(path string) error { diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go index 32049cf07..6b1120337 100644 --- a/vendor/github.com/containerd/containerd/process.go +++ b/vendor/github.com/containerd/containerd/process.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
 package containerd
 
 import (
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/doc.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/doc.go
index 9ab1e3e8e..c76291766 100644
--- a/vendor/github.com/containerd/containerd/protobuf/google/rpc/doc.go
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/doc.go
@@ -1 +1,17 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
 package rpc
diff --git a/vendor/github.com/containerd/containerd/reaper/reaper.go b/vendor/github.com/containerd/containerd/reaper/reaper.go
new file mode 100644
index 000000000..bdcb14c26
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/reaper/reaper.go
@@ -0,0 +1,110 @@
+// +build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package reaper
+
+import (
+	"os/exec"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/sys"
+	runc "github.com/containerd/go-runc"
+	"github.com/pkg/errors"
+)
+
+// ErrNoSuchProcess is returned when the process no longer exists
+var ErrNoSuchProcess = errors.New("no such process")
+
+const bufferSize = 32
+
+// Reap should be called when the process receives a SIGCHLD. Reap will reap
+// all exited processes and close their wait channels
+func Reap() error {
+	now := time.Now()
+	exits, err := sys.Reap(false)
+	Default.Lock()
+	for c := range Default.subscribers {
+		for _, e := range exits {
+			c <- runc.Exit{
+				Timestamp: now,
+				Pid:       e.Pid,
+				Status:    e.Status,
+			}
+		}
+	}
+	Default.Unlock()
+	return err
+}
+
+// Default is the default monitor initialized for the package
+var Default = &Monitor{
+	subscribers: make(map[chan runc.Exit]struct{}),
+}
+
+// Monitor monitors the underlying system for process status changes
+type Monitor struct {
+	sync.Mutex
+
+	subscribers map[chan runc.Exit]struct{}
+}
+
+// Start starts the command and registers the process with the reaper
+func (m *Monitor) Start(c *exec.Cmd) (chan runc.Exit, error) {
+	ec := m.Subscribe()
+	if err := c.Start(); err != nil {
+		m.Unsubscribe(ec)
+		return nil, err
+	}
+	return ec, nil
+}
+
+// Wait blocks until a process is signaled as dead.
+// User should rely on the value of the exit status to determine if the
+// command was successful or not.
+func (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) {
+	for e := range ec {
+		if e.Pid == c.Process.Pid {
+			// make sure we flush all IO
+			c.Wait()
+			m.Unsubscribe(ec)
+			return e.Status, nil
+		}
+	}
+	// return no such process if the ec channel is closed and no more exit
+	// events will be sent
+	return -1, ErrNoSuchProcess
+}
+
+// Subscribe to process exit changes
+func (m *Monitor) Subscribe() chan runc.Exit {
+	c := make(chan runc.Exit, bufferSize)
+	m.Lock()
+	m.subscribers[c] = struct{}{}
+	m.Unlock()
+	return c
+}
+
+// Unsubscribe from process exit changes
+func (m *Monitor) Unsubscribe(c chan runc.Exit) {
+	m.Lock()
+	delete(m.subscribers, c)
+	close(c)
+	m.Unlock()
+}
diff --git a/vendor/github.com/containerd/containerd/reference/reference.go b/vendor/github.com/containerd/containerd/reference/reference.go
index 55c43b881..79f165de0 100644
--- a/vendor/github.com/containerd/containerd/reference/reference.go
+++ b/vendor/github.com/containerd/containerd/reference/reference.go
@@ -1,3 +1,19 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
 package reference
 
 import (
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth.go b/vendor/github.com/containerd/containerd/remotes/docker/auth.go
index aa33752a6..80bcb9dcf 100644
--- a/vendor/github.com/containerd/containerd/remotes/docker/auth.go
+++ b/vendor/github.com/containerd/containerd/remotes/docker/auth.go
@@ -1,3 +1,19 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
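For reviewers, a minimal usage sketch of the new reaper package above (illustrative only, not part of the vendored tree): it assumes the embedding process owns SIGCHLD delivery, as the containerd shim does, and uses only the APIs added in this patch (reaper.Reap, reaper.Default.Start, reaper.Default.Wait).

// Illustrative only: wiring Reap into a SIGCHLD handler and waiting on a
// command through the Default monitor.
package main

import (
	"os"
	"os/exec"
	"os/signal"

	"github.com/containerd/containerd/reaper"
	"golang.org/x/sys/unix"
)

func main() {
	signals := make(chan os.Signal, 32)
	signal.Notify(signals, unix.SIGCHLD)
	go func() {
		for range signals {
			// Reap all exited children and fan the exits out to subscribers.
			reaper.Reap()
		}
	}()

	cmd := exec.Command("true")
	// Start subscribes before starting the command so the exit cannot be missed.
	ec, err := reaper.Default.Start(cmd)
	if err != nil {
		os.Exit(1)
	}
	// Wait blocks until the exit event for cmd's pid arrives on ec.
	status, err := reaper.Default.Wait(cmd, ec)
	if err != nil {
		os.Exit(1)
	}
	os.Exit(status)
}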
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + package docker import ( diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go index 222cf83c0..51e605e12 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package docker import ( diff --git a/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go index f6de60a27..5a7778953 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package docker import ( diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index 405480b54..e2caf7028 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package docker import ( diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index edc994158..06b1724b4 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package docker import ( diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go index e7d58acfd..1cf4dd7a1 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package schema1 import ( diff --git a/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/remotes/docker/scope.go index 9cf0997dc..52c244311 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/scope.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/scope.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package docker import ( diff --git a/vendor/github.com/containerd/containerd/remotes/docker/status.go b/vendor/github.com/containerd/containerd/remotes/docker/status.go index 4b8dbbc5c..8069d6767 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/status.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/status.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package docker import ( diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index a37af65e0..335384810 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -1,8 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package remotes import ( "context" - "encoding/json" "fmt" "io" "math/rand" @@ -102,83 +117,27 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc break } + ws, err := cw.Status() + if err != nil { + return err + } + + if ws.Offset == desc.Size { + // If writer is already complete, commit and return + err := cw.Commit(ctx, desc.Size, desc.Digest) + if err != nil && !errdefs.IsAlreadyExists(err) { + return errors.Wrapf(err, "failed commit on ref %q", ws.Ref) + } + return nil + } + rc, err := fetcher.Fetch(ctx, desc) if err != nil { return err } defer rc.Close() - r, opts := commitOpts(desc, rc) - return content.Copy(ctx, cw, r, desc.Size, desc.Digest, opts...) -} - -// commitOpts gets the appropriate content options to alter -// the content info on commit based on media type. -func commitOpts(desc ocispec.Descriptor, r io.Reader) (io.Reader, []content.Opt) { - var childrenF func(r io.Reader) ([]ocispec.Descriptor, error) - - // TODO(AkihiroSuda): use images/oci.GetChildrenDescriptors? 
- switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - childrenF = func(r io.Reader) ([]ocispec.Descriptor, error) { - var ( - manifest ocispec.Manifest - decoder = json.NewDecoder(r) - ) - if err := decoder.Decode(&manifest); err != nil { - return nil, err - } - - return append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...), nil - } - case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - childrenF = func(r io.Reader) ([]ocispec.Descriptor, error) { - var ( - index ocispec.Index - decoder = json.NewDecoder(r) - ) - if err := decoder.Decode(&index); err != nil { - return nil, err - } - - return index.Manifests, nil - } - default: - return r, nil - } - - pr, pw := io.Pipe() - - var children []ocispec.Descriptor - errC := make(chan error) - - go func() { - defer close(errC) - ch, err := childrenF(pr) - if err != nil { - errC <- err - } - children = ch - }() - - opt := func(info *content.Info) error { - err := <-errC - if err != nil { - return errors.Wrap(err, "unable to get commit labels") - } - - if len(children) > 0 { - if info.Labels == nil { - info.Labels = map[string]string{} - } - for i, ch := range children { - info.Labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = ch.Digest.String() - } - } - return nil - } - - return io.TeeReader(r, pw), []content.Opt{opt} + return content.Copy(ctx, cw, rc, desc.Size, desc.Digest) } // PushHandler returns a handler that will push all content from the provider @@ -243,7 +202,7 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, pr pushHandler := PushHandler(pusher, provider) handlers := append(baseHandlers, - images.ChildrenHandler(provider, platforms.Default()), + images.FilterPlatform(platforms.Default(), images.ChildrenHandler(provider)), filterHandler, pushHandler, ) diff --git a/vendor/github.com/containerd/containerd/remotes/resolver.go b/vendor/github.com/containerd/containerd/remotes/resolver.go index caf4c97ce..a9b2b78aa 100644 --- a/vendor/github.com/containerd/containerd/remotes/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/resolver.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package remotes import ( diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go index 21db6782c..73613337d 100644 --- a/vendor/github.com/containerd/containerd/rootfs/apply.go +++ b/vendor/github.com/containerd/containerd/rootfs/apply.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package rootfs import ( diff --git a/vendor/github.com/containerd/containerd/rootfs/diff.go b/vendor/github.com/containerd/containerd/rootfs/diff.go index ae5dd17af..c7f954e9f 100644 --- a/vendor/github.com/containerd/containerd/rootfs/diff.go +++ b/vendor/github.com/containerd/containerd/rootfs/diff.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package rootfs import ( diff --git a/vendor/github.com/containerd/containerd/rootfs/init.go b/vendor/github.com/containerd/containerd/rootfs/init.go index 4f32f11ae..326f30f70 100644 --- a/vendor/github.com/containerd/containerd/rootfs/init.go +++ b/vendor/github.com/containerd/containerd/rootfs/init.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package rootfs import ( diff --git a/vendor/github.com/containerd/containerd/rootfs/init_linux.go b/vendor/github.com/containerd/containerd/rootfs/init_linux.go index cabc4577e..84dc56522 100644 --- a/vendor/github.com/containerd/containerd/rootfs/init_linux.go +++ b/vendor/github.com/containerd/containerd/rootfs/init_linux.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package rootfs import ( diff --git a/vendor/github.com/containerd/containerd/rootfs/init_other.go b/vendor/github.com/containerd/containerd/rootfs/init_other.go index b5e04e2e6..261121085 100644 --- a/vendor/github.com/containerd/containerd/rootfs/init_other.go +++ b/vendor/github.com/containerd/containerd/rootfs/init_other.go @@ -1,5 +1,21 @@ // +build !linux +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package rootfs const ( diff --git a/vendor/github.com/containerd/containerd/runtime/events.go b/vendor/github.com/containerd/containerd/runtime/events.go new file mode 100644 index 000000000..4a064c885 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/events.go @@ -0,0 +1,42 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package runtime + +const ( + // TaskCreateEventTopic for task create + TaskCreateEventTopic = "/tasks/create" + // TaskStartEventTopic for task start + TaskStartEventTopic = "/tasks/start" + // TaskOOMEventTopic for task oom + TaskOOMEventTopic = "/tasks/oom" + // TaskExitEventTopic for task exit + TaskExitEventTopic = "/tasks/exit" + // TaskDeleteEventTopic for task delete + TaskDeleteEventTopic = "/tasks/delete" + // TaskExecAddedEventTopic for task exec create + TaskExecAddedEventTopic = "/tasks/exec-added" + // TaskExecStartedEventTopic for task exec start + TaskExecStartedEventTopic = "/tasks/exec-started" + // TaskPausedEventTopic for task pause + TaskPausedEventTopic = "/tasks/paused" + // TaskResumedEventTopic for task resume + TaskResumedEventTopic = "/tasks/resumed" + // TaskCheckpointedEventTopic for task checkpoint + TaskCheckpointedEventTopic = "/tasks/checkpointed" + // TaskUnknownTopic for unknown task events + TaskUnknownTopic = "/tasks/?" +) diff --git a/vendor/github.com/containerd/containerd/runtime/monitor.go b/vendor/github.com/containerd/containerd/runtime/monitor.go new file mode 100644 index 000000000..eb07ebdb4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/monitor.go @@ -0,0 +1,70 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
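An aside on the event topics introduced in runtime/events.go above: shims and runtimes publish task lifecycle events under these fixed topic strings. A small sketch of selecting one (illustrative only; topicFor and the action strings are hypothetical, not part of this patch):

// Illustrative only: mapping a lifecycle action to its event topic,
// using just the constants added above.
package example

import "github.com/containerd/containerd/runtime"

func topicFor(action string) string {
	switch action {
	case "create":
		return runtime.TaskCreateEventTopic
	case "start":
		return runtime.TaskStartEventTopic
	case "oom":
		return runtime.TaskOOMEventTopic
	case "exit":
		return runtime.TaskExitEventTopic
	case "delete":
		return runtime.TaskDeleteEventTopic
	default:
		// Unrecognized transitions fall back to the unknown topic.
		return runtime.TaskUnknownTopic
	}
}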
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package runtime + +// TaskMonitor provides an interface for monitoring of containers within containerd +type TaskMonitor interface { + // Monitor adds the provided container to the monitor + Monitor(Task) error + // Stop stops and removes the provided container from the monitor + Stop(Task) error +} + +// NewMultiTaskMonitor returns a new TaskMonitor broadcasting to the provided monitors +func NewMultiTaskMonitor(monitors ...TaskMonitor) TaskMonitor { + return &multiTaskMonitor{ + monitors: monitors, + } +} + +// NewNoopMonitor is a task monitor that does nothing +func NewNoopMonitor() TaskMonitor { + return &noopTaskMonitor{} +} + +type noopTaskMonitor struct { +} + +func (mm *noopTaskMonitor) Monitor(c Task) error { + return nil +} + +func (mm *noopTaskMonitor) Stop(c Task) error { + return nil +} + +type multiTaskMonitor struct { + monitors []TaskMonitor +} + +func (mm *multiTaskMonitor) Monitor(c Task) error { + for _, m := range mm.monitors { + if err := m.Monitor(c); err != nil { + return err + } + } + return nil +} + +func (mm *multiTaskMonitor) Stop(c Task) error { + for _, m := range mm.monitors { + if err := m.Stop(c); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/runtime.go b/vendor/github.com/containerd/containerd/runtime/runtime.go new file mode 100644 index 000000000..000af4ad3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/runtime.go @@ -0,0 +1,70 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package runtime + +import ( + "context" + "time" + + "github.com/containerd/containerd/mount" + "github.com/gogo/protobuf/types" +) + +// IO holds process IO information +type IO struct { + Stdin string + Stdout string + Stderr string + Terminal bool +} + +// CreateOpts contains task creation data +type CreateOpts struct { + // Spec is the OCI runtime spec + Spec *types.Any + // Rootfs mounts to perform to gain access to the container's filesystem + Rootfs []mount.Mount + // IO for the container's main process + IO IO + // Checkpoint digest to restore container state + Checkpoint string + // Options for the runtime and container + Options *types.Any +} + +// Exit information for a process +type Exit struct { + Pid uint32 + Status uint32 + Timestamp time.Time +} + +// Runtime is responsible for the creation of containers for a certain platform, +// arch, or custom usage. +type Runtime interface { + // ID of the runtime + ID() string + // Create creates a task with the provided id and options. 
+	Create(ctx context.Context, id string, opts CreateOpts) (Task, error)
+	// Get returns a task.
+	Get(context.Context, string) (Task, error)
+	// Tasks returns all the current tasks for the runtime.
+	// Any container runs at most one task at a time.
+	Tasks(context.Context) ([]Task, error)
+	// Delete removes the task in the runtime.
+	Delete(context.Context, Task) (*Exit, error)
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/task.go b/vendor/github.com/containerd/containerd/runtime/task.go
new file mode 100644
index 000000000..f8a67cfae
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/task.go
@@ -0,0 +1,132 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package runtime
+
+import (
+	"context"
+	"time"
+
+	"github.com/gogo/protobuf/types"
+)
+
+// TaskInfo provides task specific information
+type TaskInfo struct {
+	ID        string
+	Runtime   string
+	Spec      []byte
+	Namespace string
+}
+
+// Process is a runtime object for an executing process inside a container
type Process interface {
+	ID() string
+	// State returns the process state
+	State(context.Context) (State, error)
+	// Kill signals a container
+	Kill(context.Context, uint32, bool) error
+	// ResizePty resizes the process's pty/console
+	ResizePty(context.Context, ConsoleSize) error
+	// CloseIO closes the process's stdin
+	CloseIO(context.Context) error
+	// Start the container's user-defined process
+	Start(context.Context) error
+	// Wait for the process to exit
+	Wait(context.Context) (*Exit, error)
+}
+
+// Task is the runtime object for an executing container
+type Task interface {
+	Process
+
+	// Information of the container
+	Info() TaskInfo
+	// Pause pauses the container process
+	Pause(context.Context) error
+	// Resume unpauses the container process
+	Resume(context.Context) error
+	// Exec adds a process into the container
+	Exec(context.Context, string, ExecOpts) (Process, error)
+	// Pids returns all pids
+	Pids(context.Context) ([]ProcessInfo, error)
+	// Checkpoint checkpoints a container to an image with live system data
+	Checkpoint(context.Context, string, *types.Any) error
+	// DeleteProcess deletes a specific exec process via its id
+	DeleteProcess(context.Context, string) (*Exit, error)
+	// Update sets the provided resources to a running task
+	Update(context.Context, *types.Any) error
+	// Process returns a process within the task for the provided id
+	Process(context.Context, string) (Process, error)
+	// Metrics returns runtime specific metrics for a task
+	Metrics(context.Context) (interface{}, error)
+}
+
+// ExecOpts provides additional options for additional processes running in a task
+type ExecOpts struct {
+	Spec *types.Any
+	IO   IO
+}
+
+// ConsoleSize of a pty or windows terminal
+type ConsoleSize struct {
+	Width  uint32
+	Height uint32
+}
+
+// Status is the runtime status of a task and/or process
+type Status int
+
+const (
+	// CreatedStatus when a process has been created
+	CreatedStatus Status = iota + 1
+	// RunningStatus when a process is running
+	RunningStatus
+	// StoppedStatus when a process has stopped
+	StoppedStatus
+	// DeletedStatus when a process has been deleted
+	DeletedStatus
+	// PausedStatus when a process is paused
+	PausedStatus
+	// PausingStatus when a process is currently pausing
+	PausingStatus
+)
+
+// State information for a process
+type State struct {
+	// Status is the current status of the container
+	Status Status
+	// Pid is the main process id for the container
+	Pid uint32
+	// ExitStatus of the process
+	// Only valid if the Status is Stopped
+	ExitStatus uint32
+	// ExitedAt is the time at which the process exited
+	// Only valid if the Status is Stopped
+	ExitedAt time.Time
+	Stdin    string
+	Stdout   string
+	Stderr   string
+	Terminal bool
+}
+
+// ProcessInfo holds platform specific process information
+type ProcessInfo struct {
+	// Pid is the process ID
+	Pid uint32
+	// Info includes additional process information
+	// Info varies by platform
+	Info interface{}
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/task_list.go b/vendor/github.com/containerd/containerd/runtime/task_list.go
new file mode 100644
index 000000000..92f25715c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/task_list.go
@@ -0,0 +1,122 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/ + +package runtime + +import ( + "context" + "sync" + + "github.com/containerd/containerd/namespaces" + "github.com/pkg/errors" +) + +var ( + // ErrTaskNotExists is returned when a task does not exist + ErrTaskNotExists = errors.New("task does not exist") + // ErrTaskAlreadyExists is returned when a task already exists + ErrTaskAlreadyExists = errors.New("task already exists") +) + +// NewTaskList returns a new TaskList +func NewTaskList() *TaskList { + return &TaskList{ + tasks: make(map[string]map[string]Task), + } +} + +// TaskList holds and provides locking around tasks +type TaskList struct { + mu sync.Mutex + tasks map[string]map[string]Task +} + +// Get a task +func (l *TaskList) Get(ctx context.Context, id string) (Task, error) { + l.mu.Lock() + defer l.mu.Unlock() + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + tasks, ok := l.tasks[namespace] + if !ok { + return nil, ErrTaskNotExists + } + t, ok := tasks[id] + if !ok { + return nil, ErrTaskNotExists + } + return t, nil +} + +// GetAll tasks under a namespace +func (l *TaskList) GetAll(ctx context.Context) ([]Task, error) { + l.mu.Lock() + defer l.mu.Unlock() + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + var o []Task + tasks, ok := l.tasks[namespace] + if !ok { + return o, nil + } + for _, t := range tasks { + o = append(o, t) + } + return o, nil +} + +// Add a task +func (l *TaskList) Add(ctx context.Context, t Task) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + return l.AddWithNamespace(namespace, t) +} + +// AddWithNamespace adds a task with the provided namespace +func (l *TaskList) AddWithNamespace(namespace string, t Task) error { + l.mu.Lock() + defer l.mu.Unlock() + + id := t.ID() + if _, ok := l.tasks[namespace]; !ok { + l.tasks[namespace] = make(map[string]Task) + } + if _, ok := l.tasks[namespace][id]; ok { + return errors.Wrap(ErrTaskAlreadyExists, id) + } + l.tasks[namespace][id] = t + return nil +} + +// Delete a task +func (l *TaskList) Delete(ctx context.Context, id string) { + l.mu.Lock() + defer l.mu.Unlock() + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return + } + tasks, ok := l.tasks[namespace] + if ok { + delete(tasks, id) + } +} diff --git a/vendor/github.com/containerd/containerd/runtime/typeurl.go b/vendor/github.com/containerd/containerd/runtime/typeurl.go new file mode 100644 index 000000000..eb54e250f --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/typeurl.go @@ -0,0 +1,34 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package runtime + +import ( + "strconv" + + "github.com/containerd/typeurl" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func init() { + const prefix = "types.containerd.io" + // register TypeUrls for commonly marshaled external types + major := strconv.Itoa(specs.VersionMajor) + typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec") + typeurl.Register(&specs.Process{}, prefix, "opencontainers/runtime-spec", major, "Process") + typeurl.Register(&specs.LinuxResources{}, prefix, "opencontainers/runtime-spec", major, "LinuxResources") + typeurl.Register(&specs.WindowsResources{}, prefix, "opencontainers/runtime-spec", major, "WindowsResources") +} diff --git a/vendor/github.com/containerd/containerd/server/config.go b/vendor/github.com/containerd/containerd/server/config.go new file mode 100644 index 000000000..67d1f4bcb --- /dev/null +++ b/vendor/github.com/containerd/containerd/server/config.go @@ -0,0 +1,99 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "github.com/BurntSushi/toml" + "github.com/containerd/containerd/errdefs" + "github.com/pkg/errors" +) + +// Config provides containerd configuration data for the server +type Config struct { + // Root is the path to a directory where containerd will store persistent data + Root string `toml:"root"` + // State is the path to a directory where containerd will store transient data + State string `toml:"state"` + // GRPC configuration settings + GRPC GRPCConfig `toml:"grpc"` + // Debug and profiling settings + Debug Debug `toml:"debug"` + // Metrics and monitoring settings + Metrics MetricsConfig `toml:"metrics"` + // DisabledPlugins are IDs of plugins to disable. Disabled plugins won't be + // initialized and started. 
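+ // For example, a daemon TOML config might contain (the plugin ID shown
+ // is illustrative):
+ //
+ //   disabled_plugins = ["aufs"]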
+ DisabledPlugins []string `toml:"disabled_plugins"`
+ // Plugins provides plugin-specific configuration for the initialization of a plugin
+ Plugins map[string]toml.Primitive `toml:"plugins"`
+ // OOMScore adjusts containerd's OOM score
+ OOMScore int `toml:"oom_score"`
+ // Cgroup specifies cgroup information for the containerd daemon process
+ Cgroup CgroupConfig `toml:"cgroup"`
+
+ md toml.MetaData
+}
+
+// GRPCConfig provides GRPC configuration for the socket
+type GRPCConfig struct {
+ Address string `toml:"address"`
+ UID int `toml:"uid"`
+ GID int `toml:"gid"`
+}
+
+// Debug provides debug configuration
+type Debug struct {
+ Address string `toml:"address"`
+ UID int `toml:"uid"`
+ GID int `toml:"gid"`
+ Level string `toml:"level"`
+}
+
+// MetricsConfig provides metrics configuration
+type MetricsConfig struct {
+ Address string `toml:"address"`
+ GRPCHistogram bool `toml:"grpc_histogram"`
+}
+
+// CgroupConfig provides cgroup configuration
+type CgroupConfig struct {
+ Path string `toml:"path"`
+}
+
+// Decode unmarshals a plugin-specific configuration by plugin id
+func (c *Config) Decode(id string, v interface{}) (interface{}, error) {
+ data, ok := c.Plugins[id]
+ if !ok {
+ return v, nil
+ }
+ if err := c.md.PrimitiveDecode(data, v); err != nil {
+ return nil, err
+ }
+ return v, nil
+}
+
+// LoadConfig loads the containerd server config from the provided path
+func LoadConfig(path string, v *Config) error {
+ if v == nil {
+ return errors.Wrapf(errdefs.ErrInvalidArgument, "argument v must not be nil")
+ }
+ md, err := toml.DecodeFile(path, v)
+ if err != nil {
+ return err
+ }
+ v.md = md
+ return nil
+}
diff --git a/vendor/github.com/containerd/containerd/server/server.go b/vendor/github.com/containerd/containerd/server/server.go
new file mode 100644
index 000000000..f5b24f636
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/server/server.go
@@ -0,0 +1,272 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package server
+
+import (
+ "expvar"
+ "io"
+ "net"
+ "net/http"
+ "net/http/pprof"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/boltdb/bolt"
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/content/local"
+ "github.com/containerd/containerd/events/exchange"
+ "github.com/containerd/containerd/log"
+ "github.com/containerd/containerd/metadata"
+ "github.com/containerd/containerd/plugin"
+ "github.com/containerd/containerd/snapshots"
+ metrics "github.com/docker/go-metrics"
+ grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+
+ "google.golang.org/grpc"
+)
+
+// New creates and initializes a new containerd server
+func New(ctx context.Context, config *Config) (*Server, error) {
+ switch {
+ case config.Root == "":
+ return nil, errors.New("root must be specified")
+ case config.State == "":
+ return nil, errors.New("state must be specified")
+ case config.Root == config.State:
+ return nil, errors.New("root and state must be different paths")
+ }
+
+ if err := os.MkdirAll(config.Root, 0711); err != nil {
+ return nil, err
+ }
+ if err := os.MkdirAll(config.State, 0711); err != nil {
+ return nil, err
+ }
+ if err := apply(ctx, config); err != nil {
+ return nil, err
+ }
+ plugins, err := LoadPlugins(config)
+ if err != nil {
+ return nil, err
+ }
+ rpc := grpc.NewServer(
+ grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+ grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+ )
+ var (
+ services []plugin.Service
+ s = &Server{
+ rpc: rpc,
+ events: exchange.NewExchange(),
+ config: config,
+ }
+ initialized = plugin.NewPluginSet()
+ )
+ for _, p := range plugins {
+ id := p.URI()
+ log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id)
+
+ initContext := plugin.NewContext(
+ ctx,
+ p,
+ initialized,
+ config.Root,
+ config.State,
+ )
+ initContext.Events = s.events
+ initContext.Address = config.GRPC.Address
+
+ // load the plugin-specific configuration if it is provided
+ if p.Config != nil {
+ pluginConfig, err := config.Decode(p.ID, p.Config)
+ if err != nil {
+ return nil, err
+ }
+ initContext.Config = pluginConfig
+ }
+ result := p.Init(initContext)
+ if err := initialized.Add(result); err != nil {
+ return nil, errors.Wrapf(err, "could not add plugin result to plugin set")
+ }
+
+ instance, err := result.Instance()
+ if err != nil {
+ if plugin.IsSkipPlugin(err) {
+ log.G(ctx).WithField("type", p.Type).Infof("skip loading plugin %q...", id)
+ } else {
+ log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id)
+ }
+ continue
+ }
+ // check for grpc services that should be registered with the server
+ if service, ok := instance.(plugin.Service); ok {
+ services = append(services, service)
+ }
+ s.plugins = append(s.plugins, result)
+ }
+ // register services after all plugins have been initialized
+ for _, service := range services {
+ if err := service.Register(rpc); err != nil {
+ return nil, err
+ }
+ }
+ return s, nil
+}
+
+// Server is the containerd main daemon
+type Server struct {
+ rpc *grpc.Server
+ events *exchange.Exchange
+ config *Config
+ plugins []*plugin.Plugin
+}
+
+// ServeGRPC provides the containerd grpc APIs on the provided listener
+func (s *Server) ServeGRPC(l net.Listener) error {
+ if s.config.Metrics.GRPCHistogram {
+ // enable grpc time histograms to measure rpc latencies
+ grpc_prometheus.EnableHandlingTimeHistogram()
+ }
+ // before we start serving the grpc API, register the grpc_prometheus metrics
+ // handler. This needs to be the last service registered so that it can collect
+ // metrics for every other service
+ grpc_prometheus.Register(s.rpc)
+ return trapClosedConnErr(s.rpc.Serve(l))
+}
+
+// ServeMetrics provides a prometheus endpoint for exposing metrics
+func (s *Server) ServeMetrics(l net.Listener) error {
+ m := http.NewServeMux()
+ m.Handle("/v1/metrics", metrics.Handler())
+ return trapClosedConnErr(http.Serve(l, m))
+}
+
+// ServeDebug provides a debug endpoint
+func (s *Server) ServeDebug(l net.Listener) error {
+ // don't use the default http server mux to make sure nothing gets registered
+ // that we don't want to expose via containerd
+ m := http.NewServeMux()
+ m.Handle("/debug/vars", expvar.Handler())
+ m.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
+ m.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
+ m.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
+ m.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
+ m.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
+ return trapClosedConnErr(http.Serve(l, m))
+}
+
+// Stop the containerd server canceling any open connections
+func (s *Server) Stop() {
+ s.rpc.Stop()
+ for i := len(s.plugins) - 1; i >= 0; i-- {
+ p := s.plugins[i]
+ instance, err := p.Instance()
+ if err != nil {
+ log.L.WithError(err).WithField("id", p.Registration.ID).
+ Errorf("could not get plugin instance")
+ continue
+ }
+ closer, ok := instance.(io.Closer)
+ if !ok {
+ continue
+ }
+ if err := closer.Close(); err != nil {
+ log.L.WithError(err).WithField("id", p.Registration.ID).
+ Errorf("failed to close plugin")
+ }
+ }
+}
+
+// LoadPlugins loads all plugins into containerd and generates an ordered graph
+// of all plugins.
+func LoadPlugins(config *Config) ([]*plugin.Registration, error) {
+ // load all plugins into containerd
+ if err := plugin.Load(filepath.Join(config.Root, "plugins")); err != nil {
+ return nil, err
+ }
+ // load additional plugins that don't automatically register themselves
+ plugin.Register(&plugin.Registration{
+ Type: plugin.ContentPlugin,
+ ID: "content",
+ InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+ ic.Meta.Exports["root"] = ic.Root
+ return local.NewStore(ic.Root)
+ },
+ })
+ plugin.Register(&plugin.Registration{
+ Type: plugin.MetadataPlugin,
+ ID: "bolt",
+ Requires: []plugin.Type{
+ plugin.ContentPlugin,
+ plugin.SnapshotPlugin,
+ },
+ InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+ if err := os.MkdirAll(ic.Root, 0711); err != nil {
+ return nil, err
+ }
+ cs, err := ic.Get(plugin.ContentPlugin)
+ if err != nil {
+ return nil, err
+ }
+
+ snapshottersRaw, err := ic.GetByType(plugin.SnapshotPlugin)
+ if err != nil {
+ return nil, err
+ }
+
+ snapshotters := make(map[string]snapshots.Snapshotter)
+ for name, sn := range snapshottersRaw {
+ sn, err := sn.Instance()
+ if err != nil {
+ log.G(ic.Context).WithError(err).
+ Warnf("could not use snapshotter %v in metadata plugin", name) + continue + } + snapshotters[name] = sn.(snapshots.Snapshotter) + } + + path := filepath.Join(ic.Root, "meta.db") + ic.Meta.Exports["path"] = path + + db, err := bolt.Open(path, 0644, nil) + if err != nil { + return nil, err + } + mdb := metadata.NewDB(db, cs.(content.Store), snapshotters) + if err := mdb.Init(ic.Context); err != nil { + return nil, err + } + return mdb, nil + }, + }) + + // return the ordered graph for plugins + return plugin.Graph(config.DisabledPlugins), nil +} + +func trapClosedConnErr(err error) error { + if err == nil { + return nil + } + if strings.Contains(err.Error(), "use of closed network connection") { + return nil + } + return err +} diff --git a/vendor/github.com/containerd/containerd/server/server_linux.go b/vendor/github.com/containerd/containerd/server/server_linux.go new file mode 100644 index 000000000..c45ccd3d5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/server/server_linux.go @@ -0,0 +1,54 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "context" + "os" + + "github.com/containerd/cgroups" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/sys" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// apply sets config settings on the server process +func apply(ctx context.Context, config *Config) error { + if config.OOMScore != 0 { + log.G(ctx).Debugf("changing OOM score to %d", config.OOMScore) + if err := sys.SetOOMScore(os.Getpid(), config.OOMScore); err != nil { + log.G(ctx).WithError(err).Errorf("failed to change OOM score to %d", config.OOMScore) + } + } + if config.Cgroup.Path != "" { + cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(config.Cgroup.Path)) + if err != nil { + if err != cgroups.ErrCgroupDeleted { + return err + } + if cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(config.Cgroup.Path), &specs.LinuxResources{}); err != nil { + return err + } + } + if err := cg.Add(cgroups.Process{ + Pid: os.Getpid(), + }); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/server/server_solaris.go b/vendor/github.com/containerd/containerd/server/server_solaris.go new file mode 100644 index 000000000..0dbbb9fea --- /dev/null +++ b/vendor/github.com/containerd/containerd/server/server_solaris.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package server + +import "context" + +func apply(_ context.Context, _ *Config) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/server/server_unsupported.go b/vendor/github.com/containerd/containerd/server/server_unsupported.go new file mode 100644 index 000000000..c6211dbb2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/server/server_unsupported.go @@ -0,0 +1,25 @@ +// +build !linux,!windows,!solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import "context" + +func apply(_ context.Context, _ *Config) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/server/server_windows.go b/vendor/github.com/containerd/containerd/server/server_windows.go new file mode 100644 index 000000000..ac0b8481c --- /dev/null +++ b/vendor/github.com/containerd/containerd/server/server_windows.go @@ -0,0 +1,27 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package server + +import ( + "context" +) + +func apply(_ context.Context, _ *Config) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/services/containers/helpers.go b/vendor/github.com/containerd/containerd/services/containers/helpers.go new file mode 100644 index 000000000..dde4caed1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/containers/helpers.go @@ -0,0 +1,70 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package containers + +import ( + api "github.com/containerd/containerd/api/services/containers/v1" + "github.com/containerd/containerd/containers" +) + +func containersToProto(containers []containers.Container) []api.Container { + var containerspb []api.Container + + for _, image := range containers { + containerspb = append(containerspb, containerToProto(&image)) + } + + return containerspb +} + +func containerToProto(container *containers.Container) api.Container { + return api.Container{ + ID: container.ID, + Labels: container.Labels, + Image: container.Image, + Runtime: &api.Container_Runtime{ + Name: container.Runtime.Name, + Options: container.Runtime.Options, + }, + Spec: container.Spec, + Snapshotter: container.Snapshotter, + SnapshotKey: container.SnapshotKey, + CreatedAt: container.CreatedAt, + UpdatedAt: container.UpdatedAt, + Extensions: container.Extensions, + } +} + +func containerFromProto(containerpb *api.Container) containers.Container { + var runtime containers.RuntimeInfo + if containerpb.Runtime != nil { + runtime = containers.RuntimeInfo{ + Name: containerpb.Runtime.Name, + Options: containerpb.Runtime.Options, + } + } + return containers.Container{ + ID: containerpb.ID, + Labels: containerpb.Labels, + Image: containerpb.Image, + Runtime: runtime, + Spec: containerpb.Spec, + Snapshotter: containerpb.Snapshotter, + SnapshotKey: containerpb.SnapshotKey, + Extensions: containerpb.Extensions, + } +} diff --git a/vendor/github.com/containerd/containerd/services/containers/service.go b/vendor/github.com/containerd/containerd/services/containers/service.go new file mode 100644 index 000000000..162cae630 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/containers/service.go @@ -0,0 +1,193 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package containers + +import ( + "github.com/boltdb/bolt" + eventstypes "github.com/containerd/containerd/api/events" + api "github.com/containerd/containerd/api/services/containers/v1" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + ptypes "github.com/gogo/protobuf/types" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "containers", + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + return NewService(m.(*metadata.DB), ic.Events), nil + }, + }) +} + +type service struct { + db *metadata.DB + publisher events.Publisher +} + +// NewService returns the container GRPC server +func NewService(db *metadata.DB, publisher events.Publisher) api.ContainersServer { + return &service{db: db, publisher: publisher} +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterContainersServer(server, s) + return nil +} + +func (s *service) Get(ctx context.Context, req *api.GetContainerRequest) (*api.GetContainerResponse, error) { + var resp api.GetContainerResponse + + return &resp, errdefs.ToGRPC(s.withStoreView(ctx, func(ctx context.Context, store containers.Store) error { + container, err := store.Get(ctx, req.ID) + if err != nil { + return err + } + containerpb := containerToProto(&container) + resp.Container = containerpb + + return nil + })) +} + +func (s *service) List(ctx context.Context, req *api.ListContainersRequest) (*api.ListContainersResponse, error) { + var resp api.ListContainersResponse + + return &resp, errdefs.ToGRPC(s.withStoreView(ctx, func(ctx context.Context, store containers.Store) error { + containers, err := store.List(ctx, req.Filters...) 
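+ // req.Filters entries use the containerd filter grammar; an expression
+ // like `labels."app"==web` would be a typical (illustrative) value.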
+ if err != nil { + return err + } + + resp.Containers = containersToProto(containers) + return nil + })) +} + +func (s *service) Create(ctx context.Context, req *api.CreateContainerRequest) (*api.CreateContainerResponse, error) { + var resp api.CreateContainerResponse + + if err := s.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { + container := containerFromProto(&req.Container) + + created, err := store.Create(ctx, container) + if err != nil { + return err + } + + resp.Container = containerToProto(&created) + + return nil + }); err != nil { + return &resp, errdefs.ToGRPC(err) + } + if err := s.publisher.Publish(ctx, "/containers/create", &eventstypes.ContainerCreate{ + ID: resp.Container.ID, + Image: resp.Container.Image, + Runtime: &eventstypes.ContainerCreate_Runtime{ + Name: resp.Container.Runtime.Name, + Options: resp.Container.Runtime.Options, + }, + }); err != nil { + return &resp, err + } + + return &resp, nil +} + +func (s *service) Update(ctx context.Context, req *api.UpdateContainerRequest) (*api.UpdateContainerResponse, error) { + if req.Container.ID == "" { + return nil, status.Errorf(codes.InvalidArgument, "Container.ID required") + } + var ( + resp api.UpdateContainerResponse + container = containerFromProto(&req.Container) + ) + + if err := s.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { + var fieldpaths []string + if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { + for _, path := range req.UpdateMask.Paths { + fieldpaths = append(fieldpaths, path) + } + } + + updated, err := store.Update(ctx, container, fieldpaths...) + if err != nil { + return err + } + + resp.Container = containerToProto(&updated) + return nil + }); err != nil { + return &resp, errdefs.ToGRPC(err) + } + + if err := s.publisher.Publish(ctx, "/containers/update", &eventstypes.ContainerUpdate{ + ID: resp.Container.ID, + Image: resp.Container.Image, + Labels: resp.Container.Labels, + SnapshotKey: resp.Container.SnapshotKey, + }); err != nil { + return &resp, err + } + + return &resp, nil +} + +func (s *service) Delete(ctx context.Context, req *api.DeleteContainerRequest) (*ptypes.Empty, error) { + if err := s.withStoreUpdate(ctx, func(ctx context.Context, store containers.Store) error { + return store.Delete(ctx, req.ID) + }); err != nil { + return &ptypes.Empty{}, errdefs.ToGRPC(err) + } + + if err := s.publisher.Publish(ctx, "/containers/delete", &eventstypes.ContainerDelete{ + ID: req.ID, + }); err != nil { + return &ptypes.Empty{}, err + } + + return &ptypes.Empty{}, nil +} + +func (s *service) withStore(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) func(tx *bolt.Tx) error { + return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewContainerStore(tx)) } +} + +func (s *service) withStoreView(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) error { + return s.db.View(s.withStore(ctx, fn)) +} + +func (s *service) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store containers.Store) error) error { + return s.db.Update(s.withStore(ctx, fn)) +} diff --git a/vendor/github.com/containerd/containerd/services/content/service.go b/vendor/github.com/containerd/containerd/services/content/service.go new file mode 100644 index 000000000..d24fb6eba --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/content/service.go @@ -0,0 +1,490 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "io" + "sync" + + eventstypes "github.com/containerd/containerd/api/events" + api "github.com/containerd/containerd/api/services/content/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + ptypes "github.com/gogo/protobuf/types" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type service struct { + store content.Store + publisher events.Publisher +} + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +var _ api.ContentServer = &service{} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "content", + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + s, err := NewService(m.(*metadata.DB).ContentStore(), ic.Events) + return s, err + }, + }) +} + +// NewService returns the content GRPC server +func NewService(cs content.Store, publisher events.Publisher) (api.ContentServer, error) { + return &service{ + store: cs, + publisher: publisher, + }, nil +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterContentServer(server, s) + return nil +} + +func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) { + if err := req.Digest.Validate(); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest) + } + + bi, err := s.store.Info(ctx, req.Digest) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &api.InfoResponse{ + Info: infoToGRPC(bi), + }, nil +} + +func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) { + if err := req.Info.Digest.Validate(); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest) + } + + info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...) 
+ if err != nil {
+ return nil, errdefs.ToGRPC(err)
+ }
+
+ return &api.UpdateResponse{
+ Info: infoToGRPC(info),
+ }, nil
+}
+
+func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
+ var (
+ buffer []api.Info
+ sendBlock = func(block []api.Info) error {
+ // send a block of results
+ return session.Send(&api.ListContentResponse{
+ Info: block,
+ })
+ }
+ )
+
+ if err := s.store.Walk(session.Context(), func(info content.Info) error {
+ buffer = append(buffer, api.Info{
+ Digest: info.Digest,
+ Size_: info.Size,
+ CreatedAt: info.CreatedAt,
+ Labels: info.Labels,
+ })
+
+ if len(buffer) >= 100 {
+ if err := sendBlock(buffer); err != nil {
+ return err
+ }
+
+ buffer = buffer[:0]
+ }
+
+ return nil
+ }, req.Filters...); err != nil {
+ return err
+ }
+
+ if len(buffer) > 0 {
+ // send last block
+ if err := sendBlock(buffer); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) {
+ log.G(ctx).WithField("digest", req.Digest).Debugf("delete content")
+ if err := req.Digest.Validate(); err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, err.Error())
+ }
+
+ if err := s.store.Delete(ctx, req.Digest); err != nil {
+ return nil, errdefs.ToGRPC(err)
+ }
+
+ if err := s.publisher.Publish(ctx, "/content/delete", &eventstypes.ContentDelete{
+ Digest: req.Digest,
+ }); err != nil {
+ return nil, err
+ }
+
+ return &ptypes.Empty{}, nil
+}
+
+func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error {
+ if err := req.Digest.Validate(); err != nil {
+ return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
+ }
+
+ oi, err := s.store.Info(session.Context(), req.Digest)
+ if err != nil {
+ return errdefs.ToGRPC(err)
+ }
+
+ ra, err := s.store.ReaderAt(session.Context(), req.Digest)
+ if err != nil {
+ return errdefs.ToGRPC(err)
+ }
+ defer ra.Close()
+
+ var (
+ offset = req.Offset
+ size = req.Size_
+
+ // TODO(stevvooe): Using the global buffer pool. At 1MB, it is probably a
+ // little inefficient for work over a fast network. We can tune this later.
+ p = bufPool.Get().(*[]byte)
+ )
+ defer bufPool.Put(p)
+
+ if offset < 0 {
+ offset = 0
+ }
+
+ if size <= 0 {
+ size = oi.Size - offset
+ }
+
+ if offset+size > oi.Size {
+ return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
+ }
+
+ _, err = io.CopyBuffer(
+ &readResponseWriter{session: session},
+ io.NewSectionReader(ra, offset, size), *p)
+ return err
+}
+
+// readResponseWriter is a writer that places the output into ReadContentResponse messages.
+//
+// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
+// into the buffer size.
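+//
+// A rough sketch of the flow in Read above (names as used there):
+//
+//   rw := &readResponseWriter{session: session}
+//   _, err = io.CopyBuffer(rw, io.NewSectionReader(ra, offset, size), *p)
+//
+// Each Write call below then becomes one ReadContentResponse message.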
+type readResponseWriter struct {
+ offset int64
+ session api.Content_ReadServer
+}
+
+func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
+ if err := rw.session.Send(&api.ReadContentResponse{
+ Offset: rw.offset,
+ Data: p,
+ }); err != nil {
+ return 0, err
+ }
+
+ rw.offset += int64(len(p))
+ return len(p), nil
+}
+
+func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
+ status, err := s.store.Status(ctx, req.Ref)
+ if err != nil {
+ return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref)
+ }
+
+ var resp api.StatusResponse
+ resp.Status = &api.Status{
+ StartedAt: status.StartedAt,
+ UpdatedAt: status.UpdatedAt,
+ Ref: status.Ref,
+ Offset: status.Offset,
+ Total: status.Total,
+ Expected: status.Expected,
+ }
+
+ return &resp, nil
+}
+
+func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
+ statuses, err := s.store.ListStatuses(ctx, req.Filters...)
+ if err != nil {
+ return nil, errdefs.ToGRPC(err)
+ }
+
+ var resp api.ListStatusesResponse
+ for _, status := range statuses {
+ resp.Statuses = append(resp.Statuses, api.Status{
+ StartedAt: status.StartedAt,
+ UpdatedAt: status.UpdatedAt,
+ Ref: status.Ref,
+ Offset: status.Offset,
+ Total: status.Total,
+ Expected: status.Expected,
+ })
+ }
+
+ return &resp, nil
+}
+
+func (s *service) Write(session api.Content_WriteServer) (err error) {
+ var (
+ ctx = session.Context()
+ msg api.WriteContentResponse
+ req *api.WriteContentRequest
+ ref string
+ total int64
+ expected digest.Digest
+ )
+
+ defer func(msg *api.WriteContentResponse) {
+ // pump through the last message if no error was encountered
+ if err != nil {
+ if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists {
+ // TODO(stevvooe): Really need a log line here to track which
+ // errors are actually causing failure on the server side. May want
+ // to configure the service with an interceptor to make this work
+ // identically across all GRPC methods.
+ //
+ // This is pretty noisy, so we can remove it but leave it for now.
+ log.G(ctx).WithError(err).Error("(*service).Write failed")
+ }
+
+ return
+ }
+
+ err = session.Send(msg)
+ }(&msg)
+
+ // handle the very first request!
+ req, err = session.Recv()
+ if err != nil {
+ return err
+ }
+
+ ref = req.Ref
+
+ if ref == "" {
+ return status.Errorf(codes.InvalidArgument, "first message must have a reference")
+ }
+
+ fields := logrus.Fields{
+ "ref": ref,
+ }
+ total = req.Total
+ expected = req.Expected
+ if total > 0 {
+ fields["total"] = total
+ }
+
+ if expected != "" {
+ fields["expected"] = expected
+ }
+
+ ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))
+
+ log.G(ctx).Debug("(*service).Write started")
+ // this action locks the writer for the session.
+ wr, err := s.store.Writer(ctx, ref, total, expected)
+ if err != nil {
+ return errdefs.ToGRPC(err)
+ }
+ defer wr.Close()
+
+ for {
+ msg.Action = req.Action
+ ws, err := wr.Status()
+ if err != nil {
+ return errdefs.ToGRPC(err)
+ }
+
+ msg.Offset = ws.Offset // always set the offset.
+
+ // NOTE(stevvooe): In general, there are two cases under which a remote
+ // writer is used.
+ //
+ // For pull, we almost always have this before fetching large content,
+ // through descriptors. We allow predeclaration of the expected size
+ // and digest.
+ //
+ // For push, it is more complex. If we want to cut through content into
+ // storage, we may have no expectation until we are done processing the
+ // content. The case here is the following:
+ //
+ // 1. Start writing content.
+ // 2. Compress inline.
+ // 3. Validate digest and size (maybe).
+ //
+ // Supporting these two paths is quite awkward but it lets both API
+ // users use the same writer style for each with a minimum of overhead.
+ if req.Expected != "" {
+ if expected != "" && expected != req.Expected {
+ return status.Errorf(codes.InvalidArgument, "inconsistent digest provided: %v != %v", req.Expected, expected)
+ }
+ expected = req.Expected
+
+ if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
+ if err := s.store.Abort(session.Context(), ref); err != nil {
+ log.G(ctx).WithError(err).Error("failed to abort write")
+ }
+
+ return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
+ }
+ }
+
+ if req.Total > 0 {
+ // Update the expected total. Typically, this could be seen at
+ // negotiation time or on a commit message.
+ if total > 0 && req.Total != total {
+ return status.Errorf(codes.InvalidArgument, "inconsistent total provided: %v != %v", req.Total, total)
+ }
+ total = req.Total
+ }
+
+ switch req.Action {
+ case api.WriteActionStat:
+ msg.Digest = wr.Digest()
+ msg.StartedAt = ws.StartedAt
+ msg.UpdatedAt = ws.UpdatedAt
+ msg.Total = total
+ case api.WriteActionWrite, api.WriteActionCommit:
+ if req.Offset > 0 {
+ // validate the offset if provided
+ if req.Offset != ws.Offset {
+ return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
+ }
+ }
+
+ if req.Offset == 0 && ws.Offset > 0 {
+ if err := wr.Truncate(req.Offset); err != nil {
+ return errors.Wrapf(err, "truncate failed")
+ }
+ msg.Offset = req.Offset
+ }
+
+ // issue the write if we actually have data.
+ if len(req.Data) > 0 {
+ // While this looks like we could use io.WriterAt here, because we
+ // maintain the offset as append only, we just issue the write.
+ n, err := wr.Write(req.Data)
+ if err != nil {
+ return err
+ }
+
+ if n != len(req.Data) {
+ // TODO(stevvooe): Perhaps we can recover this by including it
+ // in the offset on the write return.
+ return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
+ }
+
+ msg.Offset += int64(n)
+ }
+
+ if req.Action == api.WriteActionCommit {
+ var opts []content.Opt
+ if req.Labels != nil {
+ opts = append(opts, content.WithLabels(req.Labels))
+ }
+ if err := wr.Commit(ctx, total, expected, opts...); err != nil {
+ return err
+ }
+ }
+
+ msg.Digest = wr.Digest()
+ }
+
+ if err := session.Send(&msg); err != nil {
+ return err
+ }
+
+ req, err = session.Recv()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+
+ return err
+ }
+ }
+}
+
+func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) {
+ if err := s.store.Abort(ctx, req.Ref); err != nil {
+ return nil, errdefs.ToGRPC(err)
+ }
+
+ return &ptypes.Empty{}, nil
+}
+
+func infoToGRPC(info content.Info) api.Info {
+ return api.Info{
+ Digest: info.Digest,
+ Size_: info.Size,
+ CreatedAt: info.CreatedAt,
+ UpdatedAt: info.UpdatedAt,
+ Labels: info.Labels,
+ }
+}
+
+func infoFromGRPC(info api.Info) content.Info {
+ return content.Info{
+ Digest: info.Digest,
+ Size: info.Size_,
+ CreatedAt: info.CreatedAt,
+ UpdatedAt: info.UpdatedAt,
+ Labels: info.Labels,
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/services/diff/service.go b/vendor/github.com/containerd/containerd/services/diff/service.go
new file mode 100644
index 000000000..7053dace1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/diff/service.go
@@ -0,0 +1,180 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package diff
+
+import (
+ diffapi "github.com/containerd/containerd/api/services/diff/v1"
+ "github.com/containerd/containerd/api/types"
+ "github.com/containerd/containerd/diff"
+ "github.com/containerd/containerd/errdefs"
+ "github.com/containerd/containerd/mount"
+ "github.com/containerd/containerd/plugin"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+type config struct {
+ // Order is the order of preference in which to try diff algorithms; the
+ // first differ that is supported is used.
+ // Note that when multiple differs are supported, this order determines
+ // which one is chosen. Each differ should return the same
+ // correct output, allowing any ordering to be used to prefer
+ // more optimal implementations.
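+ //
+ // An illustrative TOML fragment (the section name follows the plugin ID
+ // convention and is assumed here, not spelled out in this file):
+ //
+ //   [plugins.diff]
+ //     default = ["walking"]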
+ Order []string `toml:"default"`
+}
+
+type differ interface {
+ diff.Comparer
+ diff.Applier
+}
+
+func init() {
+ plugin.Register(&plugin.Registration{
+ Type: plugin.GRPCPlugin,
+ ID: "diff",
+ Requires: []plugin.Type{
+ plugin.DiffPlugin,
+ },
+ Config: defaultDifferConfig,
+ InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+ differs, err := ic.GetByType(plugin.DiffPlugin)
+ if err != nil {
+ return nil, err
+ }
+
+ orderedNames := ic.Config.(*config).Order
+ ordered := make([]differ, len(orderedNames))
+ for i, n := range orderedNames {
+ differp, ok := differs[n]
+ if !ok {
+ return nil, errors.Errorf("needed differ not loaded: %s", n)
+ }
+ d, err := differp.Instance()
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not load required differ due to plugin init error: %s", n)
+ }
+
+ ordered[i], ok = d.(differ)
+ if !ok {
+ return nil, errors.Errorf("differ does not implement Comparer and Applier interface: %s", n)
+ }
+ }
+
+ return &service{
+ differs: ordered,
+ }, nil
+ },
+ })
+}
+
+type service struct {
+ differs []differ
+}
+
+func (s *service) Register(gs *grpc.Server) error {
+ diffapi.RegisterDiffServer(gs, s)
+ return nil
+}
+
+func (s *service) Apply(ctx context.Context, er *diffapi.ApplyRequest) (*diffapi.ApplyResponse, error) {
+ var (
+ ocidesc ocispec.Descriptor
+ err error
+ desc = toDescriptor(er.Diff)
+ mounts = toMounts(er.Mounts)
+ )
+
+ for _, differ := range s.differs {
+ ocidesc, err = differ.Apply(ctx, desc, mounts)
+ if !errdefs.IsNotImplemented(err) {
+ break
+ }
+ }
+
+ if err != nil {
+ return nil, errdefs.ToGRPC(err)
+ }
+
+ return &diffapi.ApplyResponse{
+ Applied: fromDescriptor(ocidesc),
+ }, nil
+
+}
+
+func (s *service) Diff(ctx context.Context, dr *diffapi.DiffRequest) (*diffapi.DiffResponse, error) {
+ var (
+ ocidesc ocispec.Descriptor
+ err error
+ aMounts = toMounts(dr.Left)
+ bMounts = toMounts(dr.Right)
+ )
+
+ var opts []diff.Opt
+ if dr.MediaType != "" {
+ opts = append(opts, diff.WithMediaType(dr.MediaType))
+ }
+ if dr.Ref != "" {
+ opts = append(opts, diff.WithReference(dr.Ref))
+ }
+ if dr.Labels != nil {
+ opts = append(opts, diff.WithLabels(dr.Labels))
+ }
+
+ for _, d := range s.differs {
+ ocidesc, err = d.Compare(ctx, aMounts, bMounts, opts...)
+ if !errdefs.IsNotImplemented(err) {
+ break
+ }
+ }
+ if err != nil {
+ return nil, errdefs.ToGRPC(err)
+ }
+
+ return &diffapi.DiffResponse{
+ Diff: fromDescriptor(ocidesc),
+ }, nil
+}
+
+func toMounts(apim []*types.Mount) []mount.Mount {
+ mounts := make([]mount.Mount, len(apim))
+ for i, m := range apim {
+ mounts[i] = mount.Mount{
+ Type: m.Type,
+ Source: m.Source,
+ Options: m.Options,
+ }
+ }
+ return mounts
+}
+
+func toDescriptor(d *types.Descriptor) ocispec.Descriptor {
+ return ocispec.Descriptor{
+ MediaType: d.MediaType,
+ Digest: d.Digest,
+ Size: d.Size_,
+ }
+}
+
+func fromDescriptor(d ocispec.Descriptor) *types.Descriptor {
+ return &types.Descriptor{
+ MediaType: d.MediaType,
+ Digest: d.Digest,
+ Size_: d.Size,
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/services/diff/service_unix.go b/vendor/github.com/containerd/containerd/services/diff/service_unix.go
new file mode 100644
index 000000000..04a85f7c4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/diff/service_unix.go
@@ -0,0 +1,23 @@
+// +build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +var defaultDifferConfig = &config{ + Order: []string{"walking"}, +} diff --git a/vendor/github.com/containerd/containerd/services/diff/service_windows.go b/vendor/github.com/containerd/containerd/services/diff/service_windows.go new file mode 100644 index 000000000..91f54cca0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/diff/service_windows.go @@ -0,0 +1,23 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +var defaultDifferConfig = &config{ + Order: []string{"windows"}, +} diff --git a/vendor/github.com/containerd/containerd/services/events/service.go b/vendor/github.com/containerd/containerd/services/events/service.go new file mode 100644 index 000000000..58a0d02f5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/events/service.go @@ -0,0 +1,108 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package events + +import ( + api "github.com/containerd/containerd/api/services/events/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/events/exchange" + "github.com/containerd/containerd/plugin" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "events", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return NewService(ic.Events), nil + }, + }) +} + +type service struct { + events *exchange.Exchange +} + +// NewService returns the GRPC events server +func NewService(events *exchange.Exchange) api.EventsServer { + return &service{events: events} +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterEventsServer(server, s) + return nil +} + +func (s *service) Publish(ctx context.Context, r *api.PublishRequest) (*ptypes.Empty, error) { + if err := s.events.Publish(ctx, r.Topic, r.Event); err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &ptypes.Empty{}, nil +} + +func (s *service) Forward(ctx context.Context, r *api.ForwardRequest) (*ptypes.Empty, error) { + if err := s.events.Forward(ctx, fromProto(r.Envelope)); err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &ptypes.Empty{}, nil +} + +func (s *service) Subscribe(req *api.SubscribeRequest, srv api.Events_SubscribeServer) error { + ctx, cancel := context.WithCancel(srv.Context()) + defer cancel() + + eventq, errq := s.events.Subscribe(ctx, req.Filters...) + for { + select { + case ev := <-eventq: + if err := srv.Send(toProto(ev)); err != nil { + return errors.Wrapf(err, "failed sending event to subscriber") + } + case err := <-errq: + if err != nil { + return errors.Wrapf(err, "subscription error") + } + + return nil + } + } +} + +func toProto(env *events.Envelope) *api.Envelope { + return &api.Envelope{ + Timestamp: env.Timestamp, + Namespace: env.Namespace, + Topic: env.Topic, + Event: env.Event, + } +} + +func fromProto(env *api.Envelope) *events.Envelope { + return &events.Envelope{ + Timestamp: env.Timestamp, + Namespace: env.Namespace, + Topic: env.Topic, + Event: env.Event, + } +} diff --git a/vendor/github.com/containerd/containerd/services/healthcheck/service.go b/vendor/github.com/containerd/containerd/services/healthcheck/service.go new file mode 100644 index 000000000..f924f042c --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/healthcheck/service.go @@ -0,0 +1,50 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package healthcheck + +import ( + "github.com/containerd/containerd/plugin" + + "google.golang.org/grpc" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +type service struct { + serve *health.Server +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "healthcheck", + InitFn: func(*plugin.InitContext) (interface{}, error) { + return newService() + }, + }) +} + +func newService() (*service, error) { + return &service{ + health.NewServer(), + }, nil +} + +func (s *service) Register(server *grpc.Server) error { + grpc_health_v1.RegisterHealthServer(server, s.serve) + return nil +} diff --git a/vendor/github.com/containerd/containerd/services/images/helpers.go b/vendor/github.com/containerd/containerd/services/images/helpers.go new file mode 100644 index 000000000..8ad0d117e --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/images/helpers.go @@ -0,0 +1,70 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + imagesapi "github.com/containerd/containerd/api/services/images/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/images" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func imagesToProto(images []images.Image) []imagesapi.Image { + var imagespb []imagesapi.Image + + for _, image := range images { + imagespb = append(imagespb, imageToProto(&image)) + } + + return imagespb +} + +func imageToProto(image *images.Image) imagesapi.Image { + return imagesapi.Image{ + Name: image.Name, + Labels: image.Labels, + Target: descToProto(&image.Target), + CreatedAt: image.CreatedAt, + UpdatedAt: image.UpdatedAt, + } +} + +func imageFromProto(imagepb *imagesapi.Image) images.Image { + return images.Image{ + Name: imagepb.Name, + Labels: imagepb.Labels, + Target: descFromProto(&imagepb.Target), + CreatedAt: imagepb.CreatedAt, + UpdatedAt: imagepb.UpdatedAt, + } +} + +func descFromProto(desc *types.Descriptor) ocispec.Descriptor { + return ocispec.Descriptor{ + MediaType: desc.MediaType, + Size: desc.Size_, + Digest: desc.Digest, + } +} + +func descToProto(desc *ocispec.Descriptor) types.Descriptor { + return types.Descriptor{ + MediaType: desc.MediaType, + Size_: desc.Size, + Digest: desc.Digest, + } +} diff --git a/vendor/github.com/containerd/containerd/services/images/service.go b/vendor/github.com/containerd/containerd/services/images/service.go new file mode 100644 index 000000000..d17c684ae --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/images/service.go @@ -0,0 +1,190 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + gocontext "context" + + eventstypes "github.com/containerd/containerd/api/events" + imagesapi "github.com/containerd/containerd/api/services/images/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + ptypes "github.com/gogo/protobuf/types" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "images", + Requires: []plugin.Type{ + plugin.MetadataPlugin, + plugin.GCPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + g, err := ic.Get(plugin.GCPlugin) + if err != nil { + return nil, err + } + + return NewService(metadata.NewImageStore(m.(*metadata.DB)), ic.Events, g.(gcScheduler)), nil + }, + }) +} + +type gcScheduler interface { + ScheduleAndWait(gocontext.Context) (gc.Stats, error) +} + +type service struct { + store images.Store + gc gcScheduler + publisher events.Publisher +} + +// NewService returns the GRPC image server +func NewService(is images.Store, publisher events.Publisher, gc gcScheduler) imagesapi.ImagesServer { + return &service{ + store: is, + gc: gc, + publisher: publisher, + } +} + +func (s *service) Register(server *grpc.Server) error { + imagesapi.RegisterImagesServer(server, s) + return nil +} + +func (s *service) Get(ctx context.Context, req *imagesapi.GetImageRequest) (*imagesapi.GetImageResponse, error) { + image, err := s.store.Get(ctx, req.Name) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + imagepb := imageToProto(&image) + return &imagesapi.GetImageResponse{ + Image: &imagepb, + }, nil +} + +func (s *service) List(ctx context.Context, req *imagesapi.ListImagesRequest) (*imagesapi.ListImagesResponse, error) { + images, err := s.store.List(ctx, req.Filters...) 
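+ // An empty Filters slice matches every image in the caller's
+ // namespace; otherwise an image is returned if any one of the
+ // filter expressions matches it.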
+ if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &imagesapi.ListImagesResponse{ + Images: imagesToProto(images), + }, nil +} + +func (s *service) Create(ctx context.Context, req *imagesapi.CreateImageRequest) (*imagesapi.CreateImageResponse, error) { + log.G(ctx).WithField("name", req.Image.Name).WithField("target", req.Image.Target.Digest).Debugf("create image") + if req.Image.Name == "" { + return nil, status.Errorf(codes.InvalidArgument, "Image.Name required") + } + + var ( + image = imageFromProto(&req.Image) + resp imagesapi.CreateImageResponse + ) + created, err := s.store.Create(ctx, image) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + resp.Image = imageToProto(&created) + + if err := s.publisher.Publish(ctx, "/images/create", &eventstypes.ImageCreate{ + Name: resp.Image.Name, + Labels: resp.Image.Labels, + }); err != nil { + return nil, err + } + + return &resp, nil + +} + +func (s *service) Update(ctx context.Context, req *imagesapi.UpdateImageRequest) (*imagesapi.UpdateImageResponse, error) { + if req.Image.Name == "" { + return nil, status.Errorf(codes.InvalidArgument, "Image.Name required") + } + + var ( + image = imageFromProto(&req.Image) + resp imagesapi.UpdateImageResponse + fieldpaths []string + ) + + if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { + for _, path := range req.UpdateMask.Paths { + fieldpaths = append(fieldpaths, path) + } + } + + updated, err := s.store.Update(ctx, image, fieldpaths...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + resp.Image = imageToProto(&updated) + + if err := s.publisher.Publish(ctx, "/images/update", &eventstypes.ImageUpdate{ + Name: resp.Image.Name, + Labels: resp.Image.Labels, + }); err != nil { + return nil, err + } + + return &resp, nil +} + +func (s *service) Delete(ctx context.Context, req *imagesapi.DeleteImageRequest) (*ptypes.Empty, error) { + log.G(ctx).WithField("name", req.Name).Debugf("delete image") + + if err := s.store.Delete(ctx, req.Name); err != nil { + return nil, errdefs.ToGRPC(err) + } + + if err := s.publisher.Publish(ctx, "/images/delete", &eventstypes.ImageDelete{ + Name: req.Name, + }); err != nil { + return nil, err + } + + if req.Sync { + if _, err := s.gc.ScheduleAndWait(ctx); err != nil { + return nil, err + } + } + + return &ptypes.Empty{}, nil +} diff --git a/vendor/github.com/containerd/containerd/services/introspection/service.go b/vendor/github.com/containerd/containerd/services/introspection/service.go new file mode 100644 index 000000000..6fb27a785 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/introspection/service.go @@ -0,0 +1,163 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package introspection + +import ( + api "github.com/containerd/containerd/api/services/introspection/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/protobuf/google/rpc" + ptypes "github.com/gogo/protobuf/types" + context "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/status" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "introspection", + Requires: []plugin.Type{"*"}, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + // this service works by using the plugin context up till the point + // this service is initialized. Since we require this service last, + // it should provide the full set of plugins. + pluginsPB := pluginsToPB(ic.GetAll()) + return NewService(pluginsPB), nil + }, + }) +} + +type service struct { + plugins []api.Plugin +} + +// NewService returns the GRPC introspection server +func NewService(plugins []api.Plugin) api.IntrospectionServer { + return &service{ + plugins: plugins, + } +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterIntrospectionServer(server, s) + return nil +} + +func (s *service) Plugins(ctx context.Context, req *api.PluginsRequest) (*api.PluginsResponse, error) { + filter, err := filters.ParseAll(req.Filters...) + if err != nil { + return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, err.Error()) + } + + var plugins []api.Plugin + for _, p := range s.plugins { + if !filter.Match(adaptPlugin(p)) { + continue + } + + plugins = append(plugins, p) + } + + return &api.PluginsResponse{ + Plugins: plugins, + }, nil +} + +func adaptPlugin(o interface{}) filters.Adaptor { + obj := o.(api.Plugin) + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "type": + return obj.Type, len(obj.Type) > 0 + case "id": + return obj.ID, len(obj.ID) > 0 + case "platforms": + // TODO(stevvooe): Another case here where have multiple values. + // May need to refactor the filter system to allow filtering by + // platform, if this is required. + case "capabilities": + // TODO(stevvooe): Need a better way to match against + // collections. We can only return "the value" but really it + // would be best if we could return a set of values for the + // path, any of which could match. 
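+ // Until the TODOs above are resolved, "platforms" and
+ // "capabilities" simply fall through to the empty, non-matching
+ // result below.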
+ } + + return "", false + }) +} + +func pluginsToPB(plugins []*plugin.Plugin) []api.Plugin { + var pluginsPB []api.Plugin + for _, p := range plugins { + var platforms []types.Platform + for _, p := range p.Meta.Platforms { + platforms = append(platforms, types.Platform{ + OS: p.OS, + Architecture: p.Architecture, + Variant: p.Variant, + }) + } + + var requires []string + for _, r := range p.Registration.Requires { + requires = append(requires, r.String()) + } + + var initErr *rpc.Status + if err := p.Err(); err != nil { + st, ok := status.FromError(errdefs.ToGRPC(err)) + if ok { + var details []*ptypes.Any + for _, d := range st.Proto().Details { + details = append(details, &ptypes.Any{ + TypeUrl: d.TypeUrl, + Value: d.Value, + }) + } + initErr = &rpc.Status{ + Code: int32(st.Code()), + Message: st.Message(), + Details: details, + } + } else { + initErr = &rpc.Status{ + Code: int32(rpc.Code_UNKNOWN), + Message: err.Error(), + } + } + } + + pluginsPB = append(pluginsPB, api.Plugin{ + Type: p.Registration.Type.String(), + ID: p.Registration.ID, + Requires: requires, + Platforms: platforms, + Capabilities: p.Meta.Capabilities, + Exports: p.Meta.Exports, + InitErr: initErr, + }) + } + + return pluginsPB +} diff --git a/vendor/github.com/containerd/containerd/services/leases/service.go b/vendor/github.com/containerd/containerd/services/leases/service.go new file mode 100644 index 000000000..100052f3a --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/leases/service.go @@ -0,0 +1,131 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package leases + +import ( + "crypto/rand" + "encoding/base64" + "fmt" + "time" + + "google.golang.org/grpc" + + "github.com/boltdb/bolt" + api "github.com/containerd/containerd/api/services/leases/v1" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + ptypes "github.com/gogo/protobuf/types" + "golang.org/x/net/context" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "leases", + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + return NewService(m.(*metadata.DB)), nil + }, + }) +} + +type service struct { + db *metadata.DB +} + +// NewService returns the GRPC metadata server +func NewService(db *metadata.DB) api.LeasesServer { + return &service{ + db: db, + } +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterLeasesServer(server, s) + return nil +} + +func (s *service) Create(ctx context.Context, r *api.CreateRequest) (*api.CreateResponse, error) { + lid := r.ID + if lid == "" { + lid = generateLeaseID() + } + var trans metadata.Lease + if err := s.db.Update(func(tx *bolt.Tx) error { + var err error + trans, err = metadata.NewLeaseManager(tx).Create(ctx, lid, r.Labels) + return err + }); err != nil { + return nil, err + } + return &api.CreateResponse{ + Lease: txToGRPC(trans), + }, nil +} + +func (s *service) Delete(ctx context.Context, r *api.DeleteRequest) (*ptypes.Empty, error) { + if err := s.db.Update(func(tx *bolt.Tx) error { + return metadata.NewLeaseManager(tx).Delete(ctx, r.ID) + }); err != nil { + return nil, err + } + return &ptypes.Empty{}, nil +} + +func (s *service) List(ctx context.Context, r *api.ListRequest) (*api.ListResponse, error) { + var leases []metadata.Lease + if err := s.db.View(func(tx *bolt.Tx) error { + var err error + leases, err = metadata.NewLeaseManager(tx).List(ctx, false, r.Filters...) + return err + }); err != nil { + return nil, err + } + + apileases := make([]*api.Lease, len(leases)) + for i := range leases { + apileases[i] = txToGRPC(leases[i]) + } + + return &api.ListResponse{ + Leases: apileases, + }, nil +} + +func txToGRPC(tx metadata.Lease) *api.Lease { + return &api.Lease{ + ID: tx.ID, + Labels: tx.Labels, + CreatedAt: tx.CreatedAt, + // TODO: Snapshots + // TODO: Content + } +} + +func generateLeaseID() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git a/vendor/github.com/containerd/containerd/services/namespaces/service.go b/vendor/github.com/containerd/containerd/services/namespaces/service.go new file mode 100644 index 000000000..2c5694012 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/namespaces/service.go @@ -0,0 +1,229 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package namespaces + +import ( + "strings" + + "github.com/boltdb/bolt" + eventstypes "github.com/containerd/containerd/api/events" + api "github.com/containerd/containerd/api/services/namespaces/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/plugin" + ptypes "github.com/gogo/protobuf/types" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "namespaces", + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + return NewService(m.(*metadata.DB), ic.Events), nil + }, + }) +} + +type service struct { + db *metadata.DB + publisher events.Publisher +} + +var _ api.NamespacesServer = &service{} + +// NewService returns the GRPC namespaces server +func NewService(db *metadata.DB, publisher events.Publisher) api.NamespacesServer { + return &service{ + db: db, + publisher: publisher, + } +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterNamespacesServer(server, s) + return nil +} + +func (s *service) Get(ctx context.Context, req *api.GetNamespaceRequest) (*api.GetNamespaceResponse, error) { + var resp api.GetNamespaceResponse + + return &resp, s.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error { + labels, err := store.Labels(ctx, req.Name) + if err != nil { + return errdefs.ToGRPC(err) + } + + resp.Namespace = api.Namespace{ + Name: req.Name, + Labels: labels, + } + + return nil + }) +} + +func (s *service) List(ctx context.Context, req *api.ListNamespacesRequest) (*api.ListNamespacesResponse, error) { + var resp api.ListNamespacesResponse + + return &resp, s.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error { + namespaces, err := store.List(ctx) + if err != nil { + return err + } + + for _, namespace := range namespaces { + labels, err := store.Labels(ctx, namespace) + if err != nil { + // In general, this should be unlikely, since we are holding a + // transaction to service this request. 
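+ // The bolt view transaction spans both the List and the Labels
+ // calls, so a failure here points at an inconsistent store rather
+ // than a concurrent delete.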
+ return errdefs.ToGRPC(err) + } + + resp.Namespaces = append(resp.Namespaces, api.Namespace{ + Name: namespace, + Labels: labels, + }) + } + + return nil + }) +} + +func (s *service) Create(ctx context.Context, req *api.CreateNamespaceRequest) (*api.CreateNamespaceResponse, error) { + var resp api.CreateNamespaceResponse + + if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { + if err := store.Create(ctx, req.Namespace.Name, req.Namespace.Labels); err != nil { + return errdefs.ToGRPC(err) + } + + for k, v := range req.Namespace.Labels { + if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil { + return err + } + } + + resp.Namespace = req.Namespace + return nil + }); err != nil { + return &resp, err + } + + if err := s.publisher.Publish(ctx, "/namespaces/create", &eventstypes.NamespaceCreate{ + Name: req.Namespace.Name, + Labels: req.Namespace.Labels, + }); err != nil { + return &resp, err + } + + return &resp, nil + +} + +func (s *service) Update(ctx context.Context, req *api.UpdateNamespaceRequest) (*api.UpdateNamespaceResponse, error) { + var resp api.UpdateNamespaceResponse + if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { + if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 { + for _, path := range req.UpdateMask.Paths { + switch { + case strings.HasPrefix(path, "labels."): + key := strings.TrimPrefix(path, "labels.") + if err := store.SetLabel(ctx, req.Namespace.Name, key, req.Namespace.Labels[key]); err != nil { + return err + } + default: + return status.Errorf(codes.InvalidArgument, "cannot update %q field", path) + } + } + } else { + // clear out the existing labels and then set them to the incoming request. + // get current set of labels + labels, err := store.Labels(ctx, req.Namespace.Name) + if err != nil { + return errdefs.ToGRPC(err) + } + + for k := range labels { + if err := store.SetLabel(ctx, req.Namespace.Name, k, ""); err != nil { + return err + } + } + + for k, v := range req.Namespace.Labels { + if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil { + return err + } + + } + } + + return nil + }); err != nil { + return &resp, err + } + + if err := s.publisher.Publish(ctx, "/namespaces/update", &eventstypes.NamespaceUpdate{ + Name: req.Namespace.Name, + Labels: req.Namespace.Labels, + }); err != nil { + return &resp, err + } + + return &resp, nil +} + +func (s *service) Delete(ctx context.Context, req *api.DeleteNamespaceRequest) (*ptypes.Empty, error) { + if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error { + return errdefs.ToGRPC(store.Delete(ctx, req.Name)) + }); err != nil { + return &ptypes.Empty{}, err + } + // set the namespace in the context before publishing the event + ctx = namespaces.WithNamespace(ctx, req.Name) + if err := s.publisher.Publish(ctx, "/namespaces/delete", &eventstypes.NamespaceDelete{ + Name: req.Name, + }); err != nil { + return &ptypes.Empty{}, err + } + + return &ptypes.Empty{}, nil +} + +func (s *service) withStore(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) func(tx *bolt.Tx) error { + return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewNamespaceStore(tx)) } +} + +func (s *service) withStoreView(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error { + return s.db.View(s.withStore(ctx, fn)) +} + +func (s *service) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) 
error) error { + return s.db.Update(s.withStore(ctx, fn)) +} diff --git a/vendor/github.com/containerd/containerd/services/snapshots/service.go b/vendor/github.com/containerd/containerd/services/snapshots/service.go new file mode 100644 index 000000000..2d997bdc9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/snapshots/service.go @@ -0,0 +1,332 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package snapshots + +import ( + gocontext "context" + + eventstypes "github.com/containerd/containerd/api/events" + snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/snapshots" + ptypes "github.com/gogo/protobuf/types" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "snapshots", + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: newService, + }) +} + +var empty = &ptypes.Empty{} + +type service struct { + db *metadata.DB + publisher events.Publisher +} + +func newService(ic *plugin.InitContext) (interface{}, error) { + md, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + return &service{ + db: md.(*metadata.DB), + publisher: ic.Events, + }, nil +} + +func (s *service) getSnapshotter(name string) (snapshots.Snapshotter, error) { + if name == "" { + return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter argument missing") + } + + sn := s.db.Snapshotter(name) + if sn == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter not loaded: %s", name) + } + return sn, nil +} + +func (s *service) Register(gs *grpc.Server) error { + snapshotsapi.RegisterSnapshotsServer(gs, s) + return nil +} + +func (s *service) Prepare(ctx context.Context, pr *snapshotsapi.PrepareSnapshotRequest) (*snapshotsapi.PrepareSnapshotResponse, error) { + log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("prepare snapshot") + sn, err := s.getSnapshotter(pr.Snapshotter) + if err != nil { + return nil, err + } + + var opts []snapshots.Opt + if pr.Labels != nil { + opts = append(opts, snapshots.WithLabels(pr.Labels)) + } + mounts, err := sn.Prepare(ctx, pr.Key, pr.Parent, opts...) 
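+ // On success, Prepare yields the mounts for the new active
+ // snapshot; the /snapshot/prepare event below is published only
+ // after the snapshotter call has succeeded.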
+ if err != nil { + return nil, errdefs.ToGRPC(err) + } + + if err := s.publisher.Publish(ctx, "/snapshot/prepare", &eventstypes.SnapshotPrepare{ + Key: pr.Key, + Parent: pr.Parent, + }); err != nil { + return nil, err + } + return &snapshotsapi.PrepareSnapshotResponse{ + Mounts: fromMounts(mounts), + }, nil +} + +func (s *service) View(ctx context.Context, pr *snapshotsapi.ViewSnapshotRequest) (*snapshotsapi.ViewSnapshotResponse, error) { + log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("prepare view snapshot") + sn, err := s.getSnapshotter(pr.Snapshotter) + if err != nil { + return nil, err + } + var opts []snapshots.Opt + if pr.Labels != nil { + opts = append(opts, snapshots.WithLabels(pr.Labels)) + } + mounts, err := sn.View(ctx, pr.Key, pr.Parent, opts...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &snapshotsapi.ViewSnapshotResponse{ + Mounts: fromMounts(mounts), + }, nil +} + +func (s *service) Mounts(ctx context.Context, mr *snapshotsapi.MountsRequest) (*snapshotsapi.MountsResponse, error) { + log.G(ctx).WithField("key", mr.Key).Debugf("get snapshot mounts") + sn, err := s.getSnapshotter(mr.Snapshotter) + if err != nil { + return nil, err + } + + mounts, err := sn.Mounts(ctx, mr.Key) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &snapshotsapi.MountsResponse{ + Mounts: fromMounts(mounts), + }, nil +} + +func (s *service) Commit(ctx context.Context, cr *snapshotsapi.CommitSnapshotRequest) (*ptypes.Empty, error) { + log.G(ctx).WithField("key", cr.Key).WithField("name", cr.Name).Debugf("commit snapshot") + sn, err := s.getSnapshotter(cr.Snapshotter) + if err != nil { + return nil, err + } + + var opts []snapshots.Opt + if cr.Labels != nil { + opts = append(opts, snapshots.WithLabels(cr.Labels)) + } + if err := sn.Commit(ctx, cr.Name, cr.Key, opts...); err != nil { + return nil, errdefs.ToGRPC(err) + } + + if err := s.publisher.Publish(ctx, "/snapshot/commit", &eventstypes.SnapshotCommit{ + Key: cr.Key, + Name: cr.Name, + }); err != nil { + return nil, err + } + return empty, nil +} + +func (s *service) Remove(ctx context.Context, rr *snapshotsapi.RemoveSnapshotRequest) (*ptypes.Empty, error) { + log.G(ctx).WithField("key", rr.Key).Debugf("remove snapshot") + sn, err := s.getSnapshotter(rr.Snapshotter) + if err != nil { + return nil, err + } + + if err := sn.Remove(ctx, rr.Key); err != nil { + return nil, errdefs.ToGRPC(err) + } + + if err := s.publisher.Publish(ctx, "/snapshot/remove", &eventstypes.SnapshotRemove{ + Key: rr.Key, + }); err != nil { + return nil, err + } + return empty, nil +} + +func (s *service) Stat(ctx context.Context, sr *snapshotsapi.StatSnapshotRequest) (*snapshotsapi.StatSnapshotResponse, error) { + log.G(ctx).WithField("key", sr.Key).Debugf("stat snapshot") + sn, err := s.getSnapshotter(sr.Snapshotter) + if err != nil { + return nil, err + } + + info, err := sn.Stat(ctx, sr.Key) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &snapshotsapi.StatSnapshotResponse{Info: fromInfo(info)}, nil +} + +func (s *service) Update(ctx context.Context, sr *snapshotsapi.UpdateSnapshotRequest) (*snapshotsapi.UpdateSnapshotResponse, error) { + log.G(ctx).WithField("key", sr.Info.Name).Debugf("update snapshot") + sn, err := s.getSnapshotter(sr.Snapshotter) + if err != nil { + return nil, err + } + + info, err := sn.Update(ctx, toInfo(sr.Info), sr.UpdateMask.GetPaths()...) 
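+ // Field mask paths follow the metadata store's convention:
+ // "labels.<key>" updates a single label, while "labels" replaces
+ // the whole set.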
+ if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &snapshotsapi.UpdateSnapshotResponse{Info: fromInfo(info)}, nil +} + +func (s *service) List(sr *snapshotsapi.ListSnapshotsRequest, ss snapshotsapi.Snapshots_ListServer) error { + sn, err := s.getSnapshotter(sr.Snapshotter) + if err != nil { + return err + } + + var ( + buffer []snapshotsapi.Info + sendBlock = func(block []snapshotsapi.Info) error { + return ss.Send(&snapshotsapi.ListSnapshotsResponse{ + Info: block, + }) + } + ) + err = sn.Walk(ss.Context(), func(ctx gocontext.Context, info snapshots.Info) error { + buffer = append(buffer, fromInfo(info)) + + if len(buffer) >= 100 { + if err := sendBlock(buffer); err != nil { + return err + } + + buffer = buffer[:0] + } + + return nil + }) + if err != nil { + return err + } + if len(buffer) > 0 { + // Send remaining infos + if err := sendBlock(buffer); err != nil { + return err + } + } + + return nil +} + +func (s *service) Usage(ctx context.Context, ur *snapshotsapi.UsageRequest) (*snapshotsapi.UsageResponse, error) { + sn, err := s.getSnapshotter(ur.Snapshotter) + if err != nil { + return nil, err + } + + usage, err := sn.Usage(ctx, ur.Key) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return fromUsage(usage), nil +} + +func fromKind(kind snapshots.Kind) snapshotsapi.Kind { + if kind == snapshots.KindActive { + return snapshotsapi.KindActive + } + if kind == snapshots.KindView { + return snapshotsapi.KindView + } + return snapshotsapi.KindCommitted +} + +func fromInfo(info snapshots.Info) snapshotsapi.Info { + return snapshotsapi.Info{ + Name: info.Name, + Parent: info.Parent, + Kind: fromKind(info.Kind), + CreatedAt: info.Created, + UpdatedAt: info.Updated, + Labels: info.Labels, + } +} + +func fromUsage(usage snapshots.Usage) *snapshotsapi.UsageResponse { + return &snapshotsapi.UsageResponse{ + Inodes: usage.Inodes, + Size_: usage.Size, + } +} + +func fromMounts(mounts []mount.Mount) []*types.Mount { + out := make([]*types.Mount, len(mounts)) + for i, m := range mounts { + out[i] = &types.Mount{ + Type: m.Type, + Source: m.Source, + Options: m.Options, + } + } + return out +} + +func toInfo(info snapshotsapi.Info) snapshots.Info { + return snapshots.Info{ + Name: info.Name, + Parent: info.Parent, + Kind: toKind(info.Kind), + Created: info.CreatedAt, + Updated: info.UpdatedAt, + Labels: info.Labels, + } +} + +func toKind(kind snapshotsapi.Kind) snapshots.Kind { + if kind == snapshotsapi.KindActive { + return snapshots.KindActive + } + if kind == snapshotsapi.KindView { + return snapshots.KindView + } + return snapshots.KindCommitted +} diff --git a/vendor/github.com/containerd/containerd/services/tasks/service.go b/vendor/github.com/containerd/containerd/services/tasks/service.go new file mode 100644 index 000000000..b1329335b --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/tasks/service.go @@ -0,0 +1,638 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package tasks + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/boltdb/bolt" + api "github.com/containerd/containerd/api/services/tasks/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/api/types/task" + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/runtime" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + _ = (api.TasksServer)(&service{}) + empty = &ptypes.Empty{} +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "tasks", + Requires: []plugin.Type{ + plugin.RuntimePlugin, + plugin.MetadataPlugin, + }, + InitFn: initFunc, + }) +} + +func initFunc(ic *plugin.InitContext) (interface{}, error) { + rt, err := ic.GetByType(plugin.RuntimePlugin) + if err != nil { + return nil, err + } + + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + cs := m.(*metadata.DB).ContentStore() + runtimes := make(map[string]runtime.Runtime) + for _, rr := range rt { + ri, err := rr.Instance() + if err != nil { + log.G(ic.Context).WithError(err).Warn("could not load runtime instance due to initialization error") + continue + } + r := ri.(runtime.Runtime) + runtimes[r.ID()] = r + } + + if len(runtimes) == 0 { + return nil, errors.New("no runtimes available to create task service") + } + return &service{ + runtimes: runtimes, + db: m.(*metadata.DB), + store: cs, + publisher: ic.Events, + }, nil +} + +type service struct { + runtimes map[string]runtime.Runtime + db *metadata.DB + store content.Store + publisher events.Publisher +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterTasksServer(server, s) + return nil +} + +func (s *service) Create(ctx context.Context, r *api.CreateTaskRequest) (*api.CreateTaskResponse, error) { + var ( + checkpointPath string + err error + ) + if r.Checkpoint != nil { + checkpointPath, err = ioutil.TempDir("", "ctrd-checkpoint") + if err != nil { + return nil, err + } + if r.Checkpoint.MediaType != images.MediaTypeContainerd1Checkpoint { + return nil, fmt.Errorf("unsupported checkpoint type %q", r.Checkpoint.MediaType) + } + reader, err := s.store.ReaderAt(ctx, r.Checkpoint.Digest) + if err != nil { + return nil, err + } + _, err = archive.Apply(ctx, checkpointPath, content.NewReader(reader)) + reader.Close() + if err != nil { + return nil, err + } + } + + container, err := s.getContainer(ctx, r.ContainerID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + opts := runtime.CreateOpts{ + Spec: container.Spec, + IO: runtime.IO{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }, + Checkpoint: checkpointPath, + Options: r.Options, + } + for _, m := range r.Rootfs { + opts.Rootfs = append(opts.Rootfs, mount.Mount{ + Type: m.Type, + Source: m.Source, + Options: m.Options, + }) + } + runtime, err := 
s.getRuntime(container.Runtime.Name) + if err != nil { + return nil, err + } + c, err := runtime.Create(ctx, r.ContainerID, opts) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + state, err := c.State(ctx) + if err != nil { + log.G(ctx).Error(err) + } + + return &api.CreateTaskResponse{ + ContainerID: r.ContainerID, + Pid: state.Pid, + }, nil +} + +func (s *service) Start(ctx context.Context, r *api.StartRequest) (*api.StartResponse, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(t) + if r.ExecID != "" { + if p, err = t.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + if err := p.Start(ctx); err != nil { + return nil, errdefs.ToGRPC(err) + } + state, err := p.State(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.StartResponse{ + Pid: state.Pid, + }, nil +} + +func (s *service) Delete(ctx context.Context, r *api.DeleteTaskRequest) (*api.DeleteResponse, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + runtime, err := s.getRuntime(t.Info().Runtime) + if err != nil { + return nil, err + } + exit, err := runtime.Delete(ctx, t) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.DeleteResponse{ + ExitStatus: exit.Status, + ExitedAt: exit.Timestamp, + Pid: exit.Pid, + }, nil +} + +func (s *service) DeleteProcess(ctx context.Context, r *api.DeleteProcessRequest) (*api.DeleteResponse, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + exit, err := t.DeleteProcess(ctx, r.ExecID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.DeleteResponse{ + ID: r.ExecID, + ExitStatus: exit.Status, + ExitedAt: exit.Timestamp, + Pid: exit.Pid, + }, nil +} + +func processFromContainerd(ctx context.Context, p runtime.Process) (*task.Process, error) { + state, err := p.State(ctx) + if err != nil { + return nil, err + } + var status task.Status + switch state.Status { + case runtime.CreatedStatus: + status = task.StatusCreated + case runtime.RunningStatus: + status = task.StatusRunning + case runtime.StoppedStatus: + status = task.StatusStopped + case runtime.PausedStatus: + status = task.StatusPaused + case runtime.PausingStatus: + status = task.StatusPausing + default: + log.G(ctx).WithField("status", state.Status).Warn("unknown status") + } + return &task.Process{ + ID: p.ID(), + Pid: state.Pid, + Status: status, + Stdin: state.Stdin, + Stdout: state.Stdout, + Stderr: state.Stderr, + Terminal: state.Terminal, + ExitStatus: state.ExitStatus, + ExitedAt: state.ExitedAt, + }, nil +} + +func (s *service) Get(ctx context.Context, r *api.GetRequest) (*api.GetResponse, error) { + task, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(task) + if r.ExecID != "" { + if p, err = task.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + t, err := processFromContainerd(ctx, p) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.GetResponse{ + Process: t, + }, nil +} + +func (s *service) List(ctx context.Context, r *api.ListTasksRequest) (*api.ListTasksResponse, error) { + resp := &api.ListTasksResponse{} + for _, r := range s.runtimes { + tasks, err := r.Tasks(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + addTasks(ctx, resp, tasks) + } + return resp, nil +} + +func addTasks(ctx context.Context, r *api.ListTasksResponse, tasks []runtime.Task) { + for _, t := 
range tasks { + tt, err := processFromContainerd(ctx, t) + if err != nil { + if !errdefs.IsNotFound(err) { // handle race with deletion + log.G(ctx).WithError(err).WithField("id", t.ID()).Error("converting task to protobuf") + } + continue + } + r.Tasks = append(r.Tasks, tt) + } +} + +func (s *service) Pause(ctx context.Context, r *api.PauseTaskRequest) (*ptypes.Empty, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + err = t.Pause(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (s *service) Resume(ctx context.Context, r *api.ResumeTaskRequest) (*ptypes.Empty, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + err = t.Resume(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (s *service) Kill(ctx context.Context, r *api.KillRequest) (*ptypes.Empty, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(t) + if r.ExecID != "" { + if p, err = t.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + if err := p.Kill(ctx, r.Signal, r.All); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (s *service) ListPids(ctx context.Context, r *api.ListPidsRequest) (*api.ListPidsResponse, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + processList, err := t.Pids(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + var processes []*task.ProcessInfo + for _, p := range processList { + pInfo := task.ProcessInfo{ + Pid: p.Pid, + } + if p.Info != nil { + a, err := typeurl.MarshalAny(p.Info) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal process %d info", p.Pid) + } + pInfo.Info = a + } + processes = append(processes, &pInfo) + } + return &api.ListPidsResponse{ + Processes: processes, + }, nil +} + +func (s *service) Exec(ctx context.Context, r *api.ExecProcessRequest) (*ptypes.Empty, error) { + if r.ExecID == "" { + return nil, status.Errorf(codes.InvalidArgument, "exec id cannot be empty") + } + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + if _, err := t.Exec(ctx, r.ExecID, runtime.ExecOpts{ + Spec: r.Spec, + IO: runtime.IO{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }, + }); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (s *service) ResizePty(ctx context.Context, r *api.ResizePtyRequest) (*ptypes.Empty, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(t) + if r.ExecID != "" { + if p, err = t.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + if err := p.ResizePty(ctx, runtime.ConsoleSize{ + Width: r.Width, + Height: r.Height, + }); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (s *service) CloseIO(ctx context.Context, r *api.CloseIORequest) (*ptypes.Empty, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(t) + if r.ExecID != "" { + if p, err = t.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + if r.Stdin { + if err := p.CloseIO(ctx); err != nil { + return nil, err + } + } + return empty, nil +} + +func (s *service) Checkpoint(ctx context.Context, r *api.CheckpointTaskRequest) (*api.CheckpointTaskResponse, error) { + 
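+ // Checkpointing proceeds in four steps: checkpoint the task into a
+ // temporary directory, tar that directory into the content store,
+ // write the container's spec next to it, and hand both content
+ // descriptors back to the caller.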
container, err := s.getContainer(ctx, r.ContainerID) + if err != nil { + return nil, err + } + t, err := s.getTaskFromContainer(ctx, container) + if err != nil { + return nil, err + } + image, err := ioutil.TempDir("", "ctd-checkpoint") + if err != nil { + return nil, errdefs.ToGRPC(err) + } + defer os.RemoveAll(image) + if err := t.Checkpoint(ctx, image, r.Options); err != nil { + return nil, errdefs.ToGRPC(err) + } + // write checkpoint to the content store + tar := archive.Diff(ctx, "", image) + cp, err := s.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, image, tar) + // close tar first after write + if err := tar.Close(); err != nil { + return nil, err + } + if err != nil { + return nil, err + } + // write the config to the content store + data, err := container.Spec.Marshal() + if err != nil { + return nil, err + } + spec := bytes.NewReader(data) + specD, err := s.writeContent(ctx, images.MediaTypeContainerd1CheckpointConfig, filepath.Join(image, "spec"), spec) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.CheckpointTaskResponse{ + Descriptors: []*types.Descriptor{ + cp, + specD, + }, + }, nil +} + +func (s *service) Update(ctx context.Context, r *api.UpdateTaskRequest) (*ptypes.Empty, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + if err := t.Update(ctx, r.Resources); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +func (s *service) Metrics(ctx context.Context, r *api.MetricsRequest) (*api.MetricsResponse, error) { + filter, err := filters.ParseAll(r.Filters...) + if err != nil { + return nil, err + } + var resp api.MetricsResponse + for _, r := range s.runtimes { + tasks, err := r.Tasks(ctx) + if err != nil { + return nil, err + } + getTasksMetrics(ctx, filter, tasks, &resp) + } + return &resp, nil +} + +func (s *service) Wait(ctx context.Context, r *api.WaitRequest) (*api.WaitResponse, error) { + t, err := s.getTask(ctx, r.ContainerID) + if err != nil { + return nil, err + } + p := runtime.Process(t) + if r.ExecID != "" { + if p, err = t.Process(ctx, r.ExecID); err != nil { + return nil, errdefs.ToGRPC(err) + } + } + exit, err := p.Wait(ctx) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + return &api.WaitResponse{ + ExitStatus: exit.Status, + ExitedAt: exit.Timestamp, + }, nil +} + +func getTasksMetrics(ctx context.Context, filter filters.Filter, tasks []runtime.Task, r *api.MetricsResponse) { + for _, tk := range tasks { + if !filter.Match(filters.AdapterFunc(func(fieldpath []string) (string, bool) { + t := tk + switch fieldpath[0] { + case "id": + return t.ID(), true + case "namespace": + return t.Info().Namespace, true + case "runtime": + return t.Info().Runtime, true + } + return "", false + })) { + continue + } + + collected := time.Now() + metrics, err := tk.Metrics(ctx) + if err != nil { + if !errdefs.IsNotFound(err) { + log.G(ctx).WithError(err).Errorf("collecting metrics for %s", tk.ID()) + } + continue + } + data, err := typeurl.MarshalAny(metrics) + if err != nil { + log.G(ctx).WithError(err).Errorf("marshal metrics for %s", tk.ID()) + continue + } + r.Metrics = append(r.Metrics, &types.Metric{ + ID: tk.ID(), + Timestamp: collected, + Data: data, + }) + } +} + +func (s *service) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) { + writer, err := s.store.Writer(ctx, ref, 0, "") + if err != nil { + return nil, err + } + defer writer.Close() + size, err := io.Copy(writer, r) + if err != nil { + return nil, err 
+ } + if err := writer.Commit(ctx, 0, ""); err != nil { + return nil, err + } + return &types.Descriptor{ + MediaType: mediaType, + Digest: writer.Digest(), + Size_: size, + }, nil +} + +func (s *service) getContainer(ctx context.Context, id string) (*containers.Container, error) { + var container containers.Container + if err := s.db.View(func(tx *bolt.Tx) error { + store := metadata.NewContainerStore(tx) + var err error + container, err = store.Get(ctx, id) + return err + }); err != nil { + return nil, errdefs.ToGRPC(err) + } + return &container, nil +} + +func (s *service) getTask(ctx context.Context, id string) (runtime.Task, error) { + container, err := s.getContainer(ctx, id) + if err != nil { + return nil, err + } + return s.getTaskFromContainer(ctx, container) +} + +func (s *service) getTaskFromContainer(ctx context.Context, container *containers.Container) (runtime.Task, error) { + runtime, err := s.getRuntime(container.Runtime.Name) + if err != nil { + return nil, errdefs.ToGRPCf(err, "runtime for task %s", container.Runtime.Name) + } + t, err := runtime.Get(ctx, container.ID) + if err != nil { + return nil, status.Errorf(codes.NotFound, "task %v not found", container.ID) + } + return t, nil +} + +func (s *service) getRuntime(name string) (runtime.Runtime, error) { + runtime, ok := s.runtimes[name] + if !ok { + return nil, status.Errorf(codes.NotFound, "unknown runtime %q", name) + } + return runtime, nil +} diff --git a/vendor/github.com/containerd/containerd/services/version/service.go b/vendor/github.com/containerd/containerd/services/version/service.go new file mode 100644 index 000000000..e2da522f5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/version/service.go @@ -0,0 +1,55 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package version + +import ( + api "github.com/containerd/containerd/api/services/version/v1" + "github.com/containerd/containerd/plugin" + ctrdversion "github.com/containerd/containerd/version" + ptypes "github.com/gogo/protobuf/types" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +var _ api.VersionServer = &service{} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "version", + InitFn: initFunc, + }) +} + +func initFunc(ic *plugin.InitContext) (interface{}, error) { + return &service{}, nil +} + +type service struct { +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterVersionServer(server, s) + return nil +} + +func (s *service) Version(ctx context.Context, _ *ptypes.Empty) (*api.VersionResponse, error) { + return &api.VersionResponse{ + Version: ctrdversion.Version, + Revision: ctrdversion.Revision, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/snapshot.go b/vendor/github.com/containerd/containerd/snapshot.go index 85bdba1b6..155ec718f 100644 --- a/vendor/github.com/containerd/containerd/snapshot.go +++ b/vendor/github.com/containerd/containerd/snapshot.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go b/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go new file mode 100644 index 000000000..3703f8ef9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go @@ -0,0 +1,413 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package overlay + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/snapshots/storage" + "github.com/containerd/continuity/fs" + "github.com/pkg/errors" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.SnapshotPlugin, + ID: "overlayfs", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + ic.Meta.Exports["root"] = ic.Root + return NewSnapshotter(ic.Root) + }, + }) +} + +type snapshotter struct { + root string + ms *storage.MetaStore +} + +// NewSnapshotter returns a Snapshotter which uses overlayfs. The overlayfs +// diffs are stored under the provided root. A metadata file is stored under +// the root. +func NewSnapshotter(root string) (snapshots.Snapshotter, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + supportsDType, err := fs.SupportsDType(root) + if err != nil { + return nil, err + } + if !supportsDType { + return nil, fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root) + } + ms, err := storage.NewMetaStore(filepath.Join(root, "metadata.db")) + if err != nil { + return nil, err + } + + if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + return &snapshotter{ + root: root, + ms: ms, + }, nil +} + +// Stat returns the info for an active or committed snapshot by name or +// key. +// +// Should be used for parent resolution, existence checks and to discern +// the kind of snapshot. +func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return snapshots.Info{}, err + } + defer t.Rollback() + _, info, _, err := storage.GetInfo(ctx, key) + if err != nil { + return snapshots.Info{}, err + } + + return info, nil +} + +func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return snapshots.Info{}, err + } + + info, err = storage.UpdateInfo(ctx, info, fieldpaths...) + if err != nil { + t.Rollback() + return snapshots.Info{}, err + } + + if err := t.Commit(); err != nil { + return snapshots.Info{}, err + } + + return info, nil +} + +// Usage returns the resources taken by the snapshot identified by key. +// +// For active snapshots, this will scan the usage of the overlay "diff" (aka +// "upper") directory and may take some time. +// +// For committed snapshots, the value is returned from the metadata database. +func (o *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return snapshots.Usage{}, err + } + id, info, usage, err := storage.GetInfo(ctx, key) + t.Rollback() // transaction no longer needed at this point. + + if err != nil { + return snapshots.Usage{}, err + } + + upperPath := o.upperPath(id) + + if info.Kind == snapshots.KindActive { + du, err := fs.DiskUsage(upperPath) + if err != nil { + // TODO(stevvooe): Consider not reporting an error in this case. 
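+ // fs.DiskUsage scans the whole upper directory, so active
+ // snapshots pay a cost proportional to their file count here.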
+ return snapshots.Usage{}, err + } + + usage = snapshots.Usage(du) + } + + return usage, nil +} + +func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + return o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts) +} + +func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + return o.createSnapshot(ctx, snapshots.KindView, key, parent, opts) +} + +// Mounts returns the mounts for the transaction identified by key. Can be +// called on an read-write or readonly transaction. +// +// This can be used to recover mounts after calling View or Prepare. +func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return nil, err + } + s, err := storage.GetSnapshot(ctx, key) + t.Rollback() + if err != nil { + return nil, errors.Wrap(err, "failed to get active mount") + } + return o.mounts(s), nil +} + +func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return err + } + + defer func() { + if err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + } + }() + + // grab the existing id + id, _, _, err := storage.GetInfo(ctx, key) + if err != nil { + return err + } + + usage, err := fs.DiskUsage(o.upperPath(id)) + if err != nil { + return err + } + + if _, err = storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil { + return errors.Wrap(err, "failed to commit snapshot") + } + return t.Commit() +} + +// Remove abandons the transaction identified by key. All resources +// associated with the key will be removed. +func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return err + } + defer func() { + if err != nil && t != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + } + }() + + id, _, err := storage.Remove(ctx, key) + if err != nil { + return errors.Wrap(err, "failed to remove") + } + + path := filepath.Join(o.root, "snapshots", id) + renamed := filepath.Join(o.root, "snapshots", "rm-"+id) + if err := os.Rename(path, renamed); err != nil { + return errors.Wrap(err, "failed to rename") + } + + err = t.Commit() + t = nil + if err != nil { + if err1 := os.Rename(renamed, path); err1 != nil { + // May cause inconsistent data on disk + log.G(ctx).WithError(err1).WithField("path", renamed).Errorf("failed to rename after failed commit") + } + return errors.Wrap(err, "failed to commit") + } + if err := os.RemoveAll(renamed); err != nil { + // Must be cleaned up, any "rm-*" could be removed if no active transactions + log.G(ctx).WithError(err).WithField("path", renamed).Warnf("failed to remove root filesystem") + } + + return nil +} + +// Walk the committed snapshots. 
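+// A single read-only metadata transaction is held for the duration of
+// the walk, so fn should avoid calling back into the snapshotter.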
+func (o *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return err + } + defer t.Rollback() + return storage.WalkInfo(ctx, fn) +} + +func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) ([]mount.Mount, error) { + var ( + path string + snapshotDir = filepath.Join(o.root, "snapshots") + ) + + td, err := ioutil.TempDir(snapshotDir, "new-") + if err != nil { + return nil, errors.Wrap(err, "failed to create temp dir") + } + defer func() { + if err != nil { + if td != "" { + if err1 := os.RemoveAll(td); err1 != nil { + err = errors.Wrapf(err, "remove failed: %v", err1) + } + } + if path != "" { + if err1 := os.RemoveAll(path); err1 != nil { + err = errors.Wrapf(err, "failed to remove path: %v", err1) + } + } + } + }() + + fs := filepath.Join(td, "fs") + if err = os.MkdirAll(fs, 0755); err != nil { + return nil, err + } + + if kind == snapshots.KindActive { + if err = os.MkdirAll(filepath.Join(td, "work"), 0711); err != nil { + return nil, err + } + } + + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return nil, err + } + rollback := true + defer func() { + if rollback { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + } + }() + + s, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...) + if err != nil { + return nil, errors.Wrap(err, "failed to create snapshot") + } + + if len(s.ParentIDs) > 0 { + st, err := os.Stat(o.upperPath(s.ParentIDs[0])) + if err != nil { + return nil, errors.Wrap(err, "failed to stat parent") + } + + stat := st.Sys().(*syscall.Stat_t) + if err := os.Lchown(fs, int(stat.Uid), int(stat.Gid)); err != nil { + return nil, errors.Wrap(err, "failed to chown") + } + } + + path = filepath.Join(snapshotDir, s.ID) + if err = os.Rename(td, path); err != nil { + return nil, errors.Wrap(err, "failed to rename") + } + td = "" + + rollback = false + if err = t.Commit(); err != nil { + return nil, errors.Wrap(err, "commit failed") + } + + return o.mounts(s), nil +} + +func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { + if len(s.ParentIDs) == 0 { + // if we only have one layer/no parents then just return a bind mount as overlay + // will not work + roFlag := "rw" + if s.Kind == snapshots.KindView { + roFlag = "ro" + } + + return []mount.Mount{ + { + Source: o.upperPath(s.ID), + Type: "bind", + Options: []string{ + roFlag, + "rbind", + }, + }, + } + } + var options []string + + if s.Kind == snapshots.KindActive { + options = append(options, + fmt.Sprintf("workdir=%s", o.workPath(s.ID)), + fmt.Sprintf("upperdir=%s", o.upperPath(s.ID)), + ) + } else if len(s.ParentIDs) == 1 { + return []mount.Mount{ + { + Source: o.upperPath(s.ParentIDs[0]), + Type: "bind", + Options: []string{ + "ro", + "rbind", + }, + }, + } + } + + parentPaths := make([]string, len(s.ParentIDs)) + for i := range s.ParentIDs { + parentPaths[i] = o.upperPath(s.ParentIDs[i]) + } + + options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(parentPaths, ":"))) + return []mount.Mount{ + { + Type: "overlay", + Source: "overlay", + Options: options, + }, + } + +} + +func (o *snapshotter) upperPath(id string) string { + return filepath.Join(o.root, "snapshots", id, "fs") +} + +func (o *snapshotter) workPath(id string) string { + return filepath.Join(o.root, "snapshots", id, "work") +} + +// Close closes the snapshotter 
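+// and the bolt-backed metadata store beneath it.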
+func (o *snapshotter) Close() error { + return o.ms.Close() +} diff --git a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go index cde4c7261..d11252d1e 100644 --- a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go +++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package snapshots import ( diff --git a/vendor/github.com/containerd/containerd/snapshots/storage/bolt.go b/vendor/github.com/containerd/containerd/snapshots/storage/bolt.go new file mode 100644 index 000000000..7299fa11c --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshots/storage/bolt.go @@ -0,0 +1,586 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package storage + +import ( + "context" + "encoding/binary" + "fmt" + "strings" + "time" + + "github.com/boltdb/bolt" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/snapshots" + "github.com/pkg/errors" +) + +var ( + bucketKeyStorageVersion = []byte("v1") + bucketKeySnapshot = []byte("snapshots") + bucketKeyParents = []byte("parents") + + bucketKeyID = []byte("id") + bucketKeyParent = []byte("parent") + bucketKeyKind = []byte("kind") + bucketKeyInodes = []byte("inodes") + bucketKeySize = []byte("size") + + // ErrNoTransaction is returned when an operation is attempted with + // a context which is not inside of a transaction. + ErrNoTransaction = errors.New("no transaction in context") +) + +// parentKey returns a composite key of the parent and child identifiers. The +// parts of the key are separated by a zero byte. +func parentKey(parent, child uint64) []byte { + b := make([]byte, binary.Size([]uint64{parent, child})+1) + i := binary.PutUvarint(b, parent) + j := binary.PutUvarint(b[i+1:], child) + return b[0 : i+j+1] +} + +// parentPrefixKey returns the parent part of the composite key with the +// zero byte separator. +func parentPrefixKey(parent uint64) []byte { + b := make([]byte, binary.Size(parent)+1) + i := binary.PutUvarint(b, parent) + return b[0 : i+1] +} + +// getParentPrefix returns the first part of the composite key which +// represents the parent identifier. +func getParentPrefix(b []byte) uint64 { + parent, _ := binary.Uvarint(b) + return parent +} + +// GetInfo returns the snapshot Info directly from the metadata. 
Requires a
+// context with a storage transaction.
+func GetInfo(ctx context.Context, key string) (string, snapshots.Info, snapshots.Usage, error) {
+	var (
+		id uint64
+		su snapshots.Usage
+		si = snapshots.Info{
+			Name: key,
+		}
+	)
+	err := withSnapshotBucket(ctx, key, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+		getUsage(bkt, &su)
+		return readSnapshot(bkt, &id, &si)
+	})
+	if err != nil {
+		return "", snapshots.Info{}, snapshots.Usage{}, err
+	}
+
+	return fmt.Sprintf("%d", id), si, su, nil
+}
+
+// UpdateInfo updates an existing snapshot info's data.
+func UpdateInfo(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
+	updated := snapshots.Info{
+		Name: info.Name,
+	}
+	err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+		sbkt := bkt.Bucket([]byte(info.Name))
+		if sbkt == nil {
+			return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist")
+		}
+		if err := readSnapshot(sbkt, nil, &updated); err != nil {
+			return err
+		}
+
+		if len(fieldpaths) > 0 {
+			for _, path := range fieldpaths {
+				if strings.HasPrefix(path, "labels.") {
+					if updated.Labels == nil {
+						updated.Labels = map[string]string{}
+					}
+
+					key := strings.TrimPrefix(path, "labels.")
+					updated.Labels[key] = info.Labels[key]
+					continue
+				}
+
+				switch path {
+				case "labels":
+					updated.Labels = info.Labels
+				default:
+					return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on snapshot %q", path, info.Name)
+				}
+			}
+		} else {
+			// Set mutable fields
+			updated.Labels = info.Labels
+		}
+		updated.Updated = time.Now().UTC()
+		if err := boltutil.WriteTimestamps(sbkt, updated.Created, updated.Updated); err != nil {
+			return err
+		}
+
+		return boltutil.WriteLabels(sbkt, updated.Labels)
+	})
+	if err != nil {
+		return snapshots.Info{}, err
+	}
+	return updated, nil
+}
+
+// WalkInfo iterates through all metadata Info for the stored snapshots and
+// calls the provided function for each. Requires a context with a storage
+// transaction.
+func WalkInfo(ctx context.Context, fn func(context.Context, snapshots.Info) error) error {
+	return withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+		return bkt.ForEach(func(k, v []byte) error {
+			// Skip non-bucket entries; nested buckets have a nil value.
+			if v != nil {
+				return nil
+			}
+			var (
+				sbkt = bkt.Bucket(k)
+				si   = snapshots.Info{
+					Name: string(k),
+				}
+			)
+			if err := readSnapshot(sbkt, nil, &si); err != nil {
+				return err
+			}
+
+			return fn(ctx, si)
+		})
+	})
+}
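A usage note on UpdateInfo's fieldpaths: only the whole "labels" map or individual "labels.<key>" entries may be named; any other path is rejected with ErrInvalidArgument, and an empty fieldpaths list replaces all mutable fields at once. A hedged sketch of a caller updating a single label (the label key and snapshot name are hypothetical, and ctx must already carry a writable storage transaction):

    info := snapshots.Info{
        Name:   "layer-7",
        Labels: map[string]string{"containerd.io/gc.root": "keep"},
    }
    // Copies only the one named label onto the stored record; all other
    // labels and fields on the snapshot are left as they were.
    updated, err := storage.UpdateInfo(ctx, info, "labels.containerd.io/gc.root")

+
+// GetSnapshot returns the metadata for the active or view snapshot transaction
+// referenced by the given key. Requires a context with a storage transaction.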
+func GetSnapshot(ctx context.Context, key string) (s Snapshot, err error) { + err = withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist") + } + + s.ID = fmt.Sprintf("%d", readID(sbkt)) + s.Kind = readKind(sbkt) + + if s.Kind != snapshots.KindActive && s.Kind != snapshots.KindView { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "requested snapshot %v not active or view", key) + } + + if parentKey := sbkt.Get(bucketKeyParent); len(parentKey) > 0 { + spbkt := bkt.Bucket(parentKey) + if spbkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "parent does not exist") + } + + s.ParentIDs, err = parents(bkt, spbkt, readID(spbkt)) + if err != nil { + return errors.Wrap(err, "failed to get parent chain") + } + } + return nil + }) + if err != nil { + return Snapshot{}, err + } + + return +} + +// CreateSnapshot inserts a record for an active or view snapshot with the provided parent. +func CreateSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts ...snapshots.Opt) (s Snapshot, err error) { + switch kind { + case snapshots.KindActive, snapshots.KindView: + default: + return Snapshot{}, errors.Wrapf(errdefs.ErrInvalidArgument, "snapshot type %v invalid; only snapshots of type Active or View can be created", kind) + } + var base snapshots.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return Snapshot{}, err + } + } + + err = createBucketIfNotExists(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + var ( + spbkt *bolt.Bucket + ) + if parent != "" { + spbkt = bkt.Bucket([]byte(parent)) + if spbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "missing parent %q bucket", parent) + } + + if readKind(spbkt) != snapshots.KindCommitted { + return errors.Wrapf(errdefs.ErrInvalidArgument, "parent %q is not committed snapshot", parent) + } + } + sbkt, err := bkt.CreateBucket([]byte(key)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v", key) + } + return err + } + + id, err := bkt.NextSequence() + if err != nil { + return errors.Wrapf(err, "unable to get identifier for snapshot %q", key) + } + + t := time.Now().UTC() + si := snapshots.Info{ + Parent: parent, + Kind: kind, + Labels: base.Labels, + Created: t, + Updated: t, + } + if err := putSnapshot(sbkt, id, si); err != nil { + return err + } + + if spbkt != nil { + pid := readID(spbkt) + + // Store a backlink from the key to the parent. Store the snapshot name + // as the value to allow following the backlink to the snapshot value. + if err := pbkt.Put(parentKey(pid, id), []byte(key)); err != nil { + return errors.Wrapf(err, "failed to write parent link for snapshot %q", key) + } + + s.ParentIDs, err = parents(bkt, spbkt, pid) + if err != nil { + return errors.Wrapf(err, "failed to get parent chain for snapshot %q", key) + } + } + + s.ID = fmt.Sprintf("%d", id) + s.Kind = kind + return nil + }) + if err != nil { + return Snapshot{}, err + } + + return +} + +// Remove removes a snapshot from the metastore. The string identifier for the +// snapshot is returned as well as the kind. The provided context must contain a +// writable transaction. 
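One detail worth calling out before the implementation: the child-existence check at the top of Remove relies on the parents-bucket layout established in CreateSnapshot above, where every link is keyed parentKey(parentID, childID), so all of a parent's children sort contiguously under parentPrefixKey(parentID). In isolation the check looks roughly like this (helper name hypothetical; same package and helpers as this file):

    func hasChildren(pbkt *bolt.Bucket, id uint64) bool {
        // Seek lands on the first key at or after the parent's prefix; if its
        // parent component still decodes to id, at least one child link exists.
        k, _ := pbkt.Cursor().Seek(parentPrefixKey(id))
        return getParentPrefix(k) == id
    }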
+func Remove(ctx context.Context, key string) (string, snapshots.Kind, error) {
+	var (
+		id uint64
+		si snapshots.Info
+	)
+
+	if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+		sbkt := bkt.Bucket([]byte(key))
+		if sbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v", key)
+		}
+
+		if err := readSnapshot(sbkt, &id, &si); err != nil {
+			return errors.Wrapf(err, "failed to read snapshot %s", key)
+		}
+
+		if pbkt != nil {
+			k, _ := pbkt.Cursor().Seek(parentPrefixKey(id))
+			if getParentPrefix(k) == id {
+				return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot remove snapshot with child")
+			}
+
+			if si.Parent != "" {
+				spbkt := bkt.Bucket([]byte(si.Parent))
+				if spbkt == nil {
+					return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v", key)
+				}
+
+				if err := pbkt.Delete(parentKey(readID(spbkt), id)); err != nil {
+					return errors.Wrap(err, "failed to delete parent link")
+				}
+			}
+		}
+
+		if err := bkt.DeleteBucket([]byte(key)); err != nil {
+			return errors.Wrap(err, "failed to delete snapshot")
+		}
+
+		return nil
+	}); err != nil {
+		return "", 0, err
+	}
+
+	return fmt.Sprintf("%d", id), si.Kind, nil
+}
+
+// CommitActive renames the active snapshot transaction referenced by `key`
+// as a committed snapshot referenced by `name`. The resulting snapshot will be
+// committed and readonly. The `key` reference will no longer be available for
+// lookup or removal. The returned string identifier for the committed snapshot
+// is the same identifier of the original active snapshot. The provided context
+// must contain a writable transaction.
+func CommitActive(ctx context.Context, key, name string, usage snapshots.Usage, opts ...snapshots.Opt) (string, error) {
+	var (
+		id   uint64
+		base snapshots.Info
+	)
+	for _, opt := range opts {
+		if err := opt(&base); err != nil {
+			return "", err
+		}
+	}
+
+	if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+		dbkt, err := bkt.CreateBucket([]byte(name))
+		if err != nil {
+			if err == bolt.ErrBucketExists {
+				err = errdefs.ErrAlreadyExists
+			}
+			return errors.Wrapf(err, "committed snapshot %v", name)
+		}
+		sbkt := bkt.Bucket([]byte(key))
+		if sbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "failed to get active snapshot %q", key)
+		}
+
+		var si snapshots.Info
+		if err := readSnapshot(sbkt, &id, &si); err != nil {
+			return errors.Wrapf(err, "failed to read active snapshot %q", key)
+		}
+
+		if si.Kind != snapshots.KindActive {
+			return errors.Wrapf(errdefs.ErrFailedPrecondition, "snapshot %q is not active", key)
+		}
+		si.Kind = snapshots.KindCommitted
+		si.Created = time.Now().UTC()
+		si.Updated = si.Created
+
+		// Replace labels, do not inherit
+		si.Labels = base.Labels
+
+		if err := putSnapshot(dbkt, id, si); err != nil {
+			return err
+		}
+		if err := putUsage(dbkt, usage); err != nil {
+			return err
+		}
+		if err := bkt.DeleteBucket([]byte(key)); err != nil {
+			return errors.Wrapf(err, "failed to delete active snapshot %q", key)
+		}
+		if si.Parent != "" {
+			spbkt := bkt.Bucket([]byte(si.Parent))
+			if spbkt == nil {
+				return errors.Wrapf(errdefs.ErrNotFound, "missing parent %q of snapshot %q", si.Parent, key)
+			}
+			pid := readID(spbkt)
+
+			// Update the parent link to point at the new committed name
+			if err := pbkt.Put(parentKey(pid, id), []byte(name)); err != nil {
+				return errors.Wrapf(err, "failed to update parent link %q from %q to %q", pid, key, name)
+			}
+		}
+
+		return nil
+	}); err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("%d", id), nil
+}
+
+func withSnapshotBucket(ctx context.Context, key string, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error {
+	tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+	if !ok {
+		return ErrNoTransaction
+	}
+	vbkt := tx.Bucket(bucketKeyStorageVersion)
+	if vbkt == nil {
+		return errors.Wrap(errdefs.ErrNotFound, "bucket does not exist")
+	}
+	bkt := vbkt.Bucket(bucketKeySnapshot)
+	if bkt == nil {
+		return errors.Wrap(errdefs.ErrNotFound, "snapshots bucket does not exist")
+	}
+	bkt = bkt.Bucket([]byte(key))
+	if bkt == nil {
+		return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist")
+	}
+
+	return fn(ctx, bkt, vbkt.Bucket(bucketKeyParents))
+}
+
+func withBucket(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error {
+	tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+	if !ok {
+		return ErrNoTransaction
+	}
+	bkt := tx.Bucket(bucketKeyStorageVersion)
+	if bkt == nil {
+		return errors.Wrap(errdefs.ErrNotFound, "bucket does not exist")
+	}
+	return fn(ctx, bkt.Bucket(bucketKeySnapshot), bkt.Bucket(bucketKeyParents))
+}
+
+func createBucketIfNotExists(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error {
+	tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+	if !ok {
+		return ErrNoTransaction
+	}
+
+	bkt, err := tx.CreateBucketIfNotExists(bucketKeyStorageVersion)
+	if err != nil {
+		return errors.Wrap(err, "failed to create version bucket")
+	}
+	sbkt, err := bkt.CreateBucketIfNotExists(bucketKeySnapshot)
+	if err != nil {
+		return errors.Wrap(err, "failed to create snapshots bucket")
+	}
+	pbkt, err := bkt.CreateBucketIfNotExists(bucketKeyParents)
+	if err != nil {
+		return errors.Wrap(err, "failed to create parents bucket")
+	}
+	return fn(ctx, sbkt, pbkt)
+}
+
+func parents(bkt, pbkt *bolt.Bucket, parent uint64) (parents []string, err error) {
+	for {
+		parents = append(parents, fmt.Sprintf("%d", parent))
+
+		parentKey := pbkt.Get(bucketKeyParent)
+		if len(parentKey) == 0 {
+			return
+		}
+		pbkt = bkt.Bucket(parentKey)
+		if pbkt == nil {
+			return nil, errors.Wrap(errdefs.ErrNotFound, "missing parent")
+		}
+
+		parent = readID(pbkt)
+	}
+}
+
+func readKind(bkt *bolt.Bucket) (k snapshots.Kind) {
+	kind := bkt.Get(bucketKeyKind)
+	if len(kind) == 1 {
+		k = snapshots.Kind(kind[0])
+	}
+	return
+}
+
+func readID(bkt *bolt.Bucket) uint64 {
+	id, _ := binary.Uvarint(bkt.Get(bucketKeyID))
+	return id
+}
+
+func readSnapshot(bkt *bolt.Bucket, id *uint64, si *snapshots.Info) error {
+	if id != nil {
+		*id = readID(bkt)
+	}
+	if si != nil {
+		si.Kind = readKind(bkt)
+		si.Parent = string(bkt.Get(bucketKeyParent))
+
+		if err := boltutil.ReadTimestamps(bkt, &si.Created, &si.Updated); err != nil {
+			return err
+		}
+
+		labels, err := boltutil.ReadLabels(bkt)
+		if err != nil {
+			return err
+		}
+		si.Labels = labels
+	}
+
+	return nil
+}
+
+func putSnapshot(bkt *bolt.Bucket, id uint64, si snapshots.Info) error {
+	idEncoded, err := encodeID(id)
+	if err != nil {
+		return err
+	}
+
+	updates := [][2][]byte{
+		{bucketKeyID, idEncoded},
+		{bucketKeyKind, []byte{byte(si.Kind)}},
+	}
+	if si.Parent != "" {
+		updates = append(updates, [2][]byte{bucketKeyParent, []byte(si.Parent)})
+	}
+	for _, v := range updates {
+		if err := bkt.Put(v[0], v[1]); err != nil {
+			return err
+		}
+	}
+	if err := boltutil.WriteTimestamps(bkt, si.Created, si.Updated); err != nil {
+		return err
+	}
+	return boltutil.WriteLabels(bkt, si.Labels)
+}
+
+func getUsage(bkt *bolt.Bucket, usage *snapshots.Usage) {
+	usage.Inodes, _ = binary.Varint(bkt.Get(bucketKeyInodes))
+	usage.Size, _ = binary.Varint(bkt.Get(bucketKeySize))
+}
+
+func putUsage(bkt *bolt.Bucket, usage snapshots.Usage) error {
+	for _, v := range []struct {
+		key   []byte
+		value int64
+	}{
+		{bucketKeyInodes, usage.Inodes},
+		{bucketKeySize, usage.Size},
+	} {
+		e, err := encodeSize(v.value)
+		if err != nil {
+			return err
+		}
+		if err := bkt.Put(v.key, e); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func encodeSize(size int64) ([]byte, error) {
+	var (
+		buf         [binary.MaxVarintLen64]byte
+		sizeEncoded = buf[:]
+	)
+	sizeEncoded = sizeEncoded[:binary.PutVarint(sizeEncoded, size)]
+
+	if len(sizeEncoded) == 0 {
+		return nil, fmt.Errorf("failed encoding size = %v", size)
+	}
+	return sizeEncoded, nil
+}
+
+func encodeID(id uint64) ([]byte, error) {
+	var (
+		buf       [binary.MaxVarintLen64]byte
+		idEncoded = buf[:]
+	)
+	idEncoded = idEncoded[:binary.PutUvarint(idEncoded, id)]
+
+	if len(idEncoded) == 0 {
+		return nil, fmt.Errorf("failed encoding id = %v", id)
+	}
+	return idEncoded, nil
+}
diff --git a/vendor/github.com/containerd/containerd/snapshots/storage/metastore.go b/vendor/github.com/containerd/containerd/snapshots/storage/metastore.go
new file mode 100644
index 000000000..fa5e6c78b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/snapshots/storage/metastore.go
@@ -0,0 +1,115 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package storage provides a metadata storage implementation for snapshot
+// drivers. Driver implementations are responsible for starting and managing
+// transactions using the defined context creator. This storage package uses
+// BoltDB for storing metadata. Access to the raw boltdb transaction is not
+// provided, but the stored object is provided by the proto subpackage.
+package storage
+
+import (
+	"context"
+	"sync"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/snapshots"
+	"github.com/pkg/errors"
+)
+
+// Transactor is used to finalize an active transaction.
+type Transactor interface {
+	// Commit commits any changes made during the transaction. On error a
+	// caller is expected to clean up any resources which would have relied
+	// on data mutated as part of this transaction. Only writable
+	// transactions can commit; non-writable transactions must call Rollback.
+	Commit() error
+
+	// Rollback rolls back any changes made during the transaction. This
+	// must be called on all non-writable transactions and aborted writable
+	// transactions.
+	Rollback() error
+}
+
+// Snapshot holds the metadata for an active or view snapshot transaction. The
+// ParentIDs hold the snapshot identifiers for the committed snapshots this
+// active or view is based on, ordered with the active snapshot's immediate
+// parent at the first index and the base snapshot at the last. To rebuild
+// the chain from scratch, apply the identifiers in reverse order, from the
+// last index to the first.
+type Snapshot struct {
+	Kind      snapshots.Kind
+	ID        string
+	ParentIDs []string
+}
+
+// MetaStore is used to store metadata related to a snapshot driver. The
+// MetaStore is intended to store metadata related to name, state and
+// parentage. Using the MetaStore is not required to implement a snapshot
+// driver but can be used to handle the persistence and transactional
+// complexities of a driver implementation.
+type MetaStore struct {
+	dbfile string
+
+	dbL sync.Mutex
+	db  *bolt.DB
+}
+
+// NewMetaStore returns a snapshot MetaStore for storage of metadata related to
+// a snapshot driver backed by a bolt file database. This implementation is
+// strongly consistent and does all metadata changes in a transaction to protect
+// against process crashes causing inconsistent metadata state.
+func NewMetaStore(dbfile string) (*MetaStore, error) {
+	return &MetaStore{
+		dbfile: dbfile,
+	}, nil
+}
+
+type transactionKey struct{}
+
+// TransactionContext creates a new transaction context. The writable value
+// should be set to true for transactions which are expected to mutate data.
+func (ms *MetaStore) TransactionContext(ctx context.Context, writable bool) (context.Context, Transactor, error) {
+	ms.dbL.Lock()
+	if ms.db == nil {
+		db, err := bolt.Open(ms.dbfile, 0600, nil)
+		if err != nil {
+			ms.dbL.Unlock()
+			return ctx, nil, errors.Wrap(err, "failed to open database file")
+		}
+		ms.db = db
+	}
+	ms.dbL.Unlock()
+
+	tx, err := ms.db.Begin(writable)
+	if err != nil {
+		return ctx, nil, errors.Wrap(err, "failed to start transaction")
+	}
+
+	ctx = context.WithValue(ctx, transactionKey{}, tx)
+
+	return ctx, tx, nil
+}
+
+// Close closes the metastore and any underlying database connections
+func (ms *MetaStore) Close() error {
+	ms.dbL.Lock()
+	defer ms.dbL.Unlock()
+	if ms.db == nil {
+		return nil
+	}
+	return ms.db.Close()
+}
diff --git a/vendor/github.com/containerd/containerd/snapshotter_default_linux.go b/vendor/github.com/containerd/containerd/snapshotter_default_linux.go
index c432a2d55..d925d4ef9 100644
--- a/vendor/github.com/containerd/containerd/snapshotter_default_linux.go
+++ b/vendor/github.com/containerd/containerd/snapshotter_default_linux.go
@@ -1,3 +1,19 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
 package containerd
 
 const (
diff --git a/vendor/github.com/containerd/containerd/snapshotter_default_unix.go b/vendor/github.com/containerd/containerd/snapshotter_default_unix.go
index cb8b08ac1..abd8f8694 100644
--- a/vendor/github.com/containerd/containerd/snapshotter_default_unix.go
+++ b/vendor/github.com/containerd/containerd/snapshotter_default_unix.go
@@ -1,5 +1,21 @@
 // +build darwin freebsd solaris
 
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd const ( diff --git a/vendor/github.com/containerd/containerd/snapshotter_default_windows.go b/vendor/github.com/containerd/containerd/snapshotter_default_windows.go index 3b7358218..320211a4a 100644 --- a/vendor/github.com/containerd/containerd/snapshotter_default_windows.go +++ b/vendor/github.com/containerd/containerd/snapshotter_default_windows.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd const ( diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/sys/epoll.go index 3a4d97cfa..683f38eea 100644 --- a/vendor/github.com/containerd/containerd/sys/epoll.go +++ b/vendor/github.com/containerd/containerd/sys/epoll.go @@ -1,5 +1,21 @@ // +build linux +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package sys import "golang.org/x/sys/unix" diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/sys/fds.go index 3c1ec67e5..db3cf702f 100644 --- a/vendor/github.com/containerd/containerd/sys/fds.go +++ b/vendor/github.com/containerd/containerd/sys/fds.go @@ -1,5 +1,21 @@ // +build !windows,!darwin +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package sys import ( diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go index ed23609c5..700f44efa 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_unix.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package sys import "os" diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go index 36395c556..dc880c342 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go @@ -1,5 +1,21 @@ // +build windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package sys import ( diff --git a/vendor/github.com/containerd/containerd/sys/oom_unix.go b/vendor/github.com/containerd/containerd/sys/oom_unix.go index 23fcc9437..1abe7485b 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_unix.go +++ b/vendor/github.com/containerd/containerd/sys/oom_unix.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package sys import ( diff --git a/vendor/github.com/containerd/containerd/sys/oom_windows.go b/vendor/github.com/containerd/containerd/sys/oom_windows.go index 6e42ddce8..f44bcebd1 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_windows.go +++ b/vendor/github.com/containerd/containerd/sys/oom_windows.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package sys // SetOOMScore sets the oom score for the process diff --git a/vendor/github.com/containerd/containerd/sys/proc.go b/vendor/github.com/containerd/containerd/sys/proc.go index fbe7b5190..496eb1fea 100644 --- a/vendor/github.com/containerd/containerd/sys/proc.go +++ b/vendor/github.com/containerd/containerd/sys/proc.go @@ -1,5 +1,21 @@ // +build linux +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package sys import ( diff --git a/vendor/github.com/containerd/containerd/sys/reaper.go b/vendor/github.com/containerd/containerd/sys/reaper.go index bbc5a1e86..23cb040b8 100644 --- a/vendor/github.com/containerd/containerd/sys/reaper.go +++ b/vendor/github.com/containerd/containerd/sys/reaper.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package sys import "golang.org/x/sys/unix" diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go index 0d5f049aa..4d71c709a 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go @@ -1,5 +1,21 @@ // +build !windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + package sys import ( diff --git a/vendor/github.com/containerd/containerd/sys/socket_windows.go b/vendor/github.com/containerd/containerd/sys/socket_windows.go index de25c0860..3ee7679b4 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_windows.go +++ b/vendor/github.com/containerd/containerd/sys/socket_windows.go @@ -1,5 +1,21 @@ // +build windows +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package sys import ( diff --git a/vendor/github.com/containerd/containerd/sys/stat_bsd.go b/vendor/github.com/containerd/containerd/sys/stat_bsd.go index 84c59ab6f..b9c95d90d 100644 --- a/vendor/github.com/containerd/containerd/sys/stat_bsd.go +++ b/vendor/github.com/containerd/containerd/sys/stat_bsd.go @@ -1,5 +1,21 @@ // +build darwin freebsd +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package sys import ( diff --git a/vendor/github.com/containerd/containerd/sys/stat_unix.go b/vendor/github.com/containerd/containerd/sys/stat_unix.go index 0f83ab117..21a666dff 100644 --- a/vendor/github.com/containerd/containerd/sys/stat_unix.go +++ b/vendor/github.com/containerd/containerd/sys/stat_unix.go @@ -1,5 +1,21 @@ // +build linux solaris +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package sys import ( diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go index afa5e2400..4421eb006 100644 --- a/vendor/github.com/containerd/containerd/task.go +++ b/vendor/github.com/containerd/containerd/task.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( @@ -572,7 +588,7 @@ func (t *task) writeIndex(ctx context.Context, index *v1.Index) (d v1.Descriptor return writeContent(ctx, t.client.ContentStore(), v1.MediaTypeImageIndex, t.id, buf, content.WithLabels(labels)) } -func writeContent(ctx context.Context, store content.Store, mediaType, ref string, r io.Reader, opts ...content.Opt) (d v1.Descriptor, err error) { +func writeContent(ctx context.Context, store content.Ingester, mediaType, ref string, r io.Reader, opts ...content.Opt) (d v1.Descriptor, err error) { writer, err := store.Writer(ctx, ref, 0, "") if err != nil { return d, err diff --git a/vendor/github.com/containerd/containerd/task_opts.go b/vendor/github.com/containerd/containerd/task_opts.go index a387adb6e..495d4225b 100644 --- a/vendor/github.com/containerd/containerd/task_opts.go +++ b/vendor/github.com/containerd/containerd/task_opts.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/task_opts_linux.go b/vendor/github.com/containerd/containerd/task_opts_linux.go index 5b91cb548..63136fd6a 100644 --- a/vendor/github.com/containerd/containerd/task_opts_linux.go +++ b/vendor/github.com/containerd/containerd/task_opts_linux.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/task_opts_windows.go b/vendor/github.com/containerd/containerd/task_opts_windows.go index d77402c67..60836bc8f 100644 --- a/vendor/github.com/containerd/containerd/task_opts_windows.go +++ b/vendor/github.com/containerd/containerd/task_opts_windows.go @@ -1,3 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package containerd import ( diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf index 66a3d6320..42ac8664a 100644 --- a/vendor/github.com/containerd/containerd/vendor.conf +++ b/vendor/github.com/containerd/containerd/vendor.conf @@ -19,8 +19,6 @@ github.com/opencontainers/runtime-spec v1.0.1 github.com/opencontainers/runc a618ab5a0186905949ee463dbb762c3d23e12a80 github.com/sirupsen/logrus v1.0.0 github.com/containerd/btrfs cc52c4dea2ce11a44e6639e561bb5c2af9ada9e3 -github.com/stretchr/testify v1.1.4 -github.com/davecgh/go-spew v1.1.0 github.com/pmezard/go-difflib v1.0.0 github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c @@ -30,7 +28,7 @@ github.com/pkg/errors v0.8.0 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys github.com/opencontainers/image-spec v1.0.1 -github.com/containerd/continuity 1a794c0014a7b786879312d1dab309ba96ead8bc +github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 @@ -42,3 +40,5 @@ golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 github.com/dmcgowan/go-tar go1.10 github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 +github.com/gotestyourself/gotestyourself 44dbf532bbf5767611f6f2a61bded572e337010a +github.com/google/go-cmp v0.1.0 diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go new file mode 100644 index 000000000..6ffd9519b --- /dev/null +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package version + +var ( + // Package is filled at linking time + Package = "github.com/containerd/containerd" + + // Version holds the complete version number. Filled in at linking time. + Version = "1.0.0+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. 
+ Revision = "" +) diff --git a/vendor/github.com/containerd/continuity/fs/diff.go b/vendor/github.com/containerd/continuity/fs/diff.go index 9073d0d92..f2300e845 100644 --- a/vendor/github.com/containerd/continuity/fs/diff.go +++ b/vendor/github.com/containerd/continuity/fs/diff.go @@ -273,7 +273,7 @@ func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err if rmdir != "" && strings.HasPrefix(f1.path, rmdir) { f1 = nil continue - } else if rmdir == "" && f1.f.IsDir() { + } else if f1.f.IsDir() { rmdir = f1.path + string(os.PathSeparator) } else if rmdir != "" { rmdir = "" diff --git a/vendor/github.com/containerd/continuity/fs/stat_linux.go b/vendor/github.com/containerd/continuity/fs/stat_linux.go index 25bc65258..1dbb0212b 100644 --- a/vendor/github.com/containerd/continuity/fs/stat_linux.go +++ b/vendor/github.com/containerd/continuity/fs/stat_linux.go @@ -22,5 +22,6 @@ func StatMtime(st *syscall.Stat_t) syscall.Timespec { // StatATimeAsTime returns st.Atim as a time.Time func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atim.Sec, st.Atim.Nsec) + // The int64 conversions ensure the line compiles for 32-bit systems as well. + return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert } diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_openbsd.go b/vendor/github.com/containerd/continuity/sysx/xattr_openbsd.go new file mode 100644 index 000000000..723619977 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr_openbsd.go @@ -0,0 +1,7 @@ +package sysx + +import ( + "errors" +) + +var unsupported = errors.New("extended attributes unsupported on OpenBSD") diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go index a8dd9f245..c8389bc13 100644 --- a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go +++ b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go @@ -1,4 +1,4 @@ -// +build freebsd solaris +// +build freebsd openbsd solaris package sysx diff --git a/vendor/github.com/containerd/go-runc/LICENSE b/vendor/github.com/containerd/go-runc/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containerd/go-runc/README.md b/vendor/github.com/containerd/go-runc/README.md
new file mode 100644
index 000000000..239601f1e
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/README.md
@@ -0,0 +1,14 @@
+# go-runc
+
+[![Build Status](https://travis-ci.org/containerd/go-runc.svg?branch=master)](https://travis-ci.org/containerd/go-runc)
+
+
+This is a package for consuming the [runc](https://github.com/opencontainers/runc) binary in your Go applications.
+It tries to expose all the settings and features of the runc CLI. If there is something missing then add it, it's open source!
+
+This needs runc @ [a9610f2c0](https://github.com/opencontainers/runc/commit/a9610f2c0237d2636d05a031ec8659a70e75ffeb)
+or greater.
+
+## Docs
+
+Docs can be found at [godoc.org](https://godoc.org/github.com/containerd/go-runc).
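Before the vendored sources, a minimal consumer of the package for orientation. This is a sketch: List, which shells out to `runc list --format=json`, is part of go-runc's API at this revision, but the exact method set should be confirmed against the vendored files that follow.

    package main

    import (
        "context"
        "fmt"

        runc "github.com/containerd/go-runc"
    )

    func main() {
        // The zero value shells out to DefaultCommand ("runc") from PATH.
        r := &runc.Runc{}
        containers, err := r.List(context.Background())
        if err != nil {
            panic(err)
        }
        for _, c := range containers {
            fmt.Println(c.ID, c.Status, c.Pid)
        }
    }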
diff --git a/vendor/github.com/containerd/go-runc/command_linux.go b/vendor/github.com/containerd/go-runc/command_linux.go new file mode 100644 index 000000000..8b4a502d6 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/command_linux.go @@ -0,0 +1,23 @@ +package runc + +import ( + "context" + "os/exec" + "syscall" +) + +func (r *Runc) command(context context.Context, args ...string) *exec.Cmd { + command := r.Command + if command == "" { + command = DefaultCommand + } + cmd := exec.CommandContext(context, command, append(r.args(), args...)...) + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: r.Setpgid, + } + if r.PdeathSignal != 0 { + cmd.SysProcAttr.Pdeathsig = r.PdeathSignal + } + + return cmd +} diff --git a/vendor/github.com/containerd/go-runc/command_other.go b/vendor/github.com/containerd/go-runc/command_other.go new file mode 100644 index 000000000..bf03a2f9d --- /dev/null +++ b/vendor/github.com/containerd/go-runc/command_other.go @@ -0,0 +1,16 @@ +// +build !linux + +package runc + +import ( + "context" + "os/exec" +) + +func (r *Runc) command(context context.Context, args ...string) *exec.Cmd { + command := r.Command + if command == "" { + command = DefaultCommand + } + return exec.CommandContext(context, command, append(r.args(), args...)...) +} diff --git a/vendor/github.com/containerd/go-runc/console.go b/vendor/github.com/containerd/go-runc/console.go new file mode 100644 index 000000000..141e64dde --- /dev/null +++ b/vendor/github.com/containerd/go-runc/console.go @@ -0,0 +1,141 @@ +package runc + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + + "github.com/containerd/console" + "golang.org/x/sys/unix" +) + +// NewConsoleSocket creates a new unix socket at the provided path to accept a +// pty master created by runc for use by the container +func NewConsoleSocket(path string) (*Socket, error) { + abs, err := filepath.Abs(path) + if err != nil { + return nil, err + } + addr, err := net.ResolveUnixAddr("unix", abs) + if err != nil { + return nil, err + } + l, err := net.ListenUnix("unix", addr) + if err != nil { + return nil, err + } + return &Socket{ + l: l, + }, nil +} + +// NewTempConsoleSocket returns a temp console socket for use with a container +// On Close(), the socket is deleted +func NewTempConsoleSocket() (*Socket, error) { + dir, err := ioutil.TempDir("", "pty") + if err != nil { + return nil, err + } + abs, err := filepath.Abs(filepath.Join(dir, "pty.sock")) + if err != nil { + return nil, err + } + addr, err := net.ResolveUnixAddr("unix", abs) + if err != nil { + return nil, err + } + l, err := net.ListenUnix("unix", addr) + if err != nil { + return nil, err + } + return &Socket{ + l: l, + rmdir: true, + }, nil +} + +// Socket is a unix socket that accepts the pty master created by runc +type Socket struct { + rmdir bool + l *net.UnixListener +} + +// Path returns the path to the unix socket on disk +func (c *Socket) Path() string { + return c.l.Addr().String() +} + +// recvFd waits for a file descriptor to be sent over the given AF_UNIX +// socket. The file name of the remote file descriptor will be recreated +// locally (it is sent as non-auxiliary data in the same payload). 
+func recvFd(socket *net.UnixConn) (*os.File, error) {
+	const MaxNameLen = 4096
+	var oobSpace = unix.CmsgSpace(4)
+
+	name := make([]byte, MaxNameLen)
+	oob := make([]byte, oobSpace)
+
+	n, oobn, _, _, err := socket.ReadMsgUnix(name, oob)
+	if err != nil {
+		return nil, err
+	}
+
+	if n >= MaxNameLen || oobn != oobSpace {
+		return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn)
+	}
+
+	// Truncate.
+	name = name[:n]
+	oob = oob[:oobn]
+
+	scms, err := unix.ParseSocketControlMessage(oob)
+	if err != nil {
+		return nil, err
+	}
+	if len(scms) != 1 {
+		return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms))
+	}
+	scm := scms[0]
+
+	fds, err := unix.ParseUnixRights(&scm)
+	if err != nil {
+		return nil, err
+	}
+	if len(fds) != 1 {
+		return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds))
+	}
+	fd := uintptr(fds[0])
+
+	return os.NewFile(fd, string(name)), nil
+}
+
+// ReceiveMaster blocks until the socket receives the pty master
+func (c *Socket) ReceiveMaster() (console.Console, error) {
+	conn, err := c.l.Accept()
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+	uc, ok := conn.(*net.UnixConn)
+	if !ok {
+		return nil, fmt.Errorf("received connection which was not a unix socket")
+	}
+	f, err := recvFd(uc)
+	if err != nil {
+		return nil, err
+	}
+	return console.ConsoleFromFile(f)
+}
+
+// Close closes the unix socket
+func (c *Socket) Close() error {
+	err := c.l.Close()
+	if c.rmdir {
+		if rerr := os.RemoveAll(filepath.Dir(c.Path())); err == nil {
+			err = rerr
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/containerd/go-runc/container.go b/vendor/github.com/containerd/go-runc/container.go
new file mode 100644
index 000000000..7981aac9c
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/container.go
@@ -0,0 +1,14 @@
+package runc
+
+import "time"
+
+// Container holds information for a runc container
+type Container struct {
+	ID          string            `json:"id"`
+	Pid         int               `json:"pid"`
+	Status      string            `json:"status"`
+	Bundle      string            `json:"bundle"`
+	Rootfs      string            `json:"rootfs"`
+	Created     time.Time         `json:"created"`
+	Annotations map[string]string `json:"annotations"`
+}
diff --git a/vendor/github.com/containerd/go-runc/events.go b/vendor/github.com/containerd/go-runc/events.go
new file mode 100644
index 000000000..ca0e0481c
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/events.go
@@ -0,0 +1,84 @@
+package runc
+
+type Event struct {
+	// Type is the event type generated by runc
+	// If the type is "error" then check the Err field on the event for
+	// the actual error
+	Type  string `json:"type"`
+	ID    string `json:"id"`
+	Stats *Stats `json:"data,omitempty"`
+	// Err holds a read error if we were unable to decode the event from runc
+	Err error `json:"-"`
+}
+
+type Stats struct {
+	Cpu     Cpu                `json:"cpu"`
+	Memory  Memory             `json:"memory"`
+	Pids    Pids               `json:"pids"`
+	Blkio   Blkio              `json:"blkio"`
+	Hugetlb map[string]Hugetlb `json:"hugetlb"`
+}
+
+type Hugetlb struct {
+	Usage   uint64 `json:"usage,omitempty"`
+	Max     uint64 `json:"max,omitempty"`
+	Failcnt uint64 `json:"failcnt"`
+}
+
+type BlkioEntry struct {
+	Major uint64 `json:"major,omitempty"`
+	Minor uint64 `json:"minor,omitempty"`
+	Op    string `json:"op,omitempty"`
+	Value uint64 `json:"value,omitempty"`
+}
+
+type Blkio struct {
+	IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
+	IoServicedRecursive     []BlkioEntry `json:"ioServicedRecursive,omitempty"`
+	IoQueuedRecursive       []BlkioEntry `json:"ioQueueRecursive,omitempty"`
+
IoServiceTimeRecursive []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"` + IoWaitTimeRecursive []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"` + IoMergedRecursive []BlkioEntry `json:"ioMergedRecursive,omitempty"` + IoTimeRecursive []BlkioEntry `json:"ioTimeRecursive,omitempty"` + SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"` +} + +type Pids struct { + Current uint64 `json:"current,omitempty"` + Limit uint64 `json:"limit,omitempty"` +} + +type Throttling struct { + Periods uint64 `json:"periods,omitempty"` + ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"` + ThrottledTime uint64 `json:"throttledTime,omitempty"` +} + +type CpuUsage struct { + // Units: nanoseconds. + Total uint64 `json:"total,omitempty"` + Percpu []uint64 `json:"percpu,omitempty"` + Kernel uint64 `json:"kernel"` + User uint64 `json:"user"` +} + +type Cpu struct { + Usage CpuUsage `json:"usage,omitempty"` + Throttling Throttling `json:"throttling,omitempty"` +} + +type MemoryEntry struct { + Limit uint64 `json:"limit"` + Usage uint64 `json:"usage,omitempty"` + Max uint64 `json:"max,omitempty"` + Failcnt uint64 `json:"failcnt"` +} + +type Memory struct { + Cache uint64 `json:"cache,omitempty"` + Usage MemoryEntry `json:"usage,omitempty"` + Swap MemoryEntry `json:"swap,omitempty"` + Kernel MemoryEntry `json:"kernel,omitempty"` + KernelTCP MemoryEntry `json:"kernelTCP,omitempty"` + Raw map[string]uint64 `json:"raw,omitempty"` +} diff --git a/vendor/github.com/containerd/go-runc/io.go b/vendor/github.com/containerd/go-runc/io.go new file mode 100644 index 000000000..0c3f1a9b3 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/io.go @@ -0,0 +1,213 @@ +package runc + +import ( + "io" + "os" + "os/exec" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +type IO interface { + io.Closer + Stdin() io.WriteCloser + Stdout() io.ReadCloser + Stderr() io.ReadCloser + Set(*exec.Cmd) +} + +type StartCloser interface { + CloseAfterStart() error +} + +// NewPipeIO creates pipe pairs to be used with runc +func NewPipeIO(uid, gid int) (i IO, err error) { + var pipes []*pipe + // cleanup in case of an error + defer func() { + if err != nil { + for _, p := range pipes { + p.Close() + } + } + }() + stdin, err := newPipe() + if err != nil { + return nil, err + } + pipes = append(pipes, stdin) + if err = unix.Fchown(int(stdin.r.Fd()), uid, gid); err != nil { + return nil, errors.Wrap(err, "failed to chown stdin") + } + + stdout, err := newPipe() + if err != nil { + return nil, err + } + pipes = append(pipes, stdout) + if err = unix.Fchown(int(stdout.w.Fd()), uid, gid); err != nil { + return nil, errors.Wrap(err, "failed to chown stdout") + } + + stderr, err := newPipe() + if err != nil { + return nil, err + } + pipes = append(pipes, stderr) + if err = unix.Fchown(int(stderr.w.Fd()), uid, gid); err != nil { + return nil, errors.Wrap(err, "failed to chown stderr") + } + + return &pipeIO{ + in: stdin, + out: stdout, + err: stderr, + }, nil +} + +func newPipe() (*pipe, error) { + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + return &pipe{ + r: r, + w: w, + }, nil +} + +type pipe struct { + r *os.File + w *os.File +} + +func (p *pipe) Close() error { + err := p.r.Close() + if werr := p.w.Close(); err == nil { + err = werr + } + return err +} + +type pipeIO struct { + in *pipe + out *pipe + err *pipe +} + +func (i *pipeIO) Stdin() io.WriteCloser { + return i.in.w +} + +func (i *pipeIO) Stdout() io.ReadCloser { + return i.out.r +} + +func (i *pipeIO) Stderr() io.ReadCloser { + return 
i.err.r +} + +func (i *pipeIO) Close() error { + var err error + for _, v := range []*pipe{ + i.in, + i.out, + i.err, + } { + if cerr := v.Close(); err == nil { + err = cerr + } + } + return err +} + +func (i *pipeIO) CloseAfterStart() error { + for _, f := range []*os.File{ + i.out.w, + i.err.w, + } { + f.Close() + } + return nil +} + +// Set sets the io to the exec.Cmd +func (i *pipeIO) Set(cmd *exec.Cmd) { + cmd.Stdin = i.in.r + cmd.Stdout = i.out.w + cmd.Stderr = i.err.w +} + +func NewSTDIO() (IO, error) { + return &stdio{}, nil +} + +type stdio struct { +} + +func (s *stdio) Close() error { + return nil +} + +func (s *stdio) Set(cmd *exec.Cmd) { + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr +} + +func (s *stdio) Stdin() io.WriteCloser { + return os.Stdin +} + +func (s *stdio) Stdout() io.ReadCloser { + return os.Stdout +} + +func (s *stdio) Stderr() io.ReadCloser { + return os.Stderr +} + +// NewNullIO returns IO setup for /dev/null use with runc +func NewNullIO() (IO, error) { + f, err := os.Open(os.DevNull) + if err != nil { + return nil, err + } + return &nullIO{ + devNull: f, + }, nil +} + +type nullIO struct { + devNull *os.File +} + +func (n *nullIO) Close() error { + // this should be closed after start but if not + // make sure we close the file but don't return the error + n.devNull.Close() + return nil +} + +func (n *nullIO) Stdin() io.WriteCloser { + return nil +} + +func (n *nullIO) Stdout() io.ReadCloser { + return nil +} + +func (n *nullIO) Stderr() io.ReadCloser { + return nil +} + +func (n *nullIO) Set(c *exec.Cmd) { + // don't set STDIN here + c.Stdout = n.devNull + c.Stderr = n.devNull +} + +func (n *nullIO) CloseAfterStart() error { + return n.devNull.Close() +} diff --git a/vendor/github.com/containerd/go-runc/monitor.go b/vendor/github.com/containerd/go-runc/monitor.go new file mode 100644 index 000000000..2d62c5a41 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/monitor.go @@ -0,0 +1,60 @@ +package runc + +import ( + "os/exec" + "syscall" + "time" +) + +var Monitor ProcessMonitor = &defaultMonitor{} + +type Exit struct { + Timestamp time.Time + Pid int + Status int +} + +// ProcessMonitor is an interface for process monitoring +// +// It allows daemons using go-runc to have a SIGCHLD handler +// to handle exits without introducing races between the handler +// and go's exec.Cmd +// These methods should match the methods exposed by exec.Cmd to provide +// a consistent experience for the caller +type ProcessMonitor interface { + Start(*exec.Cmd) (chan Exit, error) + Wait(*exec.Cmd, chan Exit) (int, error) +} + +type defaultMonitor struct { +} + +func (m *defaultMonitor) Start(c *exec.Cmd) (chan Exit, error) { + if err := c.Start(); err != nil { + return nil, err + } + ec := make(chan Exit, 1) + go func() { + var status int + if err := c.Wait(); err != nil { + status = 255 + if exitErr, ok := err.(*exec.ExitError); ok { + if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok { + status = ws.ExitStatus() + } + } + } + ec <- Exit{ + Timestamp: time.Now(), + Pid: c.Process.Pid, + Status: status, + } + close(ec) + }() + return ec, nil +} + +func (m *defaultMonitor) Wait(c *exec.Cmd, ec chan Exit) (int, error) { + e := <-ec + return e.Status, nil +} diff --git a/vendor/github.com/containerd/go-runc/runc.go b/vendor/github.com/containerd/go-runc/runc.go new file mode 100644 index 000000000..df76ad77a --- /dev/null +++ b/vendor/github.com/containerd/go-runc/runc.go @@ -0,0 +1,660 @@ +package runc + +import ( + "context" + "encoding/json" + 
"errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Format is the type of log formatting options avaliable +type Format string + +const ( + none Format = "" + JSON Format = "json" + Text Format = "text" + // DefaultCommand is the default command for Runc + DefaultCommand = "runc" +) + +// Runc is the client to the runc cli +type Runc struct { + //If command is empty, DefaultCommand is used + Command string + Root string + Debug bool + Log string + LogFormat Format + PdeathSignal syscall.Signal + Setpgid bool + Criu string + SystemdCgroup bool +} + +// List returns all containers created inside the provided runc root directory +func (r *Runc) List(context context.Context) ([]*Container, error) { + data, err := cmdOutput(r.command(context, "list", "--format=json"), false) + if err != nil { + return nil, err + } + var out []*Container + if err := json.Unmarshal(data, &out); err != nil { + return nil, err + } + return out, nil +} + +// State returns the state for the container provided by id +func (r *Runc) State(context context.Context, id string) (*Container, error) { + data, err := cmdOutput(r.command(context, "state", id), true) + if err != nil { + return nil, fmt.Errorf("%s: %s", err, data) + } + var c Container + if err := json.Unmarshal(data, &c); err != nil { + return nil, err + } + return &c, nil +} + +type ConsoleSocket interface { + Path() string +} + +type CreateOpts struct { + IO + // PidFile is a path to where a pid file should be created + PidFile string + ConsoleSocket ConsoleSocket + Detach bool + NoPivot bool + NoNewKeyring bool + ExtraFiles []*os.File +} + +func (o *CreateOpts) args() (out []string, err error) { + if o.PidFile != "" { + abs, err := filepath.Abs(o.PidFile) + if err != nil { + return nil, err + } + out = append(out, "--pid-file", abs) + } + if o.ConsoleSocket != nil { + out = append(out, "--console-socket", o.ConsoleSocket.Path()) + } + if o.NoPivot { + out = append(out, "--no-pivot") + } + if o.NoNewKeyring { + out = append(out, "--no-new-keyring") + } + if o.Detach { + out = append(out, "--detach") + } + if o.ExtraFiles != nil { + out = append(out, "--preserve-fds", strconv.Itoa(len(o.ExtraFiles))) + } + return out, nil +} + +// Create creates a new container and returns its pid if it was created successfully +func (r *Runc) Create(context context.Context, id, bundle string, opts *CreateOpts) error { + args := []string{"create", "--bundle", bundle} + if opts != nil { + oargs, err := opts.args() + if err != nil { + return err + } + args = append(args, oargs...) + } + cmd := r.command(context, append(args, id)...) 
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	if opts != nil {
+		cmd.ExtraFiles = opts.ExtraFiles
+	}
+
+	if cmd.Stdout == nil && cmd.Stderr == nil {
+		data, err := cmdOutput(cmd, true)
+		if err != nil {
+			return fmt.Errorf("%s: %s", err, data)
+		}
+		return nil
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return err
+	}
+	if opts != nil && opts.IO != nil {
+		if c, ok := opts.IO.(StartCloser); ok {
+			if err := c.CloseAfterStart(); err != nil {
+				return err
+			}
+		}
+	}
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+	}
+	return err
+}
+
+// Start will start an already created container
+func (r *Runc) Start(context context.Context, id string) error {
+	return r.runOrError(r.command(context, "start", id))
+}
+
+type ExecOpts struct {
+	IO
+	PidFile       string
+	ConsoleSocket ConsoleSocket
+	Detach        bool
+}
+
+func (o *ExecOpts) args() (out []string, err error) {
+	if o.ConsoleSocket != nil {
+		out = append(out, "--console-socket", o.ConsoleSocket.Path())
+	}
+	if o.Detach {
+		out = append(out, "--detach")
+	}
+	if o.PidFile != "" {
+		abs, err := filepath.Abs(o.PidFile)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, "--pid-file", abs)
+	}
+	return out, nil
+}
+
+// Exec executes an additional process inside the container based on a full
+// OCI Process specification
+func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts *ExecOpts) error {
+	f, err := ioutil.TempFile("", "runc-process")
+	if err != nil {
+		return err
+	}
+	defer os.Remove(f.Name())
+	err = json.NewEncoder(f).Encode(spec)
+	f.Close()
+	if err != nil {
+		return err
+	}
+	args := []string{"exec", "--process", f.Name()}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return err
+		}
+		args = append(args, oargs...)
+	}
+	cmd := r.command(context, append(args, id)...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	if cmd.Stdout == nil && cmd.Stderr == nil {
+		data, err := cmdOutput(cmd, true)
+		if err != nil {
+			return fmt.Errorf("%s: %s", err, data)
+		}
+		return nil
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return err
+	}
+	if opts != nil && opts.IO != nil {
+		if c, ok := opts.IO.(StartCloser); ok {
+			if err := c.CloseAfterStart(); err != nil {
+				return err
+			}
+		}
+	}
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+	}
+	return err
+}
+
+// Run runs the create, start, delete lifecycle of the container
+// and returns its exit status after it has exited
+func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) (int, error) {
+	args := []string{"run", "--bundle", bundle}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return -1, err
+		}
+		args = append(args, oargs...)
+	}
+	cmd := r.command(context, append(args, id)...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return -1, err
+	}
+	return Monitor.Wait(cmd, ec)
+}
+
+type DeleteOpts struct {
+	Force bool
+}
+
+func (o *DeleteOpts) args() (out []string) {
+	if o.Force {
+		out = append(out, "--force")
+	}
+	return out
+}
+
+// Delete deletes the container
+func (r *Runc) Delete(context context.Context, id string, opts *DeleteOpts) error {
+	args := []string{"delete"}
+	if opts != nil {
+		args = append(args, opts.args()...)
+	}
+	return r.runOrError(r.command(context, append(args, id)...))
+}
+
+// KillOpts specifies options for killing a container and its processes
+type KillOpts struct {
+	All bool
+}
+
+func (o *KillOpts) args() (out []string) {
+	if o.All {
+		out = append(out, "--all")
+	}
+	return out
+}
+
+// Kill sends the specified signal to the container
+func (r *Runc) Kill(context context.Context, id string, sig int, opts *KillOpts) error {
+	args := []string{
+		"kill",
+	}
+	if opts != nil {
+		args = append(args, opts.args()...)
+	}
+	return r.runOrError(r.command(context, append(args, id, strconv.Itoa(sig))...))
+}
+
+// Stats returns the stats for a container like cpu, memory, and io
+func (r *Runc) Stats(context context.Context, id string) (*Stats, error) {
+	cmd := r.command(context, "events", "--stats", id)
+	rd, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		rd.Close()
+		Monitor.Wait(cmd, ec)
+	}()
+	var e Event
+	if err := json.NewDecoder(rd).Decode(&e); err != nil {
+		return nil, err
+	}
+	return e.Stats, nil
+}
+
+// Events returns an event stream from runc for a container with stats and OOM notifications
+func (r *Runc) Events(context context.Context, id string, interval time.Duration) (chan *Event, error) {
+	cmd := r.command(context, "events", fmt.Sprintf("--interval=%ds", int(interval.Seconds())), id)
+	rd, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		rd.Close()
+		return nil, err
+	}
+	var (
+		dec = json.NewDecoder(rd)
+		c   = make(chan *Event, 128)
+	)
+	go func() {
+		defer func() {
+			close(c)
+			rd.Close()
+			Monitor.Wait(cmd, ec)
+		}()
+		for {
+			var e Event
+			if err := dec.Decode(&e); err != nil {
+				if err == io.EOF {
+					return
+				}
+				e = Event{
+					Type: "error",
+					Err:  err,
+				}
+			}
+			c <- &e
+		}
+	}()
+	return c, nil
+}
+
+// Pause the container with the provided id
+func (r *Runc) Pause(context context.Context, id string) error {
+	return r.runOrError(r.command(context, "pause", id))
+}
+
+// Resume the container with the provided id
+func (r *Runc) Resume(context context.Context, id string) error {
+	return r.runOrError(r.command(context, "resume", id))
+}
+
+// Ps lists all the processes inside the container returning their pids
+func (r *Runc) Ps(context context.Context, id string) ([]int, error) {
+	data, err := cmdOutput(r.command(context, "ps", "--format", "json", id), true)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %s", err, data)
+	}
+	var pids []int
+	if err := json.Unmarshal(data, &pids); err != nil {
+		return nil, err
+	}
+	return pids, nil
+}
+
+type CheckpointOpts struct {
+	// ImagePath is the path for saving the criu image file
+	ImagePath string
+	// WorkDir is the working directory for criu
+	WorkDir string
+	// ParentPath is the path for previous image files from a pre-dump
+	ParentPath string
+	// AllowOpenTCP allows open tcp connections to be checkpointed
+	AllowOpenTCP bool
+	// AllowExternalUnixSockets allows external unix sockets to be checkpointed
+	AllowExternalUnixSockets bool
+	// AllowTerminal allows the terminal (pty) to be checkpointed with a container
+	AllowTerminal bool
+	// CriuPageServer is the address:port for the criu page server
+	CriuPageServer string
+	// FileLocks handles file locks held by the container
+	FileLocks bool
+	// Cgroups is the cgroup mode for how to handle the checkpoint of a container's cgroups
+	Cgroups CgroupMode
+	// EmptyNamespaces creates a namespace for the container but does not save its properties
+	// Provide the namespaces you wish to be checkpointed without their settings on restore
+	EmptyNamespaces []string
+}
+
+type CgroupMode string
+
+const (
+	Soft   CgroupMode = "soft"
+	Full   CgroupMode = "full"
+	Strict CgroupMode = "strict"
+)
+
+func (o *CheckpointOpts) args() (out []string) {
+	if o.ImagePath != "" {
+		out = append(out, "--image-path", o.ImagePath)
+	}
+	if o.WorkDir != "" {
+		out = append(out, "--work-path", o.WorkDir)
+	}
+	if o.ParentPath != "" {
+		out = append(out, "--parent-path", o.ParentPath)
+	}
+	if o.AllowOpenTCP {
+		out = append(out, "--tcp-established")
+	}
+	if o.AllowExternalUnixSockets {
+		out = append(out, "--ext-unix-sk")
+	}
+	if o.AllowTerminal {
+		out = append(out, "--shell-job")
+	}
+	if o.CriuPageServer != "" {
+		out = append(out, "--page-server", o.CriuPageServer)
+	}
+	if o.FileLocks {
+		out = append(out, "--file-locks")
+	}
+	if string(o.Cgroups) != "" {
+		out = append(out, "--manage-cgroups-mode", string(o.Cgroups))
+	}
+	for _, ns := range o.EmptyNamespaces {
+		out = append(out, "--empty-ns", ns)
+	}
+	return out
+}
+
+type CheckpointAction func([]string) []string
+
+// LeaveRunning keeps the container running after the checkpoint has been completed
+func LeaveRunning(args []string) []string {
+	return append(args, "--leave-running")
+}
+
+// PreDump allows a pre-dump of the checkpoint to be made and completed later
+func PreDump(args []string) []string {
+	return append(args, "--pre-dump")
+}
+
+// Checkpoint allows you to checkpoint a container using criu
+func (r *Runc) Checkpoint(context context.Context, id string, opts *CheckpointOpts, actions ...CheckpointAction) error {
+	args := []string{"checkpoint"}
+	if opts != nil {
+		args = append(args, opts.args()...)
+	}
+	for _, a := range actions {
+		args = a(args)
+	}
+	return r.runOrError(r.command(context, append(args, id)...))
+}
+
+type RestoreOpts struct {
+	CheckpointOpts
+	IO
+
+	Detach      bool
+	PidFile     string
+	NoSubreaper bool
+	NoPivot     bool
+}
+
+func (o *RestoreOpts) args() ([]string, error) {
+	out := o.CheckpointOpts.args()
+	if o.Detach {
+		out = append(out, "--detach")
+	}
+	if o.PidFile != "" {
+		abs, err := filepath.Abs(o.PidFile)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, "--pid-file", abs)
+	}
+	if o.NoPivot {
+		out = append(out, "--no-pivot")
+	}
+	if o.NoSubreaper {
+		out = append(out, "-no-subreaper")
+	}
+	return out, nil
+}
+
+// Restore restores a container with the provided id from an existing checkpoint
+func (r *Runc) Restore(context context.Context, id, bundle string, opts *RestoreOpts) (int, error) {
+	args := []string{"restore"}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return -1, err
+		}
+		args = append(args, oargs...)
+	}
+	args = append(args, "--bundle", bundle)
+	cmd := r.command(context, append(args, id)...)
+ if opts != nil && opts.IO != nil { + opts.Set(cmd) + } + ec, err := Monitor.Start(cmd) + if err != nil { + return -1, err + } + if opts != nil && opts.IO != nil { + if c, ok := opts.IO.(StartCloser); ok { + if err := c.CloseAfterStart(); err != nil { + return -1, err + } + } + } + return Monitor.Wait(cmd, ec) +} + +// Update updates the current container with the provided resource spec +func (r *Runc) Update(context context.Context, id string, resources *specs.LinuxResources) error { + buf := getBuf() + defer putBuf(buf) + + if err := json.NewEncoder(buf).Encode(resources); err != nil { + return err + } + args := []string{"update", "--resources", "-", id} + cmd := r.command(context, args...) + cmd.Stdin = buf + return r.runOrError(cmd) +} + +var ErrParseRuncVersion = errors.New("unable to parse runc version") + +type Version struct { + Runc string + Commit string + Spec string +} + +// Version returns the runc and runtime-spec versions +func (r *Runc) Version(context context.Context) (Version, error) { + data, err := cmdOutput(r.command(context, "--version"), false) + if err != nil { + return Version{}, err + } + return parseVersion(data) +} + +func parseVersion(data []byte) (Version, error) { + var v Version + parts := strings.Split(strings.TrimSpace(string(data)), "\n") + if len(parts) != 3 { + return v, ErrParseRuncVersion + } + + for i, p := range []struct { + dest *string + split string + }{ + { + dest: &v.Runc, + split: "version ", + }, + { + dest: &v.Commit, + split: ": ", + }, + { + dest: &v.Spec, + split: ": ", + }, + } { + p2 := strings.Split(parts[i], p.split) + if len(p2) != 2 { + return v, fmt.Errorf("unable to parse version line %q", parts[i]) + } + *p.dest = p2[1] + } + return v, nil +} + +func (r *Runc) args() (out []string) { + if r.Root != "" { + out = append(out, "--root", r.Root) + } + if r.Debug { + out = append(out, "--debug") + } + if r.Log != "" { + out = append(out, "--log", r.Log) + } + if r.LogFormat != none { + out = append(out, "--log-format", string(r.LogFormat)) + } + if r.Criu != "" { + out = append(out, "--criu", r.Criu) + } + if r.SystemdCgroup { + out = append(out, "--systemd-cgroup") + } + return out +} + +// runOrError will run the provided command. 
If an error is
+// encountered and neither Stdout nor Stderr was set, the error and the
+// stderr of the command will be returned in the format of <error>: <stderr>
+func (r *Runc) runOrError(cmd *exec.Cmd) error {
+	if cmd.Stdout != nil || cmd.Stderr != nil {
+		ec, err := Monitor.Start(cmd)
+		if err != nil {
+			return err
+		}
+		status, err := Monitor.Wait(cmd, ec)
+		if err == nil && status != 0 {
+			err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+		}
+		return err
+	}
+	data, err := cmdOutput(cmd, true)
+	if err != nil {
+		return fmt.Errorf("%s: %s", err, data)
+	}
+	return nil
+}
+
+func cmdOutput(cmd *exec.Cmd, combined bool) ([]byte, error) {
+	b := getBuf()
+	defer putBuf(b)
+
+	cmd.Stdout = b
+	if combined {
+		cmd.Stderr = b
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return nil, err
+	}
+
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+	}
+
+	return b.Bytes(), err
+}
diff --git a/vendor/github.com/containerd/go-runc/utils.go b/vendor/github.com/containerd/go-runc/utils.go
new file mode 100644
index 000000000..8cb241aca
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/utils.go
@@ -0,0 +1,45 @@
+package runc
+
+import (
+	"bytes"
+	"io/ioutil"
+	"strconv"
+	"sync"
+	"syscall"
+)
+
+// ReadPidFile reads the pid file at the provided path and returns
+// the pid or an error if the read or conversion fails
+func ReadPidFile(path string) (int, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return -1, err
+	}
+	return strconv.Atoi(string(data))
+}
+
+const exitSignalOffset = 128
+
+// exitStatus returns the correct exit status for a process based on whether
+// it was signaled or exited cleanly
+func exitStatus(status syscall.WaitStatus) int {
+	if status.Signaled() {
+		return exitSignalOffset + int(status.Signal())
+	}
+	return status.ExitStatus()
+}
+
+var bytesBufferPool = sync.Pool{
+	New: func() interface{} {
+		return bytes.NewBuffer(nil)
+	},
+}
+
+func getBuf() *bytes.Buffer {
+	return bytesBufferPool.Get().(*bytes.Buffer)
+}
+
+func putBuf(b *bytes.Buffer) {
+	b.Reset()
+	bytesBufferPool.Put(b)
+}
diff --git a/vendor/github.com/dmcgowan/go-tar/LICENSE b/vendor/github.com/dmcgowan/go-tar/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dmcgowan/go-tar/common.go b/vendor/github.com/dmcgowan/go-tar/common.go new file mode 100644 index 000000000..e3609536c --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/common.go @@ -0,0 +1,796 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. +// +// Tape archives (tar) are a file format for storing a sequence of files that +// can be read and written in a streaming manner. +// This package aims to cover most variations of the format, +// including those produced by GNU and BSD tar tools. +package tar + +import ( + "errors" + "fmt" + "io" + "math" + "os" + "path" + "reflect" + "strconv" + "strings" + "time" +) + +// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit +// architectures. If a large value is encountered when decoding, the result +// stored in Header will be the truncated version. + +var ( + ErrHeader = errors.New("tar: invalid tar header") + ErrWriteTooLong = errors.New("tar: write too long") + ErrFieldTooLong = errors.New("tar: header field too long") + ErrWriteAfterClose = errors.New("tar: write after close") + errMissData = errors.New("tar: sparse file references non-existent data") + errUnrefData = errors.New("tar: sparse file contains unreferenced data") + errWriteHole = errors.New("tar: write non-NUL byte in sparse hole") +) + +type headerError []string + +func (he headerError) Error() string { + const prefix = "tar: cannot encode header" + var ss []string + for _, s := range he { + if s != "" { + ss = append(ss, s) + } + } + if len(ss) == 0 { + return prefix + } + return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and ")) +} + +// Type flags for Header.Typeflag. +const ( + // Type '0' indicates a regular file. + TypeReg = '0' + TypeRegA = '\x00' // For legacy support; use TypeReg instead + + // Type '1' to '6' are header-only flags and may not have a data body. + TypeLink = '1' // Hard link + TypeSymlink = '2' // Symbolic link + TypeChar = '3' // Character device node + TypeBlock = '4' // Block device node + TypeDir = '5' // Directory + TypeFifo = '6' // FIFO node + + // Type '7' is reserved. + TypeCont = '7' + + // Type 'x' is used by the PAX format to store key-value records that + // are only relevant to the next file. + // This package transparently handles these types. + TypeXHeader = 'x' + + // Type 'g' is used by the PAX format to store key-value records that + // are relevant to all subsequent files. + // This package only supports parsing and composing such headers, + // but does not currently support persisting the global state across files. + TypeXGlobalHeader = 'g' + + // Type 'S' indicates a sparse file in the GNU format. + // Header.SparseHoles should be populated when using this type. 
+ TypeGNUSparse = 'S' + + // Types 'L' and 'K' are used by the GNU format for a meta file + // used to store the path or link name for the next file. + // This package transparently handles these types. + TypeGNULongName = 'L' + TypeGNULongLink = 'K' +) + +// Keywords for PAX extended header records. +const ( + paxNone = "" // Indicates that no PAX key is suitable + paxPath = "path" + paxLinkpath = "linkpath" + paxSize = "size" + paxUid = "uid" + paxGid = "gid" + paxUname = "uname" + paxGname = "gname" + paxMtime = "mtime" + paxAtime = "atime" + paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid + paxCharset = "charset" // Currently unused + paxComment = "comment" // Currently unused + + paxSchilyXattr = "SCHILY.xattr." + + // Keywords for GNU sparse files in a PAX extended header. + paxGNUSparse = "GNU.sparse." + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// basicKeys is a set of the PAX keys for which we have built-in support. +// This does not contain "charset" or "comment", which are both PAX-specific, +// so adding them as first-class features of Header is unlikely. +// Users can use the PAXRecords field to set it themselves. +var basicKeys = map[string]bool{ + paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true, + paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true, +} + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. +// +// For forward compatibility, users that retrieve a Header from Reader.Next, +// mutate it in some ways, and then pass it back to Writer.WriteHeader +// should do so by creating a new Header and copying the fields +// that they are interested in preserving. +type Header struct { + Typeflag byte // Type of header entry (should be TypeReg for most files) + + Name string // Name of file entry + Linkname string // Target name of link (valid for TypeLink or TypeSymlink) + + Size int64 // Logical file size in bytes + Mode int64 // Permission and mode bits + Uid int // User ID of owner + Gid int // Group ID of owner + Uname string // User name of owner + Gname string // Group name of owner + + // If the Format is unspecified, then Writer.WriteHeader rounds ModTime + // to the nearest second and ignores the AccessTime and ChangeTime fields. + // + // To use AccessTime or ChangeTime, specify the Format as PAX or GNU. + // To use sub-second resolution, specify the Format as PAX. + ModTime time.Time // Modification time + AccessTime time.Time // Access time (requires either PAX or GNU support) + ChangeTime time.Time // Change time (requires either PAX or GNU support) + + Devmajor int64 // Major device number (valid for TypeChar or TypeBlock) + Devminor int64 // Minor device number (valid for TypeChar or TypeBlock) + + // SparseHoles represents a sequence of holes in a sparse file. + // + // A file is sparse if len(SparseHoles) > 0 or Typeflag is TypeGNUSparse. + // If TypeGNUSparse is set, then the format is GNU, otherwise + // the format is PAX (by using GNU-specific PAX records). + // + // A sparse file consists of fragments of data, intermixed with holes + // (described by this field). 
A hole is semantically a block of NUL-bytes,
+	// but does not actually exist within the tar file.
+	// The holes must be sorted in ascending order,
+	// not overlap with each other, and not extend past the specified Size.
+	SparseHoles []SparseEntry
+
+	// Xattrs stores extended attributes as PAX records under the
+	// "SCHILY.xattr." namespace.
+	//
+	// The following are semantically equivalent:
+	//  h.Xattrs[key] = value
+	//  h.PAXRecords["SCHILY.xattr."+key] = value
+	//
+	// When Writer.WriteHeader is called, the contents of Xattrs will take
+	// precedence over those in PAXRecords.
+	//
+	// Deprecated: Use PAXRecords instead.
+	Xattrs map[string]string
+
+	// PAXRecords is a map of PAX extended header records.
+	//
+	// User-defined records should have keys of the following form:
+	//	VENDOR.keyword
+	// Where VENDOR is some namespace in all uppercase, and keyword may
+	// not contain the '=' character (e.g., "GOLANG.pkg.version").
+	// The key and value should be non-empty UTF-8 strings.
+	//
+	// When Writer.WriteHeader is called, PAX records derived from the
+	// other fields in Header take precedence over PAXRecords.
+	PAXRecords map[string]string
+
+	// Format specifies the format of the tar header.
+	//
+	// This is set by Reader.Next as a best-effort guess at the format.
+	// Since the Reader liberally reads some non-compliant files,
+	// it is possible for this to be FormatUnknown.
+	//
+	// If the format is unspecified when Writer.WriteHeader is called,
+	// then it uses the first format (in the order of USTAR, PAX, GNU)
+	// capable of encoding this Header (see Format).
+	Format Format
+}
+
+// SparseEntry represents a Length-sized fragment at Offset in the file.
+type SparseEntry struct{ Offset, Length int64 }
+
+func (s SparseEntry) endOffset() int64 { return s.Offset + s.Length }
+
+// A sparse file can be represented as either a sparseDatas or a sparseHoles.
+// As long as the total size is known, they are equivalent and one can be
+// converted to the other form and back. The various tar formats with sparse
+// file support represent sparse files in the sparseDatas form. That is, they
+// specify the fragments in the file that have data, and treat everything else
+// as having zero bytes. As such, the encoding and decoding logic in this
+// package deals with sparseDatas.
+//
+// However, the external API uses sparseHoles instead of sparseDatas because the
+// zero value of sparseHoles logically represents a normal file (i.e., there are
+// no holes in it). On the other hand, the zero value of sparseDatas implies
+// that the file has no data in it, which is rather odd.
+//
+// As an example, if the underlying raw file contains the 8-byte data:
+//	var compactFile = "abcdefgh"
+//
+// And the sparse map has the following entries:
+//	var spd sparseDatas = []sparseEntry{
+//		{Offset: 2, Length: 5},  // Data fragment for 2..6
+//		{Offset: 18, Length: 3}, // Data fragment for 18..20
+//	}
+//	var sph sparseHoles = []SparseEntry{
+//		{Offset: 0, Length: 2},  // Hole fragment for 0..1
+//		{Offset: 7, Length: 11}, // Hole fragment for 7..17
+//		{Offset: 21, Length: 4}, // Hole fragment for 21..24
+//	}
+//
+// Then the content of the resulting sparse file with a Header.Size of 25 is:
+//	var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type (
+	sparseDatas []SparseEntry
+	sparseHoles []SparseEntry
+)
+
+// validateSparseEntries reports whether sp is a valid sparse map.
+// It does not matter whether sp represents data fragments or hole fragments.
+func validateSparseEntries(sp []SparseEntry, size int64) bool { + // Validate all sparse entries. These are the same checks as performed by + // the BSD tar utility. + if size < 0 { + return false + } + var pre SparseEntry + for _, cur := range sp { + switch { + case cur.Offset < 0 || cur.Length < 0: + return false // Negative values are never okay + case cur.Offset > math.MaxInt64-cur.Length: + return false // Integer overflow with large length + case cur.endOffset() > size: + return false // Region extends beyond the actual size + case pre.endOffset() > cur.Offset: + return false // Regions cannot overlap and must be in order + } + pre = cur + } + return true +} + +// alignSparseEntries mutates src and returns dst where each fragment's +// starting offset is aligned up to the nearest block edge, and each +// ending offset is aligned down to the nearest block edge. +// +// Even though the Go tar Reader and the BSD tar utility can handle entries +// with arbitrary offsets and lengths, the GNU tar utility can only handle +// offsets and lengths that are multiples of blockSize. +func alignSparseEntries(src []SparseEntry, size int64) []SparseEntry { + dst := src[:0] + for _, s := range src { + pos, end := s.Offset, s.endOffset() + pos += blockPadding(+pos) // Round-up to nearest blockSize + if end != size { + end -= blockPadding(-end) // Round-down to nearest blockSize + } + if pos < end { + dst = append(dst, SparseEntry{Offset: pos, Length: end - pos}) + } + } + return dst +} + +// invertSparseEntries converts a sparse map from one form to the other. +// If the input is sparseHoles, then it will output sparseDatas and vice-versa. +// The input must have been already validated. +// +// This function mutates src and returns a normalized map where: +// * adjacent fragments are coalesced together +// * only the last fragment may be empty +// * the endOffset of the last fragment is the total size +func invertSparseEntries(src []SparseEntry, size int64) []SparseEntry { + dst := src[:0] + var pre SparseEntry + for _, cur := range src { + if cur.Length == 0 { + continue // Skip empty fragments + } + pre.Length = cur.Offset - pre.Offset + if pre.Length > 0 { + dst = append(dst, pre) // Only add non-empty fragments + } + pre.Offset = cur.endOffset() + } + pre.Length = size - pre.Offset // Possibly the only empty fragment + return append(dst, pre) +} + +// fileState tracks the number of logical (includes sparse holes) and physical +// (actual in tar archive) bytes remaining for the current file. +// +// Invariant: LogicalRemaining >= PhysicalRemaining +type fileState interface { + LogicalRemaining() int64 + PhysicalRemaining() int64 +} + +// allowedFormats determines which formats can be used. +// The value returned is the logical OR of multiple possible formats. +// If the value is FormatUnknown, then the input Header cannot be encoded +// and an error is returned explaining why. +// +// As a by-product of checking the fields, this function returns paxHdrs, which +// contain all fields that could not be directly encoded. +// A value receiver ensures that this method does not mutate the source Header. +func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) { + format = FormatUSTAR | FormatPAX | FormatGNU + paxHdrs = make(map[string]string) + + var whyNoUSTAR, whyNoPAX, whyNoGNU string + var preferPAX bool // Prefer PAX over USTAR + verifyString := func(s string, size int, name, paxKey string) { + // NUL-terminator is optional for path and linkpath. 
+ // Technically, it is required for uname and gname, + // but neither GNU nor BSD tar checks for it. + tooLong := len(s) > size + allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath + if hasNUL(s) || (tooLong && !allowLongGNU) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s) + format.mustNotBe(FormatGNU) + } + if !isASCII(s) || tooLong { + canSplitUSTAR := paxKey == paxPath + if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s) + format.mustNotBe(FormatUSTAR) + } + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = s + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == s { + paxHdrs[paxKey] = v + } + } + verifyNumeric := func(n int64, size int, name, paxKey string) { + if !fitsInBase256(size, n) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n) + format.mustNotBe(FormatGNU) + } + if !fitsInOctal(size, n) { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n) + format.mustNotBe(FormatUSTAR) + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = strconv.FormatInt(n, 10) + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) { + paxHdrs[paxKey] = v + } + } + verifyTime := func(ts time.Time, size int, name, paxKey string) { + if ts.IsZero() { + return // Always okay + } + if !fitsInBase256(size, ts.Unix()) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts) + format.mustNotBe(FormatGNU) + } + isMtime := paxKey == paxMtime + fitsOctal := fitsInOctal(size, ts.Unix()) + if (isMtime && !fitsOctal) || !isMtime { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts) + format.mustNotBe(FormatUSTAR) + } + needsNano := ts.Nanosecond() != 0 + if !isMtime || !fitsOctal || needsNano { + preferPAX = true // USTAR may truncate sub-second measurements + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = formatPAXTime(ts) + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) { + paxHdrs[paxKey] = v + } + } + + // Check basic fields. + var blk block + v7 := blk.V7() + ustar := blk.USTAR() + gnu := blk.GNU() + verifyString(h.Name, len(v7.Name()), "Name", paxPath) + verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath) + verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname) + verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname) + verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone) + verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid) + verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid) + verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize) + verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone) + verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone) + verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime) + verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime) + verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime) + + // Check for header-only types. + var whyOnlyPAX, whyOnlyGNU string + switch h.Typeflag { + case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse: + // Exclude TypeLink and TypeSymlink, since they may reference directories. 
+ if strings.HasSuffix(h.Name, "/") { + return FormatUnknown, nil, headerError{"filename may not have trailing slash"} + } + case TypeXHeader, TypeGNULongName, TypeGNULongLink: + return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"} + case TypeXGlobalHeader: + if !reflect.DeepEqual(h, Header{Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}) { + return FormatUnknown, nil, headerError{"only PAXRecords may be set for TypeXGlobalHeader"} + } + whyOnlyPAX = "only PAX supports TypeXGlobalHeader" + format.mayOnlyBe(FormatPAX) + } + if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 { + return FormatUnknown, nil, headerError{"negative size on header-only type"} + } + + // Check PAX records. + if len(h.Xattrs) > 0 { + for k, v := range h.Xattrs { + paxHdrs[paxSchilyXattr+k] = v + } + whyOnlyPAX = "only PAX supports Xattrs" + format.mayOnlyBe(FormatPAX) + } + if len(h.PAXRecords) > 0 { + for k, v := range h.PAXRecords { + switch _, exists := paxHdrs[k]; { + case exists: + continue // Do not overwrite existing records + case h.Typeflag == TypeXGlobalHeader: + paxHdrs[k] = v // Copy all records + case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse): + paxHdrs[k] = v // Ignore local records that may conflict + } + } + whyOnlyPAX = "only PAX supports PAXRecords" + format.mayOnlyBe(FormatPAX) + } + for k, v := range paxHdrs { + if !validPAXRecord(k, v) { + return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)} + } + } + + // Check sparse files. + if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse { + if isHeaderOnlyType(h.Typeflag) { + return FormatUnknown, nil, headerError{"header-only type cannot be sparse"} + } + if !validateSparseEntries(h.SparseHoles, h.Size) { + return FormatUnknown, nil, headerError{"invalid sparse holes"} + } + if h.Typeflag == TypeGNUSparse { + whyOnlyGNU = "only GNU supports TypeGNUSparse" + format.mayOnlyBe(FormatGNU) + } else { + whyNoGNU = "GNU supports sparse files only with TypeGNUSparse" + format.mustNotBe(FormatGNU) + } + whyNoUSTAR = "USTAR does not support sparse files" + format.mustNotBe(FormatUSTAR) + } + + // Check desired format. + if wantFormat := h.Format; wantFormat != FormatUnknown { + if wantFormat.has(FormatPAX) && !preferPAX { + wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too + } + format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted + } + if format == FormatUnknown { + switch h.Format { + case FormatUSTAR: + err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU} + case FormatPAX: + err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU} + case FormatGNU: + err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX} + default: + err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU} + } + } + return format, paxHdrs, err +} + +var sysSparseDetect func(f *os.File) (sparseHoles, error) +var sysSparsePunch func(f *os.File, sph sparseHoles) error + +// DetectSparseHoles searches for holes within f to populate SparseHoles +// on supported operating systems and filesystems. +// The file offset is cleared to zero. +// +// When packing a sparse file, DetectSparseHoles should be called prior to +// serializing the header to the archive with Writer.WriteHeader. 
+func (h *Header) DetectSparseHoles(f *os.File) (err error) { + defer func() { + if _, serr := f.Seek(0, io.SeekStart); err == nil { + err = serr + } + }() + + h.SparseHoles = nil + if sysSparseDetect != nil { + sph, err := sysSparseDetect(f) + h.SparseHoles = sph + return err + } + return nil +} + +// PunchSparseHoles destroys the contents of f, and prepares a sparse file +// (on supported operating systems and filesystems) +// with holes punched according to SparseHoles. +// The file offset is cleared to zero. +// +// When extracting a sparse file, PunchSparseHoles should be called prior to +// populating the content of a file with Reader.WriteTo. +func (h *Header) PunchSparseHoles(f *os.File) (err error) { + defer func() { + if _, serr := f.Seek(0, io.SeekStart); err == nil { + err = serr + } + }() + + if err := f.Truncate(0); err != nil { + return err + } + + var size int64 + if len(h.SparseHoles) > 0 { + size = h.SparseHoles[len(h.SparseHoles)-1].endOffset() + } + if !validateSparseEntries(h.SparseHoles, size) { + return errors.New("tar: invalid sparse holes") + } + + if size == 0 { + return nil // For non-sparse files, do nothing (other than Truncate) + } + if sysSparsePunch != nil { + return sysSparsePunch(f, h.SparseHoles) + } + return f.Truncate(size) +} + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. +type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + mode |= os.ModeSticky + } + + // Set file mode bits; clear perm, setuid, setgid, and sticky bits. + switch m := os.FileMode(fi.h.Mode) &^ 07777; m { + case c_ISDIR: + mode |= os.ModeDir + case c_ISFIFO: + mode |= os.ModeNamedPipe + case c_ISLNK: + mode |= os.ModeSymlink + case c_ISBLK: + mode |= os.ModeDevice + case c_ISCHR: + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case c_ISSOCK: + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + mode |= os.ModeSymlink + case TypeChar: + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + mode |= os.ModeDevice + case TypeDir: + mode |= os.ModeDir + case TypeFifo: + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +const ( + // Mode constants from the USTAR spec: + // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + + // Common Unix mode constants; these are not defined in any common tar standard. 
+ // Header.FileInfo understands these, but FileInfoHeader will never produce these. + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// +// Since os.FileInfo's Name method only returns the base name of +// the file it describes, it may be necessary to modify Header.Name +// to provide the full path name of the file. +// +// This function does not populate Header.SparseHoles; +// for sparse file support, additionally call Header.DetectSparseHoles. +func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Typeflag = TypeChar + } else { + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + case fm&os.ModeSocket != 0: + return nil, fmt.Errorf("tar: sockets not supported") + default: + return nil, fmt.Errorf("tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Use the + // original Header to populate all remaining fields. + h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + if sys.SparseHoles != nil { + h.SparseHoles = append([]SparseEntry{}, sys.SparseHoles...) + } + if sys.PAXRecords != nil { + h.PAXRecords = make(map[string]string) + for k, v := range sys.PAXRecords { + h.PAXRecords[k] = v + } + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +// isHeaderOnlyType checks if the given type flag is of the type that has no +// data section even if a size is specified. +func isHeaderOnlyType(flag byte) bool { + switch flag { + case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: + return true + default: + return false + } +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/dmcgowan/go-tar/format.go b/vendor/github.com/dmcgowan/go-tar/format.go new file mode 100644 index 000000000..cf1289534 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/format.go @@ -0,0 +1,301 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import "strings" + +// Format represents the tar archive format. +// +// The original tar format was introduced in Unix V7. +// Since then, there have been multiple competing formats attempting to +// standardize or extend the V7 format to overcome its limitations. +// The most common formats are the USTAR, PAX, and GNU formats, +// each with their own advantages and limitations. +// +// The following table captures the capabilities of each format: +// +// | USTAR | PAX | GNU +// ------------------+--------+-----------+---------- +// Name | 256B | unlimited | unlimited +// Linkname | 100B | unlimited | unlimited +// Size | uint33 | unlimited | uint89 +// Mode | uint21 | uint21 | uint57 +// Uid/Gid | uint21 | unlimited | uint57 +// Uname/Gname | 32B | unlimited | 32B +// ModTime | uint33 | unlimited | int89 +// AccessTime | n/a | unlimited | int89 +// ChangeTime | n/a | unlimited | int89 +// Devmajor/Devminor | uint21 | uint21 | uint57 +// ------------------+--------+-----------+---------- +// string encoding | ASCII | UTF-8 | binary +// sub-second times | no | yes | no +// sparse files | no | yes | yes +// +// The table's upper portion shows the Header fields, where each format reports +// the maximum number of bytes allowed for each string field and +// the integer type used to store each numeric field +// (where timestamps are stored as the number of seconds since the Unix epoch). +// +// The table's lower portion shows specialized features of each format, +// such as supported string encodings, support for sub-second timestamps, +// or support for sparse files. +type Format int + +// Constants to identify various tar formats. +const ( + // Deliberately hide the meaning of constants from public API. + _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc... + + // FormatUnknown indicates that the format is unknown. + FormatUnknown + + // The format of the original Unix V7 tar tool prior to standardization. + formatV7 + + // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988. + // + // While this format is compatible with most tar readers, + // the format has several limitations making it unsuitable for some usages. + // Most notably, it cannot support sparse files, files larger than 8GiB, + // filenames larger than 256 characters, and non-ASCII filenames. + // + // Reference: + // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 + FormatUSTAR + + // FormatPAX represents the PAX header format defined in POSIX.1-2001. + // + // PAX extends USTAR by writing a special file with Typeflag TypeXHeader + // preceding the original header. This file contains a set of key-value + // records, which are used to overcome USTAR's shortcomings, in addition to + // providing the ability to have sub-second resolution for timestamps. + // + // Some newer formats add their own extensions to PAX by defining their + // own keys and assigning certain semantic meaning to the associated values. + // For example, sparse file support in PAX is implemented using keys + // defined by the GNU manual (e.g., "GNU.sparse.map"). + // + // Reference: + // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html + FormatPAX + + // FormatGNU represents the GNU header format. + // + // The GNU header format is older than the USTAR and PAX standards and + // is not compatible with them. 
The GNU format supports + // arbitrary file sizes, filenames of arbitrary encoding and length, + // sparse files, and other features. + // + // It is recommended that PAX be chosen over GNU unless the target + // application can only parse GNU formatted archives. + // + // Reference: + // http://www.gnu.org/software/tar/manual/html_node/Standard.html + FormatGNU + + // Schily's tar format, which is incompatible with USTAR. + // This does not cover STAR extensions to the PAX format; these fall under + // the PAX format. + formatSTAR + + formatMax +) + +func (f Format) has(f2 Format) bool { return f&f2 != 0 } +func (f *Format) mayBe(f2 Format) { *f |= f2 } +func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 } +func (f *Format) mustNotBe(f2 Format) { *f &^= f2 } + +var formatNames = map[Format]string{ + formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR", +} + +func (f Format) String() string { + var ss []string + for f2 := Format(1); f2 < formatMax; f2 <<= 1 { + if f.has(f2) { + ss = append(ss, formatNames[f2]) + } + } + switch len(ss) { + case 0: + return "" + case 1: + return ss[0] + default: + return "(" + strings.Join(ss, " | ") + ")" + } +} + +// Magics used to identify various formats. +const ( + magicGNU, versionGNU = "ustar ", " \x00" + magicUSTAR, versionUSTAR = "ustar\x00", "00" + trailerSTAR = "tar\x00" +) + +// Size constants from various tar specifications. +const ( + blockSize = 512 // Size of each block in a tar stream + nameSize = 100 // Max length of the name field in USTAR format + prefixSize = 155 // Max length of the prefix field in USTAR format +) + +// blockPadding computes the number of bytes needed to pad offset up to the +// nearest block edge where 0 <= n < blockSize. +func blockPadding(offset int64) (n int64) { + return -offset & (blockSize - 1) +} + +var zeroBlock block + +type block [blockSize]byte + +// Convert block to any number of formats. +func (b *block) V7() *headerV7 { return (*headerV7)(b) } +func (b *block) GNU() *headerGNU { return (*headerGNU)(b) } +func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) } +func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) } +func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) } + +// GetFormat checks that the block is a valid tar header based on the checksum. +// It then attempts to guess the specific format based on magic values. +// If the checksum fails, then FormatUnknown is returned. +func (b *block) GetFormat() Format { + // Verify checksum. + var p parser + value := p.parseOctal(b.V7().Chksum()) + chksum1, chksum2 := b.ComputeChecksum() + if p.err != nil || (value != chksum1 && value != chksum2) { + return FormatUnknown + } + + // Guess the magic values. + magic := string(b.USTAR().Magic()) + version := string(b.USTAR().Version()) + trailer := string(b.STAR().Trailer()) + switch { + case magic == magicUSTAR && trailer == trailerSTAR: + return formatSTAR + case magic == magicUSTAR: + return FormatUSTAR | FormatPAX + case magic == magicGNU && version == versionGNU: + return FormatGNU + default: + return formatV7 + } +} + +// SetFormat writes the magic values necessary for specified format +// and then updates the checksum accordingly. +func (b *block) SetFormat(format Format) { + // Set the magic values. + switch { + case format.has(formatV7): + // Do nothing. 
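+		// (Illustrative note, not part of the upstream source: V7
+		// predates magic numbers, so a V7 header is recognized by the
+		// absence of any magic value and nothing needs to be written
+		// here.)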
+ case format.has(FormatGNU): + copy(b.GNU().Magic(), magicGNU) + copy(b.GNU().Version(), versionGNU) + case format.has(formatSTAR): + copy(b.STAR().Magic(), magicUSTAR) + copy(b.STAR().Version(), versionUSTAR) + copy(b.STAR().Trailer(), trailerSTAR) + case format.has(FormatUSTAR | FormatPAX): + copy(b.USTAR().Magic(), magicUSTAR) + copy(b.USTAR().Version(), versionUSTAR) + default: + panic("invalid format") + } + + // Update checksum. + // This field is special in that it is terminated by a NULL then space. + var f formatter + field := b.V7().Chksum() + chksum, _ := b.ComputeChecksum() // Possible values are 256..128776 + f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143 + field[7] = ' ' +} + +// ComputeChecksum computes the checksum for the header block. +// POSIX specifies a sum of the unsigned byte values, but the Sun tar used +// signed byte values. +// We compute and return both. +func (b *block) ComputeChecksum() (unsigned, signed int64) { + for i, c := range b { + if 148 <= i && i < 156 { + c = ' ' // Treat the checksum field itself as all spaces. + } + unsigned += int64(uint8(c)) + signed += int64(int8(c)) + } + return unsigned, signed +} + +// Reset clears the block with all zeros. +func (b *block) Reset() { + *b = block{} +} + +type headerV7 [blockSize]byte + +func (h *headerV7) Name() []byte { return h[000:][:100] } +func (h *headerV7) Mode() []byte { return h[100:][:8] } +func (h *headerV7) UID() []byte { return h[108:][:8] } +func (h *headerV7) GID() []byte { return h[116:][:8] } +func (h *headerV7) Size() []byte { return h[124:][:12] } +func (h *headerV7) ModTime() []byte { return h[136:][:12] } +func (h *headerV7) Chksum() []byte { return h[148:][:8] } +func (h *headerV7) TypeFlag() []byte { return h[156:][:1] } +func (h *headerV7) LinkName() []byte { return h[157:][:100] } + +type headerGNU [blockSize]byte + +func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerGNU) Magic() []byte { return h[257:][:6] } +func (h *headerGNU) Version() []byte { return h[263:][:2] } +func (h *headerGNU) UserName() []byte { return h[265:][:32] } +func (h *headerGNU) GroupName() []byte { return h[297:][:32] } +func (h *headerGNU) DevMajor() []byte { return h[329:][:8] } +func (h *headerGNU) DevMinor() []byte { return h[337:][:8] } +func (h *headerGNU) AccessTime() []byte { return h[345:][:12] } +func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] } +func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) } +func (h *headerGNU) RealSize() []byte { return h[483:][:12] } + +type headerSTAR [blockSize]byte + +func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerSTAR) Magic() []byte { return h[257:][:6] } +func (h *headerSTAR) Version() []byte { return h[263:][:2] } +func (h *headerSTAR) UserName() []byte { return h[265:][:32] } +func (h *headerSTAR) GroupName() []byte { return h[297:][:32] } +func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] } +func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] } +func (h *headerSTAR) Prefix() []byte { return h[345:][:131] } +func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] } +func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] } +func (h *headerSTAR) Trailer() []byte { return h[508:][:4] } + +type headerUSTAR [blockSize]byte + +func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerUSTAR) Magic() []byte { return h[257:][:6] } +func (h *headerUSTAR) Version() []byte { return h[263:][:2] } +func 
(h *headerUSTAR) UserName() []byte { return h[265:][:32] } +func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] } +func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] } +func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] } +func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] } + +type sparseArray []byte + +func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) } +func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] } +func (s sparseArray) MaxEntries() int { return len(s) / 24 } + +type sparseElem []byte + +func (s sparseElem) Offset() []byte { return s[00:][:12] } +func (s sparseElem) Length() []byte { return s[12:][:12] } diff --git a/vendor/github.com/dmcgowan/go-tar/reader.go b/vendor/github.com/dmcgowan/go-tar/reader.go new file mode 100644 index 000000000..1d0673020 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/reader.go @@ -0,0 +1,852 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "io" + "io/ioutil" + "strconv" + "strings" + "time" +) + +// Reader provides sequential access to the contents of a tar archive. +// Reader.Next advances to the next file in the archive (including the first), +// and then Reader can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + pad int64 // Amount of padding (ignored) after current file entry + curr fileReader // Reader for current file entry + blk block // Buffer to use as temporary local storage + + // err is a persistent error. + // It is only the responsibility of every exported method of Reader to + // ensure that this error is sticky. + err error +} + +type fileReader interface { + io.Reader + fileState + + WriteTo(io.Writer) (int64, error) +} + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { + return &Reader{r: r, curr: ®FileReader{r, 0}} +} + +// Next advances to the next entry in the tar archive. +// The Header.Size determines how many bytes can be read for the next file. +// Any remaining data in the current file is automatically discarded. +// +// io.EOF is returned at the end of the input. +func (tr *Reader) Next() (*Header, error) { + if tr.err != nil { + return nil, tr.err + } + hdr, err := tr.next() + tr.err = err + return hdr, err +} + +func (tr *Reader) next() (*Header, error) { + var paxHdrs map[string]string + var gnuLongName, gnuLongLink string + + // Externally, Next iterates through the tar archive as if it is a series of + // files. Internally, the tar format often uses fake "files" to add meta + // data that describes the next file. These meta data "files" should not + // normally be visible to the outside. As such, this loop iterates through + // one or more "header files" until it finds a "normal file". + format := FormatUSTAR | FormatPAX | FormatGNU +loop: + for { + // Discard the remainder of the file and any padding. + if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil { + return nil, err + } + if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil { + return nil, err + } + tr.pad = 0 + + hdr, rawHdr, err := tr.readHeader() + if err != nil { + return nil, err + } + if err := tr.handleRegularFile(hdr); err != nil { + return nil, err + } + format.mayOnlyBe(hdr.Format) + + // Check for PAX/GNU special headers and files. 
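+		// (Illustrative note, not part of the upstream source: a PAX
+		// extended header is itself a file entry whose data section
+		// holds records of the form "%d %s=%s\n", for example:
+		//
+		//	30 mtime=1350244992.023960108\n
+		//
+		// where the leading decimal is the length of the whole record,
+		// including the length field itself.)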
+ switch hdr.Typeflag { + case TypeXHeader, TypeXGlobalHeader: + format.mayOnlyBe(FormatPAX) + paxHdrs, err = parsePAX(tr) + if err != nil { + return nil, err + } + if hdr.Typeflag == TypeXGlobalHeader { + mergePAX(hdr, paxHdrs) + return &Header{ + Typeflag: hdr.Typeflag, + Xattrs: hdr.Xattrs, + PAXRecords: hdr.PAXRecords, + Format: format, + }, nil + } + continue loop // This is a meta header affecting the next header + case TypeGNULongName, TypeGNULongLink: + format.mayOnlyBe(FormatGNU) + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + + var p parser + switch hdr.Typeflag { + case TypeGNULongName: + gnuLongName = p.parseString(realname) + case TypeGNULongLink: + gnuLongLink = p.parseString(realname) + } + continue loop // This is a meta header affecting the next header + default: + // The old GNU sparse format is handled here since it is technically + // just a regular file with additional attributes. + + if err := mergePAX(hdr, paxHdrs); err != nil { + return nil, err + } + if gnuLongName != "" { + hdr.Name = gnuLongName + } + if gnuLongLink != "" { + hdr.Linkname = gnuLongLink + } + if hdr.Typeflag == TypeRegA && strings.HasSuffix(hdr.Name, "/") { + hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories + } + + // The extended headers may have updated the size. + // Thus, setup the regFileReader again after merging PAX headers. + if err := tr.handleRegularFile(hdr); err != nil { + return nil, err + } + + // Sparse formats rely on being able to read from the logical data + // section; there must be a preceding call to handleRegularFile. + if err := tr.handleSparseFile(hdr, rawHdr); err != nil { + return nil, err + } + + // Set the final guess at the format. + if format.has(FormatUSTAR) && format.has(FormatPAX) { + format.mayOnlyBe(FormatUSTAR) + } + hdr.Format = format + return hdr, nil // This is a file, so stop + } + } +} + +// handleRegularFile sets up the current file reader and padding such that it +// can only read the following logical data section. It will properly handle +// special headers that contain no data section. +func (tr *Reader) handleRegularFile(hdr *Header) error { + nb := hdr.Size + if isHeaderOnlyType(hdr.Typeflag) { + nb = 0 + } + if nb < 0 { + return ErrHeader + } + + tr.pad = blockPadding(nb) + tr.curr = ®FileReader{r: tr.r, nb: nb} + return nil +} + +// handleSparseFile checks if the current file is a sparse format of any type +// and sets the curr reader appropriately. +func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error { + var spd sparseDatas + var err error + if hdr.Typeflag == TypeGNUSparse { + spd, err = tr.readOldGNUSparseMap(hdr, rawHdr) + } else { + spd, err = tr.readGNUSparsePAXHeaders(hdr) + } + + // If sp is non-nil, then this is a sparse file. + // Note that it is possible for len(sp) == 0. + if err == nil && spd != nil { + if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) { + return ErrHeader + } + sph := invertSparseEntries(spd, hdr.Size) + tr.curr = &sparseFileReader{tr.curr, sph, 0} + hdr.SparseHoles = append([]SparseEntry{}, sph...) + } + return err +} + +// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. +// If they are found, then this function reads the sparse map and returns it. +// This assumes that 0.0 headers have already been converted to 0.1 headers +// by the the PAX header parsing logic. +func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) { + // Identify the version of GNU headers. 
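+	// (Illustrative note, not part of the upstream source: a sparse 1.0
+	// file is announced by PAX records such as GNU.sparse.major=1,
+	// GNU.sparse.minor=0, GNU.sparse.name=<path>, and
+	// GNU.sparse.realsize=<size>, while the 0.1 variant stores the whole
+	// map in a single GNU.sparse.map record.)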
+ var is1x0 bool + major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor] + switch { + case major == "0" && (minor == "0" || minor == "1"): + is1x0 = false + case major == "1" && minor == "0": + is1x0 = true + case major != "" || minor != "": + return nil, nil // Unknown GNU sparse PAX version + case hdr.PAXRecords[paxGNUSparseMap] != "": + is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess + default: + return nil, nil // Not a PAX format GNU sparse file. + } + hdr.Format.mayOnlyBe(FormatPAX) + + // Update hdr from GNU sparse PAX headers. + if name := hdr.PAXRecords[paxGNUSparseName]; name != "" { + hdr.Name = name + } + size := hdr.PAXRecords[paxGNUSparseSize] + if size == "" { + size = hdr.PAXRecords[paxGNUSparseRealSize] + } + if size != "" { + n, err := strconv.ParseInt(size, 10, 64) + if err != nil { + return nil, ErrHeader + } + hdr.Size = n + } + + // Read the sparse map according to the appropriate format. + if is1x0 { + return readGNUSparseMap1x0(tr.curr) + } + return readGNUSparseMap0x1(hdr.PAXRecords) +} + +// mergePAX merges paxHdrs into hdr for all relevant fields of Header. +func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { + for k, v := range paxHdrs { + if v == "" { + continue // Keep the original USTAR value + } + var id64 int64 + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxUname: + hdr.Uname = v + case paxGname: + hdr.Gname = v + case paxUid: + id64, err = strconv.ParseInt(v, 10, 64) + hdr.Uid = int(id64) // Integer overflow possible + case paxGid: + id64, err = strconv.ParseInt(v, 10, 64) + hdr.Gid = int(id64) // Integer overflow possible + case paxAtime: + hdr.AccessTime, err = parsePAXTime(v) + case paxMtime: + hdr.ModTime, err = parsePAXTime(v) + case paxCtime: + hdr.ChangeTime, err = parsePAXTime(v) + case paxSize: + hdr.Size, err = strconv.ParseInt(v, 10, 64) + default: + if strings.HasPrefix(k, paxSchilyXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxSchilyXattr):]] = v + } + } + if err != nil { + return ErrHeader + } + } + hdr.PAXRecords = paxHdrs + return nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + sbuf := string(buf) + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into format 0.1 + // headers since 0.0 headers were not PAX compliant. + var sparseMap []string + + paxHdrs := make(map[string]string) + for len(sbuf) > 0 { + key, value, residual, err := parsePAXRecord(sbuf) + if err != nil { + return nil, ErrHeader + } + sbuf = residual + + switch key { + case paxGNUSparseOffset, paxGNUSparseNumBytes: + // Validate sparse header order and value. + if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) || + (len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) || + strings.Contains(value, ",") { + return nil, ErrHeader + } + sparseMap = append(sparseMap, value) + default: + paxHdrs[key] = value + } + } + if len(sparseMap) > 0 { + paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") + } + return paxHdrs, nil +} + +// readHeader reads the next block header and assumes that the underlying reader +// is already aligned to a block boundary. It returns the raw block of the +// header in case further processing is required. 
+// +// The err will be set to io.EOF only when one of the following occurs: +// * Exactly 0 bytes are read and EOF is hit. +// * Exactly 1 block of zeros is read and EOF is hit. +// * At least 2 blocks of zeros are read. +func (tr *Reader) readHeader() (*Header, *block, error) { + // Two blocks of zero bytes marks the end of the archive. + if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil { + return nil, nil, err // EOF is okay here; exactly 0 bytes read + } + if bytes.Equal(tr.blk[:], zeroBlock[:]) { + if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil { + return nil, nil, err // EOF is okay here; exactly 1 block of zeros read + } + if bytes.Equal(tr.blk[:], zeroBlock[:]) { + return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read + } + return nil, nil, ErrHeader // Zero block and then non-zero block + } + + // Verify the header matches a known format. + format := tr.blk.GetFormat() + if format == FormatUnknown { + return nil, nil, ErrHeader + } + + var p parser + hdr := new(Header) + + // Unpack the V7 header. + v7 := tr.blk.V7() + hdr.Typeflag = v7.TypeFlag()[0] + hdr.Name = p.parseString(v7.Name()) + hdr.Linkname = p.parseString(v7.LinkName()) + hdr.Size = p.parseNumeric(v7.Size()) + hdr.Mode = p.parseNumeric(v7.Mode()) + hdr.Uid = int(p.parseNumeric(v7.UID())) + hdr.Gid = int(p.parseNumeric(v7.GID())) + hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0) + + // Unpack format specific fields. + if format > formatV7 { + ustar := tr.blk.USTAR() + hdr.Uname = p.parseString(ustar.UserName()) + hdr.Gname = p.parseString(ustar.GroupName()) + hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) + hdr.Devminor = p.parseNumeric(ustar.DevMinor()) + + var prefix string + switch { + case format.has(FormatUSTAR | FormatPAX): + hdr.Format = format + ustar := tr.blk.USTAR() + prefix = p.parseString(ustar.Prefix()) + + // For Format detection, check if block is properly formatted since + // the parser is more liberal than what USTAR actually permits. + notASCII := func(r rune) bool { return r >= 0x80 } + if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 { + hdr.Format = FormatUnknown // Non-ASCII characters in block. + } + nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 } + if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) && + nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) { + hdr.Format = FormatUnknown // Numeric fields must end in NUL + } + case format.has(formatSTAR): + star := tr.blk.STAR() + prefix = p.parseString(star.Prefix()) + hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0) + hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0) + case format.has(FormatGNU): + hdr.Format = format + var p2 parser + gnu := tr.blk.GNU() + if b := gnu.AccessTime(); b[0] != 0 { + hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0) + } + if b := gnu.ChangeTime(); b[0] != 0 { + hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0) + } + + // Prior to Go1.8, the Writer had a bug where it would output + // an invalid tar file in certain rare situations because the logic + // incorrectly believed that the old GNU format had a prefix field. + // This is wrong and leads to an output file that mangles the + // atime and ctime fields, which are often left unused. + // + // In order to continue reading tar files created by former, buggy + // versions of Go, we skeptically parse the atime and ctime fields. 
+ // If we are unable to parse them and the prefix field looks like + // an ASCII string, then we fallback on the pre-Go1.8 behavior + // of treating these fields as the USTAR prefix field. + // + // Note that this will not use the fallback logic for all possible + // files generated by a pre-Go1.8 toolchain. If the generated file + // happened to have a prefix field that parses as valid + // atime and ctime fields (e.g., when they are valid octal strings), + // then it is impossible to distinguish between an valid GNU file + // and an invalid pre-Go1.8 file. + // + // See https://golang.org/issues/12594 + // See https://golang.org/issues/21005 + if p2.err != nil { + hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{} + ustar := tr.blk.USTAR() + if s := p.parseString(ustar.Prefix()); isASCII(s) { + prefix = s + } + hdr.Format = FormatUnknown // Buggy file is not GNU + } + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + return hdr, &tr.blk, p.err +} + +// readOldGNUSparseMap reads the sparse map from the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. +// If it's larger than four entries, then one or more extension headers are used +// to store the rest of the sparse map. +// +// The Header.Size does not reflect the size of any extended headers used. +// Thus, this function will read from the raw io.Reader to fetch extra headers. +// This method mutates blk in the process. +func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) { + // Make sure that the input format is GNU. + // Unfortunately, the STAR format also has a sparse header format that uses + // the same type flag but has a completely different layout. + if blk.GetFormat() != FormatGNU { + return nil, ErrHeader + } + hdr.Format.mayOnlyBe(FormatGNU) + + var p parser + hdr.Size = p.parseNumeric(blk.GNU().RealSize()) + if p.err != nil { + return nil, p.err + } + s := blk.GNU().Sparse() + spd := make(sparseDatas, 0, s.MaxEntries()) + for { + for i := 0; i < s.MaxEntries(); i++ { + // This termination condition is identical to GNU and BSD tar. + if s.Entry(i).Offset()[0] == 0x00 { + break // Don't return, need to process extended headers (even if empty) + } + offset := p.parseNumeric(s.Entry(i).Offset()) + length := p.parseNumeric(s.Entry(i).Length()) + if p.err != nil { + return nil, p.err + } + spd = append(spd, SparseEntry{Offset: offset, Length: length}) + } + + if s.IsExtended()[0] > 0 { + // There are more entries. Read an extension header and parse its entries. + if _, err := mustReadFull(tr.r, blk[:]); err != nil { + return nil, err + } + s = blk.Sparse() + continue + } + return spd, nil // Done + } +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format +// version 1.0. The format of the sparse map consists of a series of +// newline-terminated numeric fields. The first field is the number of entries +// and is always present. Following this are the entries, consisting of two +// fields (offset, length). This function must stop reading at the end +// boundary of the block containing the last newline. +// +// Note that the GNU manual says that numeric values should be encoded in octal +// format. However, the GNU tar utility itself outputs these values in decimal. +// As such, this library treats values as being encoded in decimal. 
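+//
+// For example (an illustration added by the editor, not upstream text),
+// a map with two data fragments may be encoded as:
+//
+//	"2\n0\n1024\n4096\n512\n"
+//
+// padded with NULs to the next 512-byte boundary, which parses to
+// sparseDatas{{Offset: 0, Length: 1024}, {Offset: 4096, Length: 512}}.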
+func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { + var ( + cntNewline int64 + buf bytes.Buffer + blk block + ) + + // feedTokens copies data in blocks from r into buf until there are + // at least cnt newlines in buf. It will not read more blocks than needed. + feedTokens := func(n int64) error { + for cntNewline < n { + if _, err := mustReadFull(r, blk[:]); err != nil { + return err + } + buf.Write(blk[:]) + for _, c := range blk { + if c == '\n' { + cntNewline++ + } + } + } + return nil + } + + // nextToken gets the next token delimited by a newline. This assumes that + // at least one newline exists in the buffer. + nextToken := func() string { + cntNewline-- + tok, _ := buf.ReadString('\n') + return strings.TrimRight(tok, "\n") + } + + // Parse for the number of entries. + // Use integer overflow resistant math to check this. + if err := feedTokens(1); err != nil { + return nil, err + } + numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // Parse for all member entries. + // numEntries is trusted after this since a potential attacker must have + // committed resources proportional to what this library used. + if err := feedTokens(2 * numEntries); err != nil { + return nil, err + } + spd := make(sparseDatas, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err1 := strconv.ParseInt(nextToken(), 10, 64) + length, err2 := strconv.ParseInt(nextToken(), 10, 64) + if err1 != nil || err2 != nil { + return nil, ErrHeader + } + spd = append(spd, SparseEntry{Offset: offset, Length: length}) + } + return spd, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format +// version 0.1. The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) { + // Get number of entries. + // Use integer overflow resistant math to check this. + numEntriesStr := paxHdrs[paxGNUSparseNumBlocks] + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // There should be two numbers in sparseMap for each entry. + sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",") + if len(sparseMap) == 1 && sparseMap[0] == "" { + sparseMap = sparseMap[:0] + } + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map. + // numEntries is trusted now. + spd := make(sparseDatas, 0, numEntries) + for len(sparseMap) >= 2 { + offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64) + length, err2 := strconv.ParseInt(sparseMap[1], 10, 64) + if err1 != nil || err2 != nil { + return nil, ErrHeader + } + spd = append(spd, SparseEntry{Offset: offset, Length: length}) + sparseMap = sparseMap[2:] + } + return spd, nil +} + +// Read reads from the current file in the tar archive. +// It returns (0, io.EOF) when it reaches the end of that file, +// until Next is called to advance to the next file. +// +// If the current file is sparse, then the regions marked as a hole +// are read back as NUL-bytes. +// +// Calling Read on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what +// the Header.Size claims. 
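+//
+// A typical read loop (an illustration added by the editor; assumes
+// tr := NewReader(r)):
+//
+//	for {
+//		hdr, err := tr.Next()
+//		if err == io.EOF {
+//			break // End of archive
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		if _, err := io.Copy(ioutil.Discard, tr); err != nil {
+//			return err
+//		}
+//	}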
+func (tr *Reader) Read(b []byte) (int, error) { + if tr.err != nil { + return 0, tr.err + } + n, err := tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return n, err +} + +// WriteTo writes the content of the current file to w. +// The bytes written matches the number of remaining bytes in the current file. +// +// If the current file is sparse and w is an io.WriteSeeker, +// then WriteTo uses Seek to skip past holes defined in Header.SparseHoles, +// assuming that skipped regions are filled with NULs. +// This always writes the last byte to ensure w is the right size. +func (tr *Reader) WriteTo(w io.Writer) (int64, error) { + if tr.err != nil { + return 0, tr.err + } + n, err := tr.curr.WriteTo(w) + if err != nil { + tr.err = err + } + return n, err +} + +// regFileReader is a fileReader for reading data from a regular file entry. +type regFileReader struct { + r io.Reader // Underlying Reader + nb int64 // Number of remaining bytes to read +} + +func (fr *regFileReader) Read(b []byte) (n int, err error) { + if int64(len(b)) > fr.nb { + b = b[:fr.nb] + } + if len(b) > 0 { + n, err = fr.r.Read(b) + fr.nb -= int64(n) + } + switch { + case err == io.EOF && fr.nb > 0: + return n, io.ErrUnexpectedEOF + case err == nil && fr.nb == 0: + return n, io.EOF + default: + return n, err + } +} + +func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) { + return io.Copy(w, struct{ io.Reader }{fr}) +} + +func (fr regFileReader) LogicalRemaining() int64 { + return fr.nb +} + +func (fr regFileReader) PhysicalRemaining() int64 { + return fr.nb +} + +// sparseFileReader is a fileReader for reading data from a sparse file entry. +type sparseFileReader struct { + fr fileReader // Underlying fileReader + sp sparseHoles // Normalized list of sparse holes + pos int64 // Current position in sparse file +} + +func (sr *sparseFileReader) Read(b []byte) (n int, err error) { + finished := int64(len(b)) >= sr.LogicalRemaining() + if finished { + b = b[:sr.LogicalRemaining()] + } + + b0 := b + endPos := sr.pos + int64(len(b)) + for endPos > sr.pos && err == nil { + var nf int // Bytes read in fragment + holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() + if sr.pos < holeStart { // In a data fragment + bf := b[:min(int64(len(b)), holeStart-sr.pos)] + nf, err = tryReadFull(sr.fr, bf) + } else { // In a hole fragment + bf := b[:min(int64(len(b)), holeEnd-sr.pos)] + nf, err = tryReadFull(zeroReader{}, bf) + } + b = b[nf:] + sr.pos += int64(nf) + if sr.pos >= holeEnd && len(sr.sp) > 1 { + sr.sp = sr.sp[1:] // Ensure last fragment always remains + } + } + + n = len(b0) - len(b) + switch { + case err == io.EOF: + return n, errMissData // Less data in dense file than sparse file + case err != nil: + return n, err + case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: + return n, errUnrefData // More data in dense file than sparse file + case finished: + return n, io.EOF + default: + return n, nil + } +} + +func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) { + ws, ok := w.(io.WriteSeeker) + if ok { + if _, err := ws.Seek(0, io.SeekCurrent); err != nil { + ok = false // Not all io.Seeker can really seek + } + } + if !ok { + return io.Copy(w, struct{ io.Reader }{sr}) + } + + var writeLastByte bool + pos0 := sr.pos + for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil { + var nf int64 // Size of fragment + holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() + if sr.pos < holeStart { // In a data fragment + nf = holeStart - sr.pos + nf, err = 
io.CopyN(ws, sr.fr, nf) + } else { // In a hole fragment + nf = holeEnd - sr.pos + if sr.PhysicalRemaining() == 0 { + writeLastByte = true + nf-- + } + _, err = ws.Seek(nf, io.SeekCurrent) + } + sr.pos += nf + if sr.pos >= holeEnd && len(sr.sp) > 1 { + sr.sp = sr.sp[1:] // Ensure last fragment always remains + } + } + + // If the last fragment is a hole, then seek to 1-byte before EOF, and + // write a single byte to ensure the file is the right size. + if writeLastByte && err == nil { + _, err = ws.Write([]byte{0}) + sr.pos++ + } + + n = sr.pos - pos0 + switch { + case err == io.EOF: + return n, errMissData // Less data in dense file than sparse file + case err != nil: + return n, err + case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: + return n, errUnrefData // More data in dense file than sparse file + default: + return n, nil + } +} + +func (sr sparseFileReader) LogicalRemaining() int64 { + return sr.sp[len(sr.sp)-1].endOffset() - sr.pos +} +func (sr sparseFileReader) PhysicalRemaining() int64 { + return sr.fr.PhysicalRemaining() +} + +type zeroReader struct{} + +func (zeroReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 + } + return len(b), nil +} + +// mustReadFull is like io.ReadFull except it returns +// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read. +func mustReadFull(r io.Reader, b []byte) (int, error) { + n, err := tryReadFull(r, b) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err +} + +// tryReadFull is like io.ReadFull except it returns +// io.EOF when it is hit before len(b) bytes are read. +func tryReadFull(r io.Reader, b []byte) (n int, err error) { + for len(b) > n && err == nil { + var nn int + nn, err = r.Read(b[n:]) + n += nn + } + if len(b) == n && err == io.EOF { + err = nil + } + return n, err +} + +// discard skips n bytes in r, reporting an error if unable to do so. +func discard(r io.Reader, n int64) error { + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + var seekSkipped int64 // Number of bytes skipped via Seek + if sr, ok := r.(io.Seeker); ok && n > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, io.SeekCurrent) + if pos1 >= 0 && err == nil { + // Seek seems supported, so perform the real Seek. + pos2, err := sr.Seek(n-1, io.SeekCurrent) + if pos2 < 0 || err != nil { + return err + } + seekSkipped = pos2 - pos1 + } + } + + copySkipped, err := io.CopyN(ioutil.Discard, r, n-seekSkipped) + if err == io.EOF && seekSkipped+copySkipped < n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/dmcgowan/go-tar/sparse_unix.go b/vendor/github.com/dmcgowan/go-tar/sparse_unix.go new file mode 100644 index 000000000..c623c1ee4 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/sparse_unix.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "io" + "os" + "runtime" + "syscall" +) + +func init() { + sysSparseDetect = sparseDetectUnix +} + +func sparseDetectUnix(f *os.File) (sph sparseHoles, err error) { + // SEEK_DATA and SEEK_HOLE originated from Solaris and support for it + // has been added to most of the other major Unix systems. + var seekData, seekHole = 3, 4 // SEEK_DATA/SEEK_HOLE from unistd.h + + if runtime.GOOS == "darwin" { + // Darwin has the constants swapped, compared to all other UNIX. + seekData, seekHole = 4, 3 + } + + // Check for seekData/seekHole support. + // Different OS and FS may differ in the exact errno that is returned when + // there is no support. Rather than special-casing every possible errno + // representing "not supported", just assume that a non-nil error means + // that seekData/seekHole is not supported. + if _, err := f.Seek(0, seekHole); err != nil { + return nil, nil + } + + // Populate the SparseHoles. + var last, pos int64 = -1, 0 + for { + // Get the location of the next hole section. + if pos, err = fseek(f, pos, seekHole); pos == last || err != nil { + return sph, err + } + offset := pos + last = pos + + // Get the location of the next data section. + if pos, err = fseek(f, pos, seekData); pos == last || err != nil { + return sph, err + } + length := pos - offset + last = pos + + if length > 0 { + sph = append(sph, SparseEntry{offset, length}) + } + } +} + +func fseek(f *os.File, pos int64, whence int) (int64, error) { + pos, err := f.Seek(pos, whence) + if errno(err) == syscall.ENXIO { + // SEEK_DATA returns ENXIO when past the last data fragment, + // which makes determining the size of the last hole difficult. + pos, err = f.Seek(0, io.SeekEnd) + } + return pos, err +} + +func errno(err error) error { + if perr, ok := err.(*os.PathError); ok { + return perr.Err + } + return err +} diff --git a/vendor/github.com/dmcgowan/go-tar/sparse_windows.go b/vendor/github.com/dmcgowan/go-tar/sparse_windows.go new file mode 100644 index 000000000..05bf1a90b --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/sparse_windows.go @@ -0,0 +1,129 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package tar + +import ( + "os" + "syscall" + "unsafe" +) + +var errInvalidFunc = syscall.Errno(1) // ERROR_INVALID_FUNCTION from WinError.h + +func init() { + sysSparseDetect = sparseDetectWindows + sysSparsePunch = sparsePunchWindows +} + +func sparseDetectWindows(f *os.File) (sph sparseHoles, err error) { + const queryAllocRanges = 0x000940CF // FSCTL_QUERY_ALLOCATED_RANGES from WinIoCtl.h + type allocRangeBuffer struct{ offset, length int64 } // FILE_ALLOCATED_RANGE_BUFFER from WinIoCtl.h + + s, err := f.Stat() + if err != nil { + return nil, err + } + + queryRange := allocRangeBuffer{0, s.Size()} + allocRanges := make([]allocRangeBuffer, 64) + + // Repeatedly query for ranges until the input buffer is large enough. 
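+	// (Illustrative note, not part of the upstream source: DeviceIoControl
+	// fails with ERROR_MORE_DATA when the output buffer cannot hold every
+	// allocated range, so the buffer is doubled and the query retried.)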
+ var bytesReturned uint32 + for { + err := syscall.DeviceIoControl( + syscall.Handle(f.Fd()), queryAllocRanges, + (*byte)(unsafe.Pointer(&queryRange)), uint32(unsafe.Sizeof(queryRange)), + (*byte)(unsafe.Pointer(&allocRanges[0])), uint32(len(allocRanges)*int(unsafe.Sizeof(allocRanges[0]))), + &bytesReturned, nil, + ) + if err == syscall.ERROR_MORE_DATA { + allocRanges = make([]allocRangeBuffer, 2*len(allocRanges)) + continue + } + if err == errInvalidFunc { + return nil, nil // Sparse file not supported on this FS + } + if err != nil { + return nil, err + } + break + } + n := bytesReturned / uint32(unsafe.Sizeof(allocRanges[0])) + allocRanges = append(allocRanges[:n], allocRangeBuffer{s.Size(), 0}) + + // Invert the data fragments into hole fragments. + var pos int64 + for _, r := range allocRanges { + if r.offset > pos { + sph = append(sph, SparseEntry{pos, r.offset - pos}) + } + pos = r.offset + r.length + } + return sph, nil +} + +func sparsePunchWindows(f *os.File, sph sparseHoles) error { + const setSparse = 0x000900C4 // FSCTL_SET_SPARSE from WinIoCtl.h + const setZeroData = 0x000980C8 // FSCTL_SET_ZERO_DATA from WinIoCtl.h + type zeroDataInfo struct{ start, end int64 } // FILE_ZERO_DATA_INFORMATION from WinIoCtl.h + + // Set the file as being sparse. + var bytesReturned uint32 + devErr := syscall.DeviceIoControl( + syscall.Handle(f.Fd()), setSparse, + nil, 0, nil, 0, + &bytesReturned, nil, + ) + if devErr != nil && devErr != errInvalidFunc { + return devErr + } + + // Set the file to the right size. + var size int64 + if len(sph) > 0 { + size = sph[len(sph)-1].endOffset() + } + if err := f.Truncate(size); err != nil { + return err + } + if devErr == errInvalidFunc { + // Sparse file not supported on this FS. + // Call sparsePunchManual since SetEndOfFile does not guarantee that + // the extended space is filled with zeros. + return sparsePunchManual(f, sph) + } + + // Punch holes for all relevant fragments. + for _, s := range sph { + zdi := zeroDataInfo{s.Offset, s.endOffset()} + err := syscall.DeviceIoControl( + syscall.Handle(f.Fd()), setZeroData, + (*byte)(unsafe.Pointer(&zdi)), uint32(unsafe.Sizeof(zdi)), + nil, 0, + &bytesReturned, nil, + ) + if err != nil { + return err + } + } + return nil +} + +// sparsePunchManual writes zeros into each hole. +func sparsePunchManual(f *os.File, sph sparseHoles) error { + const chunkSize = 32 << 10 + zbuf := make([]byte, chunkSize) + for _, s := range sph { + for pos := s.Offset; pos < s.endOffset(); pos += chunkSize { + n := min(chunkSize, s.endOffset()-pos) + if _, err := f.WriteAt(zbuf[:n], pos); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/dmcgowan/go-tar/stat_actime1.go b/vendor/github.com/dmcgowan/go-tar/stat_actime1.go new file mode 100644 index 000000000..cf9cc79c5 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/stat_actime1.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/vendor/github.com/dmcgowan/go-tar/stat_actime2.go b/vendor/github.com/dmcgowan/go-tar/stat_actime2.go new file mode 100644 index 000000000..6f17dbe30 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/stat_actime2.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/vendor/github.com/dmcgowan/go-tar/stat_unix.go b/vendor/github.com/dmcgowan/go-tar/stat_unix.go new file mode 100644 index 000000000..868105f33 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/stat_unix.go @@ -0,0 +1,96 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "os/user" + "runtime" + "strconv" + "sync" + "syscall" +) + +func init() { + sysStat = statUnix +} + +// userMap and groupMap caches UID and GID lookups for performance reasons. +// The downside is that renaming uname or gname by the OS never takes effect. +var userMap, groupMap sync.Map // map[int]string + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + + // Best effort at populating Uname and Gname. + // The os/user functions may fail for any number of reasons + // (not implemented on that platform, cgo not enabled, etc). + if u, ok := userMap.Load(h.Uid); ok { + h.Uname = u.(string) + } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil { + h.Uname = u.Username + userMap.Store(h.Uid, h.Uname) + } + if g, ok := groupMap.Load(h.Gid); ok { + h.Gname = g.(string) + } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil { + h.Gname = g.Name + groupMap.Store(h.Gid, h.Gname) + } + + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + + // Best effort at populating Devmajor and Devminor. + if h.Typeflag == TypeChar || h.Typeflag == TypeBlock { + dev := uint64(sys.Rdev) // May be int32 or uint32 + switch runtime.GOOS { + case "linux": + // Copied from golang.org/x/sys/unix/dev_linux.go. + major := uint32((dev & 0x00000000000fff00) >> 8) + major |= uint32((dev & 0xfffff00000000000) >> 32) + minor := uint32((dev & 0x00000000000000ff) >> 0) + minor |= uint32((dev & 0x00000ffffff00000) >> 12) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "darwin": + // Copied from golang.org/x/sys/unix/dev_darwin.go. + major := uint32((dev >> 24) & 0xff) + minor := uint32(dev & 0xffffff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "dragonfly": + // Copied from golang.org/x/sys/unix/dev_dragonfly.go. 
+ major := uint32((dev >> 8) & 0xff) + minor := uint32(dev & 0xffff00ff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "freebsd": + // Copied from golang.org/x/sys/unix/dev_freebsd.go. + major := uint32((dev >> 8) & 0xff) + minor := uint32(dev & 0xffff00ff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "netbsd": + // Copied from golang.org/x/sys/unix/dev_netbsd.go. + major := uint32((dev & 0x000fff00) >> 8) + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xfff00000) >> 12) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "openbsd": + // Copied from golang.org/x/sys/unix/dev_openbsd.go. + major := uint32((dev & 0x0000ff00) >> 8) + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xffff0000) >> 8) + h.Devmajor, h.Devminor = int64(major), int64(minor) + default: + // TODO: Implement solaris (see https://golang.org/issue/8106) + } + } + return nil +} diff --git a/vendor/github.com/dmcgowan/go-tar/strconv.go b/vendor/github.com/dmcgowan/go-tar/strconv.go new file mode 100644 index 000000000..8bbd65cd1 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/strconv.go @@ -0,0 +1,326 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" +) + +// hasNUL reports whether the NUL character exists within s. +func hasNUL(s string) bool { + return strings.IndexByte(s, 0) >= 0 +} + +// isASCII reports whether the input is an ASCII C-style string. +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 || c == 0x00 { + return false + } + } + return true +} + +// toASCII converts the input to an ASCII C-style string. +// This a best effort conversion, so invalid characters are dropped. +func toASCII(s string) string { + if isASCII(s) { + return s + } + b := make([]byte, 0, len(s)) + for _, c := range s { + if c < 0x80 && c != 0x00 { + b = append(b, byte(c)) + } + } + return string(b) +} + +type parser struct { + err error // Last error seen +} + +type formatter struct { + err error // Last error seen +} + +// parseString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func (*parser) parseString(b []byte) string { + if i := bytes.IndexByte(b, 0); i >= 0 { + return string(b[:i]) + } + return string(b) +} + +// formatString copies s into b, NUL-terminating if possible. +func (f *formatter) formatString(b []byte, s string) { + if len(s) > len(b) { + f.err = ErrFieldTooLong + } + copy(b, s) + if len(s) < len(b) { + b[len(s)] = 0 + } + + // Some buggy readers treat regular files with a trailing slash + // in the V7 path field as a directory even though the full path + // recorded elsewhere (e.g., via PAX record) contains no trailing slash. + if len(s) > len(b) && b[len(b)-1] == '/' { + n := len(strings.TrimRight(s[:len(b)], "/")) + b[n] = 0 // Replace trailing slash with NUL terminator + } +} + +// fitsInBase256 reports whether x can be encoded into n bytes using base-256 +// encoding. Unlike octal encoding, base-256 encoding does not require that the +// string ends with a NUL character. Thus, all n bytes are available for output. +// +// If operating in binary mode, this assumes strict GNU binary mode; which means +// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is +// equivalent to the sign bit in two's complement form. 
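+//
+// For example (an illustration added by the editor): with n = 8 there are
+// (8-1)*8 = 56 value bits, so x must lie in [-1<<56, 1<<56); with n >= 9
+// there are at least 64 value bits, so every int64 fits.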
+func fitsInBase256(n int, x int64) bool {
+	binBits := uint(n-1) * 8
+	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
+}
+
+// parseNumeric parses the input as being encoded in either base-256 or octal.
+// This function may return negative numbers.
+// If parsing fails or an integer overflow occurs, err will be set.
+func (p *parser) parseNumeric(b []byte) int64 {
+	// Check for base-256 (binary) format first.
+	// If the first bit is set, then all following bits constitute a two's
+	// complement encoded number in big-endian byte order.
+	if len(b) > 0 && b[0]&0x80 != 0 {
+		// Handling negative numbers relies on the following identity:
+		//	-a-1 == ^a
+		//
+		// If the number is negative, we use an inversion mask to invert the
+		// data bytes and treat the value as an unsigned number.
+		var inv byte // 0x00 if positive or zero, 0xff if negative
+		if b[0]&0x40 != 0 {
+			inv = 0xff
+		}
+
+		var x uint64
+		for i, c := range b {
+			c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
+			if i == 0 {
+				c &= 0x7f // Ignore signal bit in first byte
+			}
+			if (x >> 56) > 0 {
+				p.err = ErrHeader // Integer overflow
+				return 0
+			}
+			x = x<<8 | uint64(c)
+		}
+		if (x >> 63) > 0 {
+			p.err = ErrHeader // Integer overflow
+			return 0
+		}
+		if inv == 0xff {
+			return ^int64(x)
+		}
+		return int64(x)
+	}
+
+	// Normal case is base-8 (octal) format.
+	return p.parseOctal(b)
+}
+
+// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
+// Otherwise it will attempt to use base-256 (binary) encoding.
+func (f *formatter) formatNumeric(b []byte, x int64) {
+	if fitsInOctal(len(b), x) {
+		f.formatOctal(b, x)
+		return
+	}
+
+	if fitsInBase256(len(b), x) {
+		for i := len(b) - 1; i >= 0; i-- {
+			b[i] = byte(x)
+			x >>= 8
+		}
+		b[0] |= 0x80 // Highest bit indicates binary format
+		return
+	}
+
+	f.formatOctal(b, 0) // Last resort, just write zero
+	f.err = ErrFieldTooLong
+}
+
+func (p *parser) parseOctal(b []byte) int64 {
+	// Because unused fields are filled with NULs, we need
+	// to skip leading NULs. Fields may also be padded with
+	// spaces or NULs.
+	// So we remove leading and trailing NULs and spaces to
+	// be sure.
+	b = bytes.Trim(b, " \x00")
+
+	if len(b) == 0 {
+		return 0
+	}
+	x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
+	if perr != nil {
+		p.err = ErrHeader
+	}
+	return int64(x)
+}
+
+func (f *formatter) formatOctal(b []byte, x int64) {
+	if !fitsInOctal(len(b), x) {
+		x = 0 // Last resort, just write zero
+		f.err = ErrFieldTooLong
+	}
+
+	s := strconv.FormatInt(x, 8)
+	// Add leading zeros, but leave room for a NUL.
+	if n := len(b) - len(s) - 1; n > 0 {
+		s = strings.Repeat("0", n) + s
+	}
+	f.formatString(b, s)
+}
+
+// fitsInOctal reports whether the integer x fits in a field n-bytes long
+// using octal encoding with the appropriate NUL terminator.
+func fitsInOctal(n int, x int64) bool {
+	octBits := uint(n-1) * 3
+	return x >= 0 && (n >= 22 || x < 1<<octBits)
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in the PAX
+// specification. Note that this implementation allows for negative timestamps,
+// which is allowed for by the PAX specification, but not always portable.
+func parsePAXTime(s string) (time.Time, error) {
+	const maxNanoSecondDigits = 9
+
+	// Split string into seconds and sub-seconds parts.
+	ss, sn := s, ""
+	if pos := strings.IndexByte(s, '.'); pos >= 0 {
+		ss, sn = s[:pos], s[pos+1:]
+	}
+
+	// Parse the seconds.
+	secs, err := strconv.ParseInt(ss, 10, 64)
+	if err != nil {
+		return time.Time{}, ErrHeader
+	}
+	if len(sn) == 0 {
+		return time.Unix(secs, 0), nil // No sub-second values
+	}
+
+	// Parse the nanoseconds.
+	if strings.Trim(sn, "0123456789") != "" {
+		return time.Time{}, ErrHeader
+	}
+	if len(sn) < maxNanoSecondDigits {
+		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
+	} else {
+		sn = sn[:maxNanoSecondDigits] // Right truncate
+	}
+	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
+	if len(ss) > 0 && ss[0] == '-' {
+		return time.Unix(secs, -1*int64(nsecs)), nil // Negative correction
+	}
+	return time.Unix(secs, int64(nsecs)), nil
+}
+
+// formatPAXTime converts ts into a time of the form %d.%d as described in the
+// PAX specification. This function is capable of negative timestamps.
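+//
+// For example (an illustration added by the editor):
+// time.Unix(1, 500000000) is formatted as "1.5", while whole-second
+// timestamps omit the fractional part entirely.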
+func formatPAXTime(ts time.Time) (s string) { + secs, nsecs := ts.Unix(), ts.Nanosecond() + if nsecs == 0 { + return strconv.FormatInt(secs, 10) + } + + // If seconds is negative, then perform correction. + sign := "" + if secs < 0 { + sign = "-" // Remember sign + secs = -(secs + 1) // Add a second to secs + nsecs = -(nsecs - 1E9) // Take that second away from nsecs + } + return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0") +} + +// parsePAXRecord parses the input PAX record string into a key-value pair. +// If parsing is successful, it will slice off the currently read record and +// return the remainder as r. +func parsePAXRecord(s string) (k, v, r string, err error) { + // The size field ends at the first space. + sp := strings.IndexByte(s, ' ') + if sp == -1 { + return "", "", s, ErrHeader + } + + // Parse the first token as a decimal integer. + n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int + if perr != nil || n < 5 || int64(len(s)) < n { + return "", "", s, ErrHeader + } + + // Extract everything between the space and the final newline. + rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] + if nl != "\n" { + return "", "", s, ErrHeader + } + + // The first equals separates the key from the value. + eq := strings.IndexByte(rec, '=') + if eq == -1 { + return "", "", s, ErrHeader + } + k, v = rec[:eq], rec[eq+1:] + + if !validPAXRecord(k, v) { + return "", "", s, ErrHeader + } + return k, v, rem, nil +} + +// formatPAXRecord formats a single PAX record, prefixing it with the +// appropriate length. +func formatPAXRecord(k, v string) (string, error) { + if !validPAXRecord(k, v) { + return "", ErrHeader + } + + const padding = 3 // Extra padding for ' ', '=', and '\n' + size := len(k) + len(v) + padding + size += len(strconv.Itoa(size)) + record := strconv.Itoa(size) + " " + k + "=" + v + "\n" + + // Final adjustment if adding size field increased the record size. + if len(record) != size { + size = len(record) + record = strconv.Itoa(size) + " " + k + "=" + v + "\n" + } + return record, nil +} + +// validPAXRecord reports whether the key-value pair is valid where each +// record is formatted as: +// "%d %s=%s\n" % (size, key, value) +// +// Keys and values should be UTF-8, but the number of bad writers out there +// forces us to be a more liberal. +// Thus, we only reject all keys with NUL, and only reject NULs in values +// for the PAX version of the USTAR string fields. +// The key must not contain an '=' character. +func validPAXRecord(k, v string) bool { + if k == "" || strings.IndexByte(k, '=') >= 0 { + return false + } + switch k { + case paxPath, paxLinkpath, paxUname, paxGname: + return !hasNUL(v) + default: + return !hasNUL(k) + } +} diff --git a/vendor/github.com/dmcgowan/go-tar/writer.go b/vendor/github.com/dmcgowan/go-tar/writer.go new file mode 100644 index 000000000..2eed61934 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/writer.go @@ -0,0 +1,629 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "fmt" + "io" + "path" + "sort" + "strconv" + "strings" + "time" +) + +// Writer provides sequential writing of a tar archive. +// Write.WriteHeader begins a new file with the provided Header, +// and then Writer can be treated as an io.Writer to supply that file's data. 
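+//
+// A minimal use (an illustration added by the editor; error handling elided):
+//
+//	tw := NewWriter(w)
+//	data := []byte("Hello, tar!")
+//	tw.WriteHeader(&Header{Name: "hello.txt", Mode: 0600, Size: int64(len(data))})
+//	tw.Write(data)
+//	tw.Close()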
+type Writer struct {
+	w    io.Writer
+	pad  int64      // Amount of padding to write after current file entry
+	curr fileWriter // Writer for current file entry
+	hdr  Header     // Shallow copy of Header that is safe for mutations
+	blk  block      // Buffer to use as temporary local storage
+
+	// err is a persistent error.
+	// It is only the responsibility of every exported method of Writer to
+	// ensure that this error is sticky.
+	err error
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{w: w, curr: &regFileWriter{w, 0}}
+}
+
+type fileWriter interface {
+	io.Writer
+	fileState
+
+	ReadFrom(io.Reader) (int64, error)
+}
+
+// Flush finishes writing the current file's block padding.
+// The current file must be fully written before Flush can be called.
+//
+// Deprecated: This is unnecessary as the next call to WriteHeader or Close
+// will implicitly flush out the file's padding.
+func (tw *Writer) Flush() error {
+	if tw.err != nil {
+		return tw.err
+	}
+	if nb := tw.curr.LogicalRemaining(); nb > 0 {
+		return fmt.Errorf("tar: missed writing %d bytes", nb)
+	}
+	if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
+		return tw.err
+	}
+	tw.pad = 0
+	return nil
+}
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// The Header.Size determines how many bytes can be written for the next file.
+// If the current file is not fully written, then this returns an error.
+// This implicitly flushes any padding necessary before writing the header.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+	if err := tw.Flush(); err != nil {
+		return err
+	}
+	tw.hdr = *hdr // Shallow copy of Header
+
+	// Round ModTime and ignore AccessTime and ChangeTime unless
+	// the format is explicitly chosen.
+	// This ensures nominal usage of WriteHeader (without specifying the format)
+	// does not always result in the PAX format being chosen, which
+	// causes a 1KiB increase to every header.
+	if tw.hdr.Format == FormatUnknown {
+		tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
+		tw.hdr.AccessTime = time.Time{}
+		tw.hdr.ChangeTime = time.Time{}
+	}
+
+	allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
+	switch {
+	case allowedFormats.has(FormatUSTAR):
+		tw.err = tw.writeUSTARHeader(&tw.hdr)
+		return tw.err
+	case allowedFormats.has(FormatPAX):
+		tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
+		return tw.err
+	case allowedFormats.has(FormatGNU):
+		tw.err = tw.writeGNUHeader(&tw.hdr)
+		return tw.err
+	default:
+		return err // Non-fatal error
+	}
+}
+
+func (tw *Writer) writeUSTARHeader(hdr *Header) error {
+	// Check if we can use USTAR prefix/suffix splitting.
+	var namePrefix string
+	if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
+		namePrefix, hdr.Name = prefix, suffix
+	}
+
+	// Pack the main header.
+	var f formatter
+	blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
+	f.formatString(blk.USTAR().Prefix(), namePrefix)
+	blk.SetFormat(FormatUSTAR)
+	if f.err != nil {
+		return f.err // Should never happen since header is validated
+	}
+	return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
+}
+
+func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
+	realName, realSize := hdr.Name, hdr.Size
+
+	// Handle sparse files.
+	var spd sparseDatas
+	var spb []byte
+	if len(hdr.SparseHoles) > 0 {
+		sph := append([]SparseEntry{}, hdr.SparseHoles...) // Copy sparse map
+		sph = alignSparseEntries(sph, hdr.Size)
+		spd = invertSparseEntries(sph, hdr.Size)
+
+		// Format the sparse map.
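+		// The PAX sparse v1.0 map is stored in the file data itself as a
+		// series of newline-terminated decimal numbers: the number of
+		// entries, followed by alternating offset and length values.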
+ hdr.Size = 0 // Replace with encoded size + spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n') + for _, s := range spd { + hdr.Size += s.Length + spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n') + spb = append(strconv.AppendInt(spb, s.Length, 10), '\n') + } + pad := blockPadding(int64(len(spb))) + spb = append(spb, zeroBlock[:pad]...) + hdr.Size += int64(len(spb)) // Accounts for encoded sparse map + + // Add and modify appropriate PAX records. + dir, file := path.Split(realName) + hdr.Name = path.Join(dir, "GNUSparseFile.0", file) + paxHdrs[paxGNUSparseMajor] = "1" + paxHdrs[paxGNUSparseMinor] = "0" + paxHdrs[paxGNUSparseName] = realName + paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10) + paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10) + delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName + } + + // Write PAX records to the output. + isGlobal := hdr.Typeflag == TypeXGlobalHeader + if len(paxHdrs) > 0 || isGlobal { + // Sort keys for deterministic ordering. + var keys []string + for k := range paxHdrs { + keys = append(keys, k) + } + sort.Strings(keys) + + // Write each record to a buffer. + var buf bytes.Buffer + for _, k := range keys { + rec, err := formatPAXRecord(k, paxHdrs[k]) + if err != nil { + return err + } + buf.WriteString(rec) + } + + // Write the extended header file. + var name string + var flag byte + if isGlobal { + name = "GlobalHead.0.0" + flag = TypeXGlobalHeader + } else { + dir, file := path.Split(realName) + name = path.Join(dir, "PaxHeaders.0", file) + flag = TypeXHeader + } + data := buf.String() + if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal { + return err // Global headers return here + } + } + + // Pack the main header. + var f formatter // Ignore errors since they are expected + fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) } + blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal) + blk.SetFormat(FormatPAX) + if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { + return err + } + + // Write the sparse map and setup the sparse writer if necessary. + if len(spd) > 0 { + // Use tw.curr since the sparse map is accounted for in hdr.Size. + if _, err := tw.curr.Write(spb); err != nil { + return err + } + tw.curr = &sparseFileWriter{tw.curr, spd, 0} + } + return nil +} + +func (tw *Writer) writeGNUHeader(hdr *Header) error { + // Use long-link files if Name or Linkname exceeds the field size. + const longName = "././@LongLink" + if len(hdr.Name) > nameSize { + data := hdr.Name + "\x00" + if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil { + return err + } + } + if len(hdr.Linkname) > nameSize { + data := hdr.Linkname + "\x00" + if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil { + return err + } + } + + // Pack the main header. + var f formatter // Ignore errors since they are expected + var spd sparseDatas + var spb []byte + blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric) + if !hdr.AccessTime.IsZero() { + f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix()) + } + if !hdr.ChangeTime.IsZero() { + f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix()) + } + if hdr.Typeflag == TypeGNUSparse { + sph := append([]SparseEntry{}, hdr.SparseHoles...) // Copy sparse map + sph = alignSparseEntries(sph, hdr.Size) + spd = invertSparseEntries(sph, hdr.Size) + + // Format the sparse map. 
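+		// The old GNU sparse format stores the first few entries inline in
+		// the header block, chaining additional blocks via the extended flag
+		// when the map does not fit.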
+ formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas { + for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ { + f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset) + f.formatNumeric(sa.Entry(i).Length(), sp[0].Length) + sp = sp[1:] + } + if len(sp) > 0 { + sa.IsExtended()[0] = 1 + } + return sp + } + sp2 := formatSPD(spd, blk.GNU().Sparse()) + for len(sp2) > 0 { + var spHdr block + sp2 = formatSPD(sp2, spHdr.Sparse()) + spb = append(spb, spHdr[:]...) + } + + // Update size fields in the header block. + realSize := hdr.Size + hdr.Size = 0 // Encoded size; does not account for encoded sparse map + for _, s := range spd { + hdr.Size += s.Length + } + copy(blk.V7().Size(), zeroBlock[:]) // Reset field + f.formatNumeric(blk.V7().Size(), hdr.Size) + f.formatNumeric(blk.GNU().RealSize(), realSize) + } + blk.SetFormat(FormatGNU) + if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { + return err + } + + // Write the extended sparse map and setup the sparse writer if necessary. + if len(spd) > 0 { + // Use tw.w since the sparse map is not accounted for in hdr.Size. + if _, err := tw.w.Write(spb); err != nil { + return err + } + tw.curr = &sparseFileWriter{tw.curr, spd, 0} + } + return nil +} + +type ( + stringFormatter func([]byte, string) + numberFormatter func([]byte, int64) +) + +// templateV7Plus fills out the V7 fields of a block using values from hdr. +// It also fills out fields (uname, gname, devmajor, devminor) that are +// shared in the USTAR, PAX, and GNU formats using the provided formatters. +// +// The block returned is only valid until the next call to +// templateV7Plus or writeRawFile. +func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block { + tw.blk.Reset() + + modTime := hdr.ModTime + if modTime.IsZero() { + modTime = time.Unix(0, 0) + } + + v7 := tw.blk.V7() + v7.TypeFlag()[0] = hdr.Typeflag + fmtStr(v7.Name(), hdr.Name) + fmtStr(v7.LinkName(), hdr.Linkname) + fmtNum(v7.Mode(), hdr.Mode) + fmtNum(v7.UID(), int64(hdr.Uid)) + fmtNum(v7.GID(), int64(hdr.Gid)) + fmtNum(v7.Size(), hdr.Size) + fmtNum(v7.ModTime(), modTime.Unix()) + + ustar := tw.blk.USTAR() + fmtStr(ustar.UserName(), hdr.Uname) + fmtStr(ustar.GroupName(), hdr.Gname) + fmtNum(ustar.DevMajor(), hdr.Devmajor) + fmtNum(ustar.DevMinor(), hdr.Devminor) + + return &tw.blk +} + +// writeRawFile writes a minimal file with the given name and flag type. +// It uses format to encode the header format and will write data as the body. +// It uses default values for all of the other fields (as BSD and GNU tar does). +func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error { + tw.blk.Reset() + + // Best effort for the filename. + name = toASCII(name) + if len(name) > nameSize { + name = name[:nameSize] + } + name = strings.TrimRight(name, "/") + + var f formatter + v7 := tw.blk.V7() + v7.TypeFlag()[0] = flag + f.formatString(v7.Name(), name) + f.formatOctal(v7.Mode(), 0) + f.formatOctal(v7.UID(), 0) + f.formatOctal(v7.GID(), 0) + f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB + f.formatOctal(v7.ModTime(), 0) + tw.blk.SetFormat(format) + if f.err != nil { + return f.err // Only occurs if size condition is violated + } + + // Write the header and data. + if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil { + return err + } + _, err := io.WriteString(tw, data) + return err +} + +// writeRawHeader writes the value of blk, regardless of its value. 
+// It sets up the Writer such that it can accept a file of the given size.
+// If the flag is a special header-only flag, then the size is treated as zero.
+func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
+	if err := tw.Flush(); err != nil {
+		return err
+	}
+	if _, err := tw.w.Write(blk[:]); err != nil {
+		return err
+	}
+	if isHeaderOnlyType(flag) {
+		size = 0
+	}
+	tw.curr = &regFileWriter{tw.w, size}
+	tw.pad = blockPadding(size)
+	return nil
+}
+
+// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
+// If the path is not splittable, then it will return ("", "", false).
+func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
+	length := len(name)
+	if length <= nameSize || !isASCII(name) {
+		return "", "", false
+	} else if length > prefixSize+1 {
+		length = prefixSize + 1
+	} else if name[length-1] == '/' {
+		length--
+	}
+
+	i := strings.LastIndex(name[:length], "/")
+	nlen := len(name) - i - 1 // nlen is length of suffix
+	plen := i                 // plen is length of prefix
+	if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
+		return "", "", false
+	}
+	return name[:i], name[i+1:], true
+}
+
+// Write writes to the current file in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// Header.Size bytes are written after WriteHeader.
+//
+// If the current file is sparse, then the regions marked as a hole
+// must be written as NUL-bytes.
+//
+// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
+// of what the Header.Size claims.
+func (tw *Writer) Write(b []byte) (int, error) {
+	if tw.err != nil {
+		return 0, tw.err
+	}
+	n, err := tw.curr.Write(b)
+	if err != nil && err != ErrWriteTooLong {
+		tw.err = err
+	}
+	return n, err
+}
+
+// ReadFrom populates the content of the current file by reading from r.
+// The bytes read must match the number of remaining bytes in the current file.
+//
+// If the current file is sparse and r is an io.ReadSeeker,
+// then ReadFrom uses Seek to skip past holes defined in Header.SparseHoles,
+// assuming that skipped regions are all NULs.
+// This always reads the last byte to ensure r is the right size.
+func (tw *Writer) ReadFrom(r io.Reader) (int64, error) {
+	if tw.err != nil {
+		return 0, tw.err
+	}
+	n, err := tw.curr.ReadFrom(r)
+	if err != nil && err != ErrWriteTooLong {
+		tw.err = err
+	}
+	return n, err
+}
+
+// Close closes the tar archive by flushing the padding, and writing the footer.
+// If the current file (from a prior call to WriteHeader) is not fully written,
+// then this returns an error.
+func (tw *Writer) Close() error {
+	if tw.err == ErrWriteAfterClose {
+		return nil
+	}
+	if tw.err != nil {
+		return tw.err
+	}
+
+	// Trailer: two zero blocks.
+	err := tw.Flush()
+	for i := 0; i < 2 && err == nil; i++ {
+		_, err = tw.w.Write(zeroBlock[:])
+	}
+
+	// Ensure all future actions are invalid.
+	tw.err = ErrWriteAfterClose
+	return err // Report IO errors
+}
+
+// regFileWriter is a fileWriter for writing data to a regular file entry.
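+// It tracks the number of remaining bytes so that writing past Header.Size
+// can be reported as ErrWriteTooLong.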
+type regFileWriter struct { + w io.Writer // Underlying Writer + nb int64 // Number of remaining bytes to write +} + +func (fw *regFileWriter) Write(b []byte) (n int, err error) { + overwrite := int64(len(b)) > fw.nb + if overwrite { + b = b[:fw.nb] + } + if len(b) > 0 { + n, err = fw.w.Write(b) + fw.nb -= int64(n) + } + switch { + case err != nil: + return n, err + case overwrite: + return n, ErrWriteTooLong + default: + return n, nil + } +} + +func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) { + return io.Copy(struct{ io.Writer }{fw}, r) +} + +func (fw regFileWriter) LogicalRemaining() int64 { + return fw.nb +} +func (fw regFileWriter) PhysicalRemaining() int64 { + return fw.nb +} + +// sparseFileWriter is a fileWriter for writing data to a sparse file entry. +type sparseFileWriter struct { + fw fileWriter // Underlying fileWriter + sp sparseDatas // Normalized list of data fragments + pos int64 // Current position in sparse file +} + +func (sw *sparseFileWriter) Write(b []byte) (n int, err error) { + overwrite := int64(len(b)) > sw.LogicalRemaining() + if overwrite { + b = b[:sw.LogicalRemaining()] + } + + b0 := b + endPos := sw.pos + int64(len(b)) + for endPos > sw.pos && err == nil { + var nf int // Bytes written in fragment + dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() + if sw.pos < dataStart { // In a hole fragment + bf := b[:min(int64(len(b)), dataStart-sw.pos)] + nf, err = zeroWriter{}.Write(bf) + } else { // In a data fragment + bf := b[:min(int64(len(b)), dataEnd-sw.pos)] + nf, err = sw.fw.Write(bf) + } + b = b[nf:] + sw.pos += int64(nf) + if sw.pos >= dataEnd && len(sw.sp) > 1 { + sw.sp = sw.sp[1:] // Ensure last fragment always remains + } + } + + n = len(b0) - len(b) + switch { + case err == ErrWriteTooLong: + return n, errMissData // Not possible; implies bug in validation logic + case err != nil: + return n, err + case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: + return n, errUnrefData // Not possible; implies bug in validation logic + case overwrite: + return n, ErrWriteTooLong + default: + return n, nil + } +} + +func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) { + rs, ok := r.(io.ReadSeeker) + if ok { + if _, err := rs.Seek(0, io.SeekCurrent); err != nil { + ok = false // Not all io.Seeker can really seek + } + } + if !ok { + return io.Copy(struct{ io.Writer }{sw}, r) + } + + var readLastByte bool + pos0 := sw.pos + for sw.LogicalRemaining() > 0 && !readLastByte && err == nil { + var nf int64 // Size of fragment + dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() + if sw.pos < dataStart { // In a hole fragment + nf = dataStart - sw.pos + if sw.PhysicalRemaining() == 0 { + readLastByte = true + nf-- + } + _, err = rs.Seek(nf, io.SeekCurrent) + } else { // In a data fragment + nf = dataEnd - sw.pos + nf, err = io.CopyN(sw.fw, rs, nf) + } + sw.pos += nf + if sw.pos >= dataEnd && len(sw.sp) > 1 { + sw.sp = sw.sp[1:] // Ensure last fragment always remains + } + } + + // If the last fragment is a hole, then seek to 1-byte before EOF, and + // read a single byte to ensure the file is the right size. 
+ if readLastByte && err == nil { + _, err = mustReadFull(rs, []byte{0}) + sw.pos++ + } + + n = sw.pos - pos0 + switch { + case err == io.EOF: + return n, io.ErrUnexpectedEOF + case err == ErrWriteTooLong: + return n, errMissData // Not possible; implies bug in validation logic + case err != nil: + return n, err + case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: + return n, errUnrefData // Not possible; implies bug in validation logic + default: + return n, ensureEOF(rs) + } +} + +func (sw sparseFileWriter) LogicalRemaining() int64 { + return sw.sp[len(sw.sp)-1].endOffset() - sw.pos +} +func (sw sparseFileWriter) PhysicalRemaining() int64 { + return sw.fw.PhysicalRemaining() +} + +// zeroWriter may only be written with NULs, otherwise it returns errWriteHole. +type zeroWriter struct{} + +func (zeroWriter) Write(b []byte) (int, error) { + for i, c := range b { + if c != 0 { + return i, errWriteHole + } + } + return len(b), nil +} + +// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so. +func ensureEOF(r io.Reader) error { + n, err := tryReadFull(r, []byte{0}) + switch { + case n > 0: + return ErrWriteTooLong + case err == io.EOF: + return nil + default: + return err + } +} diff --git a/vendor/github.com/docker/go-metrics/LICENSE.code b/vendor/github.com/docker/go-metrics/LICENSE.code new file mode 100644 index 000000000..8f3fee627 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/docker/go-metrics/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/docker/go-metrics/NOTICE
new file mode 100644
index 000000000..8915f0277
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/NOTICE
@@ -0,0 +1,16 @@
+Docker
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md
new file mode 100644
index 000000000..da6b16c20
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/README.md
@@ -0,0 +1,79 @@
+# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics)
+
+This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
+
+## Best Practices
+
+This package is meant to be used for collecting metrics in Docker projects.
+It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected.
+If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/).
+
+The following are a few Docker specific rules that will help you name and work with metrics in your project.
+
+1. Namespace and Subsystem
+
+This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.
+
+```go
+ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
+        "version": dockerversion.Version,
+        "commit":  dockerversion.GitCommit,
+})
+```
+
+In the example above we are creating metrics for the Docker engine's daemon package.
+`engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics.
+
+A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.
+
+2. Declaring your Metrics
+
+Try to keep all your metric declarations in one file.
+This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.
+
+3.
Use labels instead of multiple metrics
+
+Labels allow you to define one metric such as the time it takes to perform a certain action on an object.
+If we wanted to collect timings on various container actions such as create, start, and delete then we can define one metric called `container_actions` and use labels to specify the type of action.
+
+
+```go
+containerActions = ns.NewLabeledTimer("container_actions", "The number of milliseconds it takes to process each container action", "action")
+```
+
+The last parameter is the label name or key.
+When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.
+
+```go
+containerActions.WithValues("create").UpdateSince(start)
+```
+
+4. Always use a unit
+
+The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with.
+For a timer, the standard unit is seconds and a counter's standard unit is a total.
+For gauges you must provide the unit.
+This package provides a standard set of units for use within the Docker projects.
+
+```go
+Nanoseconds Unit = "nanoseconds"
+Seconds     Unit = "seconds"
+Bytes       Unit = "bytes"
+Total       Unit = "total"
+```
+
+If you need to use a unit but it is not defined in the package please open a PR to add it, but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds.
+
+## Docs
+
+Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
+
+## Additional Metrics
+
+Additional metrics are also defined here that are not available in the prometheus client.
+If you need a custom metric and it is generic enough to be used by multiple projects, define it here.
+
+
+## Copyright and license
+
+Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/docker/go-metrics/counter.go
new file mode 100644
index 000000000..fe36316a4
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/counter.go
@@ -0,0 +1,52 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Counter is a metric that can only increment its current count.
+type Counter interface {
+	// Inc adds Sum(vs) to the counter. Sum(vs) must be positive.
+	//
+	// If len(vs) == 0, increments the counter by 1.
+	Inc(vs ...float64)
+}
+
+// LabeledCounter is a counter that must have labels populated before use.
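+// For example, a counter declared with a "method" label might be used as
+// requestsCounter.WithValues("GET").Inc() (requestsCounter is a
+// hypothetical variable, shown for illustration only).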
+type LabeledCounter interface {
+	WithValues(vs ...string) Counter
+}
+
+type labeledCounter struct {
+	pc *prometheus.CounterVec
+}
+
+func (lc *labeledCounter) WithValues(vs ...string) Counter {
+	return &counter{pc: lc.pc.WithLabelValues(vs...)}
+}
+
+func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) {
+	lc.pc.Describe(ch)
+}
+
+func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) {
+	lc.pc.Collect(ch)
+}
+
+type counter struct {
+	pc prometheus.Counter
+}
+
+func (c *counter) Inc(vs ...float64) {
+	if len(vs) == 0 {
+		c.pc.Inc()
+	}
+
+	c.pc.Add(sumFloat64(vs...))
+}
+
+func (c *counter) Describe(ch chan<- *prometheus.Desc) {
+	c.pc.Describe(ch)
+}
+
+func (c *counter) Collect(ch chan<- prometheus.Metric) {
+	c.pc.Collect(ch)
+}
diff --git a/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/docker/go-metrics/docs.go
new file mode 100644
index 000000000..8fbdfc697
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/docs.go
@@ -0,0 +1,3 @@
+// This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
+
+package metrics
diff --git a/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/docker/go-metrics/gauge.go
new file mode 100644
index 000000000..74296e877
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/gauge.go
@@ -0,0 +1,72 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Gauge is a metric that allows incrementing and decrementing a value.
+type Gauge interface {
+	Inc(...float64)
+	Dec(...float64)
+
+	// Add adds the provided value to the gauge's current value.
+	Add(float64)
+
+	// Set replaces the gauge's current value with the provided value.
+	Set(float64)
+}
+
+// LabeledGauge describes a gauge that must have values populated before use.
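+// WithValues returns the Gauge bound to the supplied label values.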
+type LabeledGauge interface {
+	WithValues(labels ...string) Gauge
+}
+
+type labeledGauge struct {
+	pg *prometheus.GaugeVec
+}
+
+func (lg *labeledGauge) WithValues(labels ...string) Gauge {
+	return &gauge{pg: lg.pg.WithLabelValues(labels...)}
+}
+
+func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) {
+	lg.pg.Describe(c)
+}
+
+func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) {
+	lg.pg.Collect(c)
+}
+
+type gauge struct {
+	pg prometheus.Gauge
+}
+
+func (g *gauge) Inc(vs ...float64) {
+	if len(vs) == 0 {
+		g.pg.Inc()
+	}
+
+	g.Add(sumFloat64(vs...))
+}
+
+func (g *gauge) Dec(vs ...float64) {
+	if len(vs) == 0 {
+		g.pg.Dec()
+	}
+
+	g.Add(-sumFloat64(vs...))
+}
+
+func (g *gauge) Add(v float64) {
+	g.pg.Add(v)
+}
+
+func (g *gauge) Set(v float64) {
+	g.pg.Set(v)
+}
+
+func (g *gauge) Describe(c chan<- *prometheus.Desc) {
+	g.pg.Describe(c)
+}
+
+func (g *gauge) Collect(c chan<- prometheus.Metric) {
+	g.pg.Collect(c)
+}
diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go
new file mode 100644
index 000000000..bb3be41d3
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/handler.go
@@ -0,0 +1,13 @@
+package metrics
+
+import (
+	"net/http"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// Handler returns the global http.Handler that provides the prometheus
+// metrics format on GET requests.
+func Handler() http.Handler {
+	return prometheus.Handler()
+}
diff --git a/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/docker/go-metrics/helpers.go
new file mode 100644
index 000000000..68b7f51b3
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/helpers.go
@@ -0,0 +1,10 @@
+package metrics
+
+func sumFloat64(vs ...float64) float64 {
+	var sum float64
+	for _, v := range vs {
+		sum += v
+	}
+
+	return sum
+}
diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go
new file mode 100644
index 000000000..7734c2945
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/namespace.go
@@ -0,0 +1,181 @@
+package metrics
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type Labels map[string]string
+
+// NewNamespace returns a namespace that is responsible for managing a collection of
+// metrics for a particular namespace and subsystem.
+//
+// labels allows const labels to be added to all metrics created in this namespace
+// and are commonly used for data like application version and git commit.
+func NewNamespace(name, subsystem string, labels Labels) *Namespace {
+	if labels == nil {
+		labels = make(map[string]string)
+	}
+	return &Namespace{
+		name:      name,
+		subsystem: subsystem,
+		labels:    labels,
+	}
+}
+
+// Namespace describes a set of metrics that share a namespace and subsystem.
+type Namespace struct {
+	name      string
+	subsystem string
+	labels    Labels
+	mu        sync.Mutex
+	metrics   []prometheus.Collector
+}
+
+// WithConstLabels returns a namespace with the provided set of labels merged
+// with the existing constant labels on the namespace.
+//
+// Only metrics created with the returned namespace will get the new constant
+// labels. The returned namespace must be registered separately.
+func (n *Namespace) WithConstLabels(labels Labels) *Namespace { + n.mu.Lock() + ns := &Namespace{ + name: n.name, + subsystem: n.subsystem, + labels: mergeLabels(n.labels, labels), + } + n.mu.Unlock() + return ns +} + +func (n *Namespace) NewCounter(name, help string) Counter { + c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} + n.Add(c) + return c +} + +func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { + c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} + n.Add(c) + return c +} + +func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Total), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewTimer(name, help string) Timer { + t := &timer{ + m: prometheus.NewHistogram(n.newTimerOpts(name, help)), + } + n.Add(t) + return t +} + +func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { + t := &labeledTimer{ + m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), + } + n.Add(t) + return t +} + +func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Seconds), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { + g := &gauge{ + pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), + } + n.Add(g) + return g +} + +func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { + g := &labeledGauge{ + pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), + } + n.Add(g) + return g +} + +func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, unit), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Describe(ch) + } +} + +func (n *Namespace) Collect(ch chan<- prometheus.Metric) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Collect(ch) + } +} + +func (n *Namespace) Add(collector prometheus.Collector) { + n.mu.Lock() + n.metrics = append(n.metrics, collector) + n.mu.Unlock() +} + +func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { + name = makeName(name, unit) + namespace := n.name + if n.subsystem != "" { + namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) + } + name = fmt.Sprintf("%s_%s", namespace, name) + return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) +} + +// mergeLabels merges two or more labels objects into a single map, favoring +// the later labels. 
+func mergeLabels(lbs ...Labels) Labels {
+	merged := make(Labels)
+
+	for _, target := range lbs {
+		for k, v := range target {
+			merged[k] = v
+		}
+	}
+
+	return merged
+}
+
+func makeName(name string, unit Unit) string {
+	if unit == "" {
+		return name
+	}
+
+	return fmt.Sprintf("%s_%s", name, unit)
+}
diff --git a/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/docker/go-metrics/register.go
new file mode 100644
index 000000000..708358df0
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/register.go
@@ -0,0 +1,15 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Register adds all the metrics in the provided namespace to the global
+// metrics registry
+func Register(n *Namespace) {
+	prometheus.MustRegister(n)
+}
+
+// Deregister removes all the metrics in the provided namespace from the
+// global metrics registry
+func Deregister(n *Namespace) {
+	prometheus.Unregister(n)
+}
diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go
new file mode 100644
index 000000000..824c98739
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/timer.go
@@ -0,0 +1,85 @@
+package metrics
+
+import (
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// StartTimer begins a timer observation at the callsite. When the target
+// operation is completed, the caller should call the returned done func().
+func StartTimer(timer Timer) (done func()) {
+	start := time.Now()
+	return func() {
+		timer.Update(time.Since(start))
+	}
+}
+
+// Timer is a metric that allows collecting the duration of an action in seconds
+type Timer interface {
+	// Update records an observation of the given duration, converting it to
+	// the timer's target unit.
+	Update(duration time.Duration)
+
+	// UpdateSince will add the duration from the provided starting time to the
+	// timer's summary with the precision that was used in creation of the timer
+	UpdateSince(time.Time)
+}
+
+// LabeledTimer is a timer that must have label values populated before use.
+type LabeledTimer interface {
+	WithValues(labels ...string) *labeledTimerObserver
+}
+
+type labeledTimer struct {
+	m *prometheus.HistogramVec
+}
+
+type labeledTimerObserver struct {
+	m prometheus.Observer
+}
+
+func (lbo *labeledTimerObserver) Update(duration time.Duration) {
+	lbo.m.Observe(duration.Seconds())
+}
+
+func (lbo *labeledTimerObserver) UpdateSince(since time.Time) {
+	lbo.m.Observe(time.Since(since).Seconds())
+}
+
+func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver {
+	return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)}
+}
+
+func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) {
+	lt.m.Describe(c)
+}
+
+func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) {
+	lt.m.Collect(c)
+}
+
+type timer struct {
+	m prometheus.Observer
+}
+
+func (t *timer) Update(duration time.Duration) {
+	t.m.Observe(duration.Seconds())
+}
+
+func (t *timer) UpdateSince(since time.Time) {
+	t.m.Observe(time.Since(since).Seconds())
+}
+
+func (t *timer) Describe(c chan<- *prometheus.Desc) {
+	c <- t.m.(prometheus.Metric).Desc()
+}
+
+func (t *timer) Collect(c chan<- prometheus.Metric) {
+	// Are there any observers that don't implement Collector? It is really
+	// unclear what the point of the upstream change was, but we'll let this
+	// panic if we get an observer that doesn't implement collector. In this
+	// case, we should almost always see metricVec objects, so this should
+	// never panic.
+	t.m.(prometheus.Collector).Collect(c)
+}
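+
+// A minimal StartTimer usage sketch (illustrative; ns is a *Namespace from
+// this package and the metric name is hypothetical):
+//
+//	t := ns.NewTimer("request_duration", "Time taken to serve a request")
+//	done := StartTimer(t)
+//	// ... perform the operation being timed ...
+//	done()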
diff --git a/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/docker/go-metrics/unit.go
new file mode 100644
index 000000000..c96622f90
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/unit.go
@@ -0,0 +1,12 @@
+package metrics
+
+// Unit represents the type or precision of a metric that is appended to
+// the metric's fully qualified name
+type Unit string
+
+const (
+	Nanoseconds Unit = "nanoseconds"
+	Seconds     Unit = "seconds"
+	Bytes       Unit = "bytes"
+	Total       Unit = "total"
+)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE
new file mode 100644
index 000000000..b2b065037
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
new file mode 100644
index 000000000..68c8c46a8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
@@ -0,0 +1,245 @@
+# Go gRPC Interceptors for Prometheus monitoring
+
+[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
+[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
+[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+
+[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.
+
+A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus).
+
+## Interceptors
+
+[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed
+by a gRPC Server before the request is passed onto the user's application logic. It is a perfect way to implement
+common patterns: auth, logging and... monitoring.
+
+To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware).
+
+## Usage
+
+There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both.
+
+### Server-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+	// Initialize your gRPC server's interceptor.
+	myServer := grpc.NewServer(
+		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+	)
+	// Register your gRPC service implementations.
+	myservice.RegisterMyServiceServer(myServer, &myServiceImpl{})
+	// After all your registrations, make sure all of the Prometheus metrics are initialized.
+	grpc_prometheus.Register(myServer)
+	// Register Prometheus metrics handler.
+	http.Handle("/metrics", prometheus.Handler())
+...
+```
+
+### Client-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+	clientConn, err = grpc.Dial(
+		address,
+		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+	)
+	client = pb_testproto.NewTestServiceClient(clientConn)
+	resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
+...
+```
+
+# Metrics
+
+## Labels
+
+All server-side metrics start with `grpc_server` as Prometheus subsystem name. All client-side metrics start with `grpc_client`. The two mirror each other, and all methods
+carry the same rich labels:
+
+  * `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of protobuf `package` and
+    the `service` section name. E.g. for `package = mwitkow.testproto` and
+    `service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"`
+  * `grpc_method` - the name of the method called on the gRPC service. E.g.
+    `grpc_method="Ping"`
+  * `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle).
+    Differentiating between them is especially important for latency measurements.
+
+     - `unary` is a single request, single response RPC
+     - `client_stream` is a multi-request, single response RPC
+     - `server_stream` is a single request, multi-response RPC
+     - `bidi_stream` is a multi-request, multi-response RPC
+
+
+Additionally for completed RPCs, the following labels are used:
+
+  * `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go).
+    The list of all statuses is too long, but here are some common ones:
+
+     - `OK` - means the RPC was successful
+     - `InvalidArgument` - RPC contained bad values
+     - `Internal` - server-side error not disclosed to the clients
+
+## Counters
+
+The counters and their up-to-date documentation are in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go),
+and they are exposed by the respective Prometheus handler (usually `/metrics`).
+
+For the purpose of this documentation we will only discuss `grpc_server` metrics. The `grpc_client` ones mirror them.
+
+For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto),
+calling the method `PingList`. The call succeeds and returns 20 messages in the stream.
+
+First, immediately after the server receives the call it will increment the
+`grpc_server_started_total` and start the handling time clock (if histograms are enabled).
+
+```jsoniq
+grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+Then the user logic gets invoked. It receives one message from the client containing the request
+(it's a `server_stream`):
+
+```jsoniq
+grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+The user logic may return an error, or send multiple messages back to the client. In this case, on
+each of the 20 messages sent back, a counter will be incremented:
+
+```jsoniq
+grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
+```
+
+After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
+and the relevant call labels increment the `grpc_server_handled_total` counter.
+
+```jsoniq
+grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+## Histograms
+
+[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
+to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
+of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels),
+the latency monitoring metrics are disabled by default. To enable them please call the following
+in your server initialization code:
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram()
+```
+
+After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
+variable `grpc_server_handling_seconds`. It contains three sub-metrics:
+
+ * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method
+ * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for
+   calculating average handling times
+ * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective
+   handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/))
+
+The counter values will look as follows:
+
+```jsoniq
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1
+grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001
+grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
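+
+The buckets default to the Prometheus client's `DefBuckets`; they can be tuned
+when enabling the histogram (a sketch, the bucket boundaries below are arbitrary):
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram(
+	grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 1, 10}),
+)
+```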
+
+
+## Useful query examples
+
+The Prometheus philosophy is to provide the most detailed metrics possible to the monitoring system, and
+to let the aggregations be handled there. The verbosity of the above metrics makes that
+flexibility possible. Here are a couple of useful monitoring queries:
+
+
+### request inbound rate
+```jsoniq
+sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service)
+```
+For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the
+rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note
+how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together.
+
+### unary request error rate
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+```
+For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the
+ones that didn't finish with `OK` code.
+
+### unary request error percentage
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+ /
+sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service)
+ * 100.0
+```
+For `job="foo"`, calculate the percentage of failed requests by service. It is simply a combination of
+the two examples above. This is an example of a query you would like to
+[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g.
+"no more than 1% requests should fail".
+
+### average response stream size
+```jsoniq
+sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+ /
+sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+```
+For `job="foo"`, calculate the `grpc_service`-wide `10m` average number of messages returned for all
+`server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. to notice
+when clients start sending "wide" queries that return many messages.
+Note that the divisor is the number of started RPCs, in order to account for in-flight requests.
+
+### 99%-tile latency of unary requests
+```jsoniq
+histogram_quantile(0.99,
+  sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le)
+)
+```
+For `job="foo"`, returns a 99th-percentile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles)
+of the handling time of RPCs per service. Note the `5m` rate: the quantile
+estimation takes samples from a rolling `5m` window. When combined with other quantiles
+(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system
+(e.g. impact of caching).
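+
+The same query at another quantile is a drop-in change; for example, the median
+(a sketch following the pattern above):
+
+```jsoniq
+histogram_quantile(0.50,
+  sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le)
+)
+```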
+
+### percentage of slow unary queries (>250ms)
+```jsoniq
+100.0 - (
+sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service)
+ /
+sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service)
+) * 100.0
+```
+For `job="foo"`, calculate the per-`grpc_service` percentage of slow requests that took longer than `0.25`
+seconds. This query is relatively complex: since the Prometheus aggregations use `le` (less or equal)
+buckets, counting the fraction of "fast" requests is easier, and subtracting it from 100% gives the slow
+fraction. For example, if 95% of requests fall into the `le="0.25"` bucket, the query yields 100 - 95 = 5%.
+This is an example of a query you would like to alert on in your system for SLA violations,
+e.g. "less than 1% of requests are slower than 250ms".
+
+
+## Status
+
+This code has been used since August 2015 as the basis for monitoring of *production* gRPC microservices at [Improbable](https://improbable.io).
+
+## License
+
+`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
new file mode 100644
index 000000000..d9e87b2f7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
@@ -0,0 +1,72 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for client-side gRPC.
+
+package grpc_prometheus
+
+import (
+	"io"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+	monitor := newClientReporter(Unary, method)
+	monitor.SentMessage()
+	err := invoker(ctx, method, req, reply, cc, opts...)
+	// A reply message is only received when the invocation succeeds.
+	if err == nil {
+		monitor.ReceivedMessage()
+	}
+	monitor.Handled(grpc.Code(err))
+	return err
+}
+
+// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	monitor := newClientReporter(clientStreamType(desc), method)
+	clientStream, err := streamer(ctx, desc, cc, method, opts...)
+	if err != nil {
+		monitor.Handled(grpc.Code(err))
+		return nil, err
+	}
+	return &monitoredClientStream{clientStream, monitor}, nil
+}
+
+func clientStreamType(desc *grpc.StreamDesc) grpcType {
+	if desc.ClientStreams && !desc.ServerStreams {
+		return ClientStream
+	} else if !desc.ClientStreams && desc.ServerStreams {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredClientStream wraps grpc.ClientStream allowing each sent and received message to increment counters.
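+// RecvMsg additionally treats io.EOF as successful completion of the stream,
+// reporting it with codes.OK.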
+type monitoredClientStream struct { + grpc.ClientStream + monitor *clientReporter +} + +func (s *monitoredClientStream) SendMsg(m interface{}) error { + err := s.ClientStream.SendMsg(m) + if err == nil { + s.monitor.SentMessage() + } + return err +} + +func (s *monitoredClientStream) RecvMsg(m interface{}) error { + err := s.ClientStream.RecvMsg(m) + if err == nil { + s.monitor.ReceivedMessage() + } else if err == io.EOF { + s.monitor.Handled(codes.OK) + } else { + s.monitor.Handled(grpc.Code(err)) + } + return err +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go new file mode 100644 index 000000000..16b761553 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go @@ -0,0 +1,111 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_prometheus + +import ( + "time" + + "google.golang.org/grpc/codes" + + prom "github.com/prometheus/client_golang/prometheus" +) + +var ( + clientStartedCounter = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "grpc", + Subsystem: "client", + Name: "started_total", + Help: "Total number of RPCs started on the client.", + }, []string{"grpc_type", "grpc_service", "grpc_method"}) + + clientHandledCounter = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "grpc", + Subsystem: "client", + Name: "handled_total", + Help: "Total number of RPCs completed by the client, regardless of success or failure.", + }, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}) + + clientStreamMsgReceived = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "grpc", + Subsystem: "client", + Name: "msg_received_total", + Help: "Total number of RPC stream messages received by the client.", + }, []string{"grpc_type", "grpc_service", "grpc_method"}) + + clientStreamMsgSent = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "grpc", + Subsystem: "client", + Name: "msg_sent_total", + Help: "Total number of gRPC stream messages sent by the client.", + }, []string{"grpc_type", "grpc_service", "grpc_method"}) + + clientHandledHistogramEnabled = false + clientHandledHistogramOpts = prom.HistogramOpts{ + Namespace: "grpc", + Subsystem: "client", + Name: "handling_seconds", + Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.", + Buckets: prom.DefBuckets, + } + clientHandledHistogram *prom.HistogramVec +) + +func init() { + prom.MustRegister(clientStartedCounter) + prom.MustRegister(clientHandledCounter) + prom.MustRegister(clientStreamMsgReceived) + prom.MustRegister(clientStreamMsgSent) +} + +// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. 
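+// It accepts the same HistogramOption arguments as the server-side variant;
+// for example (a sketch, the bucket boundaries below are arbitrary):
+//
+//	grpc_prometheus.EnableClientHandlingTimeHistogram(
+//		grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 1, 10}),
+//	)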
+func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&clientHandledHistogramOpts)
+	}
+	if !clientHandledHistogramEnabled {
+		clientHandledHistogram = prom.NewHistogramVec(
+			clientHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+		prom.Register(clientHandledHistogram)
+	}
+	clientHandledHistogramEnabled = true
+}
+
+type clientReporter struct {
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newClientReporter(rpcType grpcType, fullMethod string) *clientReporter {
+	r := &clientReporter{rpcType: rpcType}
+	if clientHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *clientReporter) ReceivedMessage() {
+	clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) SentMessage() {
+	clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) Handled(code codes.Code) {
+	clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if clientHandledHistogramEnabled {
+		clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
new file mode 100644
index 000000000..f85c8c237
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
@@ -0,0 +1,74 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for server-side gRPC.
+
+package grpc_prometheus
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+// Register takes a gRPC server and pre-initializes all counters to 0.
+// This allows for easier monitoring in Prometheus (no missing metrics), and should be called *after* all services have
+// been registered with the server.
+func Register(server *grpc.Server) {
+	serviceInfo := server.GetServiceInfo()
+	for serviceName, info := range serviceInfo {
+		for _, mInfo := range info.Methods {
+			preRegisterMethod(serviceName, &mInfo)
+		}
+	}
+}
+
+// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	monitor := newServerReporter(Unary, info.FullMethod)
+	monitor.ReceivedMessage()
+	resp, err := handler(ctx, req)
+	monitor.Handled(grpc.Code(err))
+	if err == nil {
+		monitor.SentMessage()
+	}
+	return resp, err
+}
+
+// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	monitor := newServerReporter(streamRpcType(info), info.FullMethod)
+	err := handler(srv, &monitoredServerStream{ss, monitor})
+	monitor.Handled(grpc.Code(err))
+	return err
+}
+
+func streamRpcType(info *grpc.StreamServerInfo) grpcType {
+	if info.IsClientStream && !info.IsServerStream {
+		return ClientStream
+	} else if !info.IsClientStream && info.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredServerStream wraps grpc.ServerStream allowing each sent and received message to increment counters.
+type monitoredServerStream struct {
+	grpc.ServerStream
+	monitor *serverReporter
+}
+
+func (s *monitoredServerStream) SendMsg(m interface{}) error {
+	err := s.ServerStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredServerStream) RecvMsg(m interface{}) error {
+	err := s.ServerStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	}
+	return err
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
new file mode 100644
index 000000000..628a89056
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
@@ -0,0 +1,157 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+
+	prom "github.com/prometheus/client_golang/prometheus"
+	"google.golang.org/grpc"
+)
+
+type grpcType string
+
+const (
+	Unary        grpcType = "unary"
+	ClientStream grpcType = "client_stream"
+	ServerStream grpcType = "server_stream"
+	BidiStream   grpcType = "bidi_stream"
+)
+
+var (
+	serverStartedCounter = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "started_total",
+			Help:      "Total number of RPCs started on the server.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	serverHandledCounter = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "handled_total",
+			Help:      "Total number of RPCs completed on the server, regardless of success or failure.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"})
+
+	serverStreamMsgReceived = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "msg_received_total",
+			Help:      "Total number of RPC stream messages received on the server.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	serverStreamMsgSent = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "msg_sent_total",
+			Help:      "Total number of gRPC stream messages sent by the server.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	serverHandledHistogramEnabled = false
+	serverHandledHistogramOpts    = prom.HistogramOpts{
+		Namespace: "grpc",
+		Subsystem: "server",
+		Name:      "handling_seconds",
+		Help:      "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
+		Buckets:   prom.DefBuckets,
+	}
+	serverHandledHistogram *prom.HistogramVec
+)
+
+func init() {
+	prom.MustRegister(serverStartedCounter)
+	prom.MustRegister(serverHandledCounter)
+	prom.MustRegister(serverStreamMsgReceived)
+	prom.MustRegister(serverStreamMsgSent)
+}
+
+type HistogramOption func(*prom.HistogramOpts)
+
+// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
+func WithHistogramBuckets(buckets []float64) HistogramOption {
+	return func(o *prom.HistogramOpts) { o.Buckets = buckets }
+}
+
+// EnableHandlingTimeHistogram turns on recording of handling time of RPCs for server-side interceptors.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func EnableHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&serverHandledHistogramOpts)
+	}
+	if !serverHandledHistogramEnabled {
+		serverHandledHistogram = prom.NewHistogramVec(
+			serverHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+		prom.Register(serverHandledHistogram)
+	}
+	serverHandledHistogramEnabled = true
+}
+
+type serverReporter struct {
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newServerReporter(rpcType grpcType, fullMethod string) *serverReporter {
+	r := &serverReporter{rpcType: rpcType}
+	if serverHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *serverReporter) ReceivedMessage() {
+	serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *serverReporter) SentMessage() {
+	serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *serverReporter) Handled(code codes.Code) {
+	serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if serverHandledHistogramEnabled {
+		serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
+
+// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated.
+func preRegisterMethod(serviceName string, mInfo *grpc.MethodInfo) {
+	methodName := mInfo.Name
+	methodType := string(typeFromMethodInfo(mInfo))
+	// These are just references (no increments), as just referencing will create the labels but not set values.
+	serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	if serverHandledHistogramEnabled {
+		serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	}
+	for _, code := range allCodes {
+		serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
+	}
+}
+
+func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
+	if !mInfo.IsClientStream && !mInfo.IsServerStream {
+		return Unary
+	}
+	if mInfo.IsClientStream && !mInfo.IsServerStream {
+		return ClientStream
+	}
+	if !mInfo.IsClientStream && mInfo.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
new file mode 100644
index 000000000..372460ac4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
@@ -0,0 +1,27 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms. + +package grpc_prometheus + +import ( + "strings" + + "google.golang.org/grpc/codes" +) + +var ( + allCodes = []codes.Code{ + codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound, + codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted, + codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal, + codes.Unavailable, codes.DataLoss, + } +) + +func splitMethodName(fullMethodName string) (string, string) { + fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash + if i := strings.Index(fullMethodName, "/"); i >= 0 { + return fullMethodName[:i], fullMethodName[i+1:] + } + return "unknown", "unknown" +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 000000000..5d8cb5b72 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/README.md b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md new file mode 100644 index 000000000..751ee6967 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md @@ -0,0 +1,20 @@ +# Overview +This repository provides various Protocol Buffer extensions for the Go +language (golang), namely support for record length-delimited message +streaming. 
+ +| Java | Go | +| ------------------------------ | --------------------- | +| MessageLite#parseDelimitedFrom | pbutil.ReadDelimited | +| MessageLite#writeDelimitedTo | pbutil.WriteDelimited | + +Because [Code Review 9102043](https://codereview.appspot.com/9102043/) is +destined to never be merged into mainline (i.e., never be promoted to formal +[goprotobuf features](https://github.com/golang/protobuf)), this repository +will live here in the wild. + +# Documentation +We have [generated Go Doc documentation](http://godoc.org/github.com/matttproud/golang_protobuf_extensions/pbutil) here. + +# Testing +[![Build Status](https://travis-ci.org/matttproud/golang_protobuf_extensions.png?branch=master)](https://travis-ci.org/matttproud/golang_protobuf_extensions) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 000000000..258c0636a --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... 
+			continue
+		}
+		bytesRead += newBytesRead
+		// Now present everything read so far to the varint decoder and
+		// see if a varint can be decoded already.
+		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+	}
+
+	messageBuf := make([]byte, messageLength)
+	newBytesRead, err := io.ReadFull(r, messageBuf)
+	bytesRead += newBytesRead
+	if err != nil {
+		return bytesRead, err
+	}
+
+	return bytesRead, proto.Unmarshal(messageBuf, m)
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 000000000..c318385cb
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 000000000..8fb59ad22
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+	"encoding/binary"
+	"io"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain encoded
+// messages of the same type together in a file. It returns the total number
+// of bytes written and any applicable error. This is roughly equivalent to
+// the companion Java API's MessageLite#writeDelimitedTo.
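+//
+// A minimal round-trip sketch (an editor's illustration, not part of the
+// upstream file): msg is any populated proto.Message value and out is a
+// freshly allocated message of the same concrete type.
+//
+//	var buf bytes.Buffer
+//	if _, err := pbutil.WriteDelimited(&buf, msg); err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
+//		log.Fatal(err)
+//	}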
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 000000000..dd878a30e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md new file mode 100644 index 000000000..0eb0df1df --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/README.md @@ -0,0 +1,49 @@ +# Prometheus Go client library + +[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang) + +This is the [Go](http://golang.org) client library for +[Prometheus](http://prometheus.io). It has two separate parts, one for +instrumenting application code, and one for creating clients that talk to the +Prometheus HTTP API. 
+
+__This library requires Go 1.7 or later.__
+
+## Instrumenting applications
+
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus)
+
+The
+[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
+contains the instrumentation library. See the
+[best practices section](http://prometheus.io/docs/practices/naming/) of the
+Prometheus documentation to learn more about instrumenting applications.
+
+The
+[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
+contains simple examples of instrumented code.
+
+## Client for the Prometheus HTTP API
+
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus)
+
+The
+[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
+contains the client for the
+[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
+to write Go applications that query time series data from a Prometheus
+server. It is still in the alpha stage.
+
+## Where are `model`, `extraction`, and `text`?
+
+The `model` package has been moved to
+[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model).
+
+The `extraction` and `text` packages are now contained in
+[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt).
+
+## Contributing and community
+
+See the [contributing guidelines](CONTRIBUTING.md) and the
+[Community section](http://prometheus.io/community/) of the homepage.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 000000000..44986bff0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 000000000..623d3d83f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics.
A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. Blocking occurs at the expense of total performance of rendering + // all registered metrics. Ideally, Collector implementations support + // concurrent readers. + Collect(chan<- Metric) +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 000000000..765e4550c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,277 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. 
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair +} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + return populateMetric(CounterValue, val, c.labelPairs, out) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *metricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) 
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//	myVec.WithLabelValues("404", "GET").Add(42)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+	c, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+	c, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &CounterVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+	Metric
+	Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently.
If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 000000000..4a755b0fa --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,188 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. 
Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !model.IsValidMetricName(model.LabelValue(fqName)) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... + for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. 
If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+	return &Desc{
+		err: err,
+	}
+}
+
+func (d *Desc) String() string {
+	lpStrings := make([]string, 0, len(d.constLabelPairs))
+	for _, lp := range d.constLabelPairs {
+		lpStrings = append(
+			lpStrings,
+			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+		)
+	}
+	return fmt.Sprintf(
+		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+		d.fqName,
+		d.help,
+		strings.Join(lpStrings, ","),
+		d.variableLabels,
+	)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 000000000..36ef15567
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,186 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides metrics primitives to instrument code for
+// monitoring. It also offers a registry for metrics. Sub-packages allow you to
+// expose the registered metrics via HTTP (package promhttp) or push them to a
+// Pushgateway (package push).
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+//	package main
+//
+//	import (
+//		"log"
+//		"net/http"
+//
+//		"github.com/prometheus/client_golang/prometheus"
+//		"github.com/prometheus/client_golang/prometheus/promhttp"
+//	)
+//
+//	var (
+//		cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+//			Name: "cpu_temperature_celsius",
+//			Help: "Current temperature of the CPU.",
+//		})
+//		hdFailures = prometheus.NewCounterVec(
+//			prometheus.CounterOpts{
+//				Name: "hd_errors_total",
+//				Help: "Number of hard-disk errors.",
+//			},
+//			[]string{"device"},
+//		)
+//	)
+//
+//	func init() {
+//		// Metrics have to be registered to be exposed:
+//		prometheus.MustRegister(cpuTemp)
+//		prometheus.MustRegister(hdFailures)
+//	}
+//
+//	func main() {
+//		cpuTemp.Set(65.3)
+//		hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+//		// The Handler function provides a default handler to expose metrics
+//		// via an HTTP server. "/metrics" is the usual endpoint for that.
+//		http.Handle("/metrics", promhttp.Handler())
+//		log.Fatal(http.ListenAndServe(":8080", nil))
+//	}
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
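+//
+// For instance (an editor's sketch, not part of the upstream file), a
+// Histogram with explicit buckets is created, registered, and observed like
+// this:
+//
+//	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
+//		Name:    "request_duration_seconds",
+//		Help:    "Request latency distribution.",
+//		Buckets: prometheus.LinearBuckets(0.05, 0.05, 10),
+//	})
+//	prometheus.MustRegister(latency)
+//	latency.Observe(0.21)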
+// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. 
+// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegisterer comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// (The top-level functions in the prometheus package are deprecated.) +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. +package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go new file mode 100644 index 000000000..18a99d5fa --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package prometheus
+
+import (
+	"encoding/json"
+	"expvar"
+)
+
+type expvarCollector struct {
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+	return &expvarCollector{
+		exports: exports,
+	}
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+	for _, desc := range e.exports {
+		ch <- desc
+	}
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+	for name, desc := range e.exports {
+		var m Metric
+		expVar := expvar.Get(name)
+		if expVar == nil {
+			continue
+		}
+		var v interface{}
+		labels := make([]string, len(desc.variableLabels))
+		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+			ch <- NewInvalidMetric(desc, err)
+			continue
+		}
+		// processValue recursively walks the unmarshaled expvar value,
+		// descending one map level per variable label and emitting one
+		// metric per leaf (number or bool).
+		var processValue func(v interface{}, i int)
+		processValue = func(v interface{}, i int) {
+			if i >= len(labels) {
+				copiedLabels := append(make([]string, 0, len(labels)), labels...)
+				switch v := v.(type) {
+				case float64:
+					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+				case bool:
+					if v {
+						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+					} else {
+						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+					}
+				default:
+					return
+				}
+				ch <- m
+				return
+			}
+			vm, ok := v.(map[string]interface{})
+			if !ok {
+				return
+			}
+			for lv, val := range vm {
+				labels[i] = lv
+				processValue(val, i+1)
+			}
+		}
+		processValue(v, 0)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 000000000..e3b67df8a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,29 @@
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 000000000..17c72d7eb
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,286 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"math"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+	Metric
+	Collector
+
+	// Set sets the Gauge to an arbitrary value.
+	Set(float64)
+	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+	// values.
+	Inc()
+	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+	// values.
+	Dec()
+	// Add adds the given value to the Gauge. (The value can be negative,
+	// resulting in a decrease of the Gauge.)
+	Add(float64)
+	// Sub subtracts the given value from the Gauge. (The value can be
+	// negative, resulting in an increase of the Gauge.)
+	Sub(float64)
+
+	// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+	SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former.
For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *metricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. 
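+//
+// A hedged, illustrative sketch of typical use (the vector and label names
+// here are hypothetical, not part of this package):
+//
+//	queued := NewGaugeVec(GaugeOpts{
+//		Name: "ops_queued",
+//		Help: "Number of operations queued.",
+//	}, []string{"user"})
+//	g, err := queued.GetMetricWithLabelValues("alice")
+//	if err == nil {
+//		g.Inc()
+//	}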
+// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g +} + +// With works as GetMetricWith, but panics where GetMetricWith would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error.
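+//
+// For example (a hedged sketch; "region" and "shard" are hypothetical labels
+// of the uncurried vector):
+//
+//	east := myGaugeVec.MustCurryWith(Labels{"region": "east"})
+//	east.WithLabelValues("shard-1").Set(42) // only the remaining label is passed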
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 000000000..096454af9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,284 @@ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. +func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + 
"Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage 
collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.goInfoDesc + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 000000000..331783a75 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,505 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. 
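+//
+// A minimal, hedged sketch of creating and feeding a Histogram (the metric
+// name is hypothetical):
+//
+//	reqDur := NewHistogram(HistogramOpts{
+//		Name:    "request_duration_seconds",
+//		Help:    "Request latency distribution.",
+//		Buckets: DefBuckets,
+//	})
+//	reqDur.Observe(0.042)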
+// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. 
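+	//
+	// For example, Namespace "myapp", Subsystem "api", and Name
+	// "request_duration_seconds" yield the fully-qualified name
+	// "myapp_api_request_duration_seconds" (an illustrative combination,
+	// following the joining rule described above).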
+ Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + selfCollector + // Note that there is no mutex required. + + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. 
+ // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *metricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. 
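+//
+// A hedged sketch of typical use (the vector and its labels are
+// hypothetical):
+//
+//	o, err := latencies.GetMetricWithLabelValues("GET", "200")
+//	if err == nil {
+//		o.Observe(0.023)
+//	}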
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +// With works as GetMetricWith but panics where GetMetricWith would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error.
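+//
+// For example (illustrative only; "handler" and "method" are hypothetical
+// labels of the uncurried vector):
+//
+//	api := latencies.MustCurryWith(Labels{"handler": "api"})
+//	api.WithLabelValues("GET").Observe(0.012)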
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. +func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go new file mode 100644 index 000000000..bfee5c6eb --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -0,0 +1,524 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" +) + +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using promhttp.Handler instead +// (which is not instrumented, but can be instrumented with the tooling provided +// in package promhttp). +func Handler() http.Handler { + return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.Handler instead. See there for further documentation. +func UninstrumentedHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := DefaultGatherer.Gather() + if err != nil { + http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + }) +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). 
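+//
+// For example, a request sent with "Accept-Encoding: gzip, deflate" is
+// answered through a gzip.Writer and the returned header value is "gzip",
+// while a request that does not accept gzip gets the original writer and an
+// empty string.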
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +// +// Deprecated: InstrumentHandler has several issues. Use the tooling provided in +// package promhttp instead. The issues are the following: +// +// - It uses Summaries rather than Histograms. Summaries are not useful if +// aggregation across multiple instances is required. +// +// - It uses microseconds as unit, which is deprecated and should be replaced by +// seconds. +// +// - The size of the request is calculated in a separate goroutine. Since this +// calculator requires access to the request header, it creates a race with +// any writes to the header performed during request handling. +// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. +// +// - It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. 
The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + if err := Register(reqCnt); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + panic(err) + } + } + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + if err := Register(reqDur); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + if err := Register(reqSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." 
+ resSz := NewSummary(opts) + if err := Register(resSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + resSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := computeApproximateRequestSize(r) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + reqCnt.WithLabelValues(method, code).Inc() + reqDur.Observe(elapsed) + resSz.Observe(float64(delegate.written)) + reqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request) <-chan int { + // Get URL length in current go routine for avoiding a race condition. + // HandlerFunc that runs in parallel may modify the URL. + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + out := make(chan int, 1) + + go func() { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s + close(out) + }() + + return out +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: 
+ return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 000000000..2502e3734 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,57 @@ +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 000000000..6213ee812 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,158 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. (Implementers may find + // LabelPairSorter useful for that.) Callers of Write should still make + // sure of sorting if they depend on it. + Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g.
a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. +type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type hashSorter []uint64 + +func (s hashSorter) Len() int { + return len(s) +} + +func (s hashSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { + return s[i] < s[j] +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 000000000..5806cd09e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,52 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. 
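+//
+// A minimal, hedged sketch (myGauge is a hypothetical Gauge):
+//
+//	var o Observer = ObserverFunc(func(v float64) { myGauge.Set(v) })
+//	o.Observe(1.5) // calls the wrapped function with 1.5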
+// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 000000000..94b2553e1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,140 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + pid int + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, rss *Desc + startTime *Desc +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including cpu, memory and file descriptor usage as well as +// the process start time for the given process id under the given namespace. +func NewProcessCollector(pid int, namespace string) Collector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn returns a collector which exports the current state +// of process metrics including cpu, memory and file descriptor usage as well +// as the process start time under the given namespace. The given pidFn is +// called on each collect and is used to determine the process to export +// metrics for. 
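+//
+// A hypothetical usage sketch (the PID file path and the lookupPID helper are
+// illustrative assumptions, not part of this package):
+//
+//	pc := NewProcessCollectorPIDFn(
+//		func() (int, error) { return lookupPID("/run/mydaemon.pid") },
+//		"mydaemon",
+//	)
+//	MustRegister(pc)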
+func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) Collector { + ns := "" + if len(namespace) > 0 { + ns = namespace + "_" + } + + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } + + if limits, err := p.NewLimits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 000000000..73dde7beb --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,807 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package prometheus
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"sort"
+	"sync"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+const (
+	// Capacity for the channel to collect metrics and descriptors.
+	capMetricChan = 1000
+	capDescChan   = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (see NewProcessCollector) and a Go collector (see
+// NewGoCollector) already registered. This approach of keeping default
+// instances as global state mirrors the approach of other packages in the Go
+// standard library. Note that there are caveats. Change the variables with
+// caution and only if you understand the consequences. Users who want to
+// avoid global state altogether should not use the convenience functions and
+// act on custom instances instead.
+var (
+	defaultRegistry              = NewRegistry()
+	DefaultRegisterer Registerer = defaultRegistry
+	DefaultGatherer   Gatherer   = defaultRegistry
+)
+
+func init() {
+	MustRegister(NewProcessCollector(os.Getpid(), ""))
+	MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+	return &Registry{
+		collectorsByID:  map[uint64]Collector{},
+		descIDs:         map[uint64]struct{}{},
+		dimHashesByName: map[string]uint64{},
+	}
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+	r := NewRegistry()
+	r.pedanticChecksEnabled = true
+	return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer
+// implementations (e.g. for testing purposes).
+type Registerer interface {
+	// Register registers a new Collector to be included in metrics
+	// collection. It returns an error if the descriptors provided by the
+	// Collector are invalid or if they - in combination with descriptors
+	// of already registered Collectors - do not fulfill the consistency
+	// and uniqueness criteria described in the documentation of
+	// metric.Desc.
+	//
+	// If the provided Collector is equal to a Collector already registered
+	// (which includes the case of re-registering the same Collector), the
+	// returned error is an instance of AlreadyRegisteredError, which
+	// contains the previously registered Collector.
+	//
+	// It is in general not safe to register the same Collector multiple
+	// times concurrently.
+	Register(Collector) error
+	// MustRegister works like Register but registers any number of
+	// Collectors and panics upon the first registration that causes an
+	// error.
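+	//
+	// A short sketch (collector choices assumed for illustration):
+	//
+	//	reg := NewRegistry()
+	//	reg.MustRegister(NewGoCollector(), NewProcessCollector(os.Getpid(), ""))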
+ MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) The function + // returns whether a Collector was unregistered. + // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of MetricFamily protobufs. Even if an error occurs, Gather attempts + // to gather as many metrics as possible. Hence, if a non-nil error is + // returned, the returned MetricFamily slice could be nil (in case of a + // fatal error that prevented any meaningful metric collection) or + // contain a number of MetricFamily protobufs, some of which might be + // incomplete, and some might be missing altogether. The returned error + // (which might be a MultiError) explains the details. In scenarios + // where complete collection is critical, the returned MetricFamily + // protobufs should be disregarded if the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. 
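+//
+// A brief sketch of that switch-over pattern (requestCount is an assumed
+// Counter, for illustration only):
+//
+//	if err := Register(requestCount); err != nil {
+//		if are, ok := err.(AlreadyRegisteredError); ok {
+//			// A Collector with the same descriptors was registered
+//			// before; keep using the existing one.
+//			requestCount = are.ExistingCollector.(Counter)
+//		} else {
+//			panic(err)
+//		}
+//	}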
+type AlreadyRegisteredError struct {
+	ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+	return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+	if len(errs) == 0 {
+		return ""
+	}
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+	for _, err := range errs {
+		fmt.Fprintf(buf, "\n* %s", err)
+	}
+	return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+	if err != nil {
+		*errs = append(*errs, err)
+	}
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errs
+	}
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+	mtx                   sync.RWMutex
+	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs               map[uint64]struct{}
+	dimHashesByName       map[string]uint64
+	pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+	var (
+		descChan           = make(chan *Desc, capDescChan)
+		newDescIDs         = map[uint64]struct{}{}
+		newDimHashesByName = map[string]uint64{}
+		collectorID        uint64 // Just a sum of all desc IDs.
+		duplicateDescErr   error
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
+		if _, exists := r.descIDs[desc.id]; exists {
+			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+		}
+		// If it is not a duplicate desc in this collector, add it to
+		// the collectorID. (We allow duplicate descs within the same
+		// collector, but their existence must be a no-op.)
+		if _, exists := newDescIDs[desc.id]; !exists {
+			newDescIDs[desc.id] = struct{}{}
+			collectorID += desc.id
+		}
+
+		// Are all the label names and the help string consistent with
+		// previous descriptors of the same name?
+		// First check existing descriptors...
+		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+			}
+		} else {
+			// ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? + if len(newDescIDs) == 0 { + return errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return AlreadyRegisteredError{ + ExistingCollector: existing, + NewCollector: c, + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + metricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + collectors := make(chan Collector, len(r.collectorsByID)) + for _, collector := range r.collectorsByID { + collectors <- collector + } + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. + if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + r.mtx.RUnlock() + + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-collectors: + collector.Collect(metricChan) + wg.Done() + default: + return + } + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close the metricChan once all collectors are collected. + go func() { + wg.Wait() + close(metricChan) + }() + + // Drain metricChan in case of premature return. 
+	defer func() {
+		for range metricChan {
+		}
+	}()
+
+collectLoop:
+	for {
+		select {
+		case metric, ok := <-metricChan:
+			if !ok {
+				// metricChan is closed, we are done.
+				break collectLoop
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes, dimHashes,
+				registeredDescIDs,
+			))
+		default:
+			if goroutineBudget <= 0 || len(collectors) == 0 {
+				// All collectors are already being worked on, or
+				// we have already started as many goroutines as
+				// there are collectors. Just process metrics
+				// from now on.
+				for metric := range metricChan {
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes, dimHashes,
+						registeredDescIDs,
+					))
+				}
+				break collectLoop
+			}
+			// Start more workers.
+			go collectWorker()
+			goroutineBudget--
+			runtime.Gosched()
+		}
+	}
+	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+	metric Metric,
+	metricFamiliesByName map[string]*dto.MetricFamily,
+	metricHashes map[uint64]struct{},
+	dimHashes map[string]uint64,
+	registeredDescIDs map[uint64]struct{},
+) error {
+	desc := metric.Desc()
+	dtoMetric := &dto.Metric{}
+	if err := metric.Write(dtoMetric); err != nil {
+		return fmt.Errorf("error collecting metric %v: %s", desc, err)
+	}
+	metricFamily, ok := metricFamiliesByName[desc.fqName]
+	if ok {
+		if metricFamily.GetHelp() != desc.help {
+			return fmt.Errorf(
+				"collected metric %s %s has help %q but should have %q",
+				desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+			)
+		}
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch metricFamily.GetType() {
+		case dto.MetricType_COUNTER:
+			if dtoMetric.Counter == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Counter",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_GAUGE:
+			if dtoMetric.Gauge == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Gauge",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_SUMMARY:
+			if dtoMetric.Summary == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Summary",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_UNTYPED:
+			if dtoMetric.Untyped == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be Untyped",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_HISTOGRAM:
+			if dtoMetric.Histogram == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Histogram",
+					desc.fqName, dtoMetric,
+				)
+			}
+		default:
+			panic("encountered MetricFamily with invalid type")
+		}
+	} else {
+		metricFamily = &dto.MetricFamily{}
+		metricFamily.Name = proto.String(desc.fqName)
+		metricFamily.Help = proto.String(desc.help)
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch {
+		case dtoMetric.Gauge != nil:
+			metricFamily.Type = dto.MetricType_GAUGE.Enum()
+		case dtoMetric.Counter != nil:
+			metricFamily.Type = dto.MetricType_COUNTER.Enum()
+		case dtoMetric.Summary != nil:
+			metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+		case dtoMetric.Untyped != nil:
+			metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+		case dtoMetric.Histogram != nil:
+			metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+		default:
+			return fmt.Errorf("empty metric collected: %s", dtoMetric)
+		}
+		metricFamiliesByName[desc.fqName] = metricFamily
+	}
+	if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
+		return err
+	}
+	if registeredDescIDs != nil {
+		// Is the desc registered at all?
+		if _, exist := registeredDescIDs[desc.id]; !exist {
+			return fmt.Errorf(
+				"collected metric %s %s with unregistered descriptor %s",
+				metricFamily.GetName(), dtoMetric, desc,
+			)
+		}
+		if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+			return err
+		}
+	}
+	metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+	return nil
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+	var (
+		metricFamiliesByName = map[string]*dto.MetricFamily{}
+		metricHashes         = map[uint64]struct{}{}
+		dimHashes            = map[string]uint64{}
+		errs                 MultiError // The collected errors to return in the end.
+	)
+
+	for i, g := range gs {
+		mfs, err := g.Gather()
+		if err != nil {
+			if multiErr, ok := err.(MultiError); ok {
+				for _, err := range multiErr {
+					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+				}
+			} else {
+				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+			}
+		}
+		for _, mf := range mfs {
+			existingMF, exists := metricFamiliesByName[mf.GetName()]
+			if exists {
+				if existingMF.GetHelp() != mf.GetHelp() {
+					errs = append(errs, fmt.Errorf(
+						"gathered metric family %s has help %q but should have %q",
+						mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+					))
+					continue
+				}
+				if existingMF.GetType() != mf.GetType() {
+					errs = append(errs, fmt.Errorf(
+						"gathered metric family %s has type %s but should have %s",
+						mf.GetName(), mf.GetType(), existingMF.GetType(),
+					))
+					continue
+				}
+			} else {
+				existingMF = &dto.MetricFamily{}
+				existingMF.Name = mf.Name
+				existingMF.Help = mf.Help
+				existingMF.Type = mf.Type
+				metricFamiliesByName[mf.GetName()] = existingMF
+			}
+			for _, m := range mf.Metric {
+				if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+					errs = append(errs, err)
+					continue
+				}
+				existingMF.Metric = append(existingMF.Metric, m)
+			}
+		}
+	}
+	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+	return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+	if len(s[i].Label) != len(s[j].Label) {
+		// This should not happen. The metrics are
+		// inconsistent.
+		// However, we have to deal with the fact, as
+		// people might use custom collectors or metric family injection
+		// to create inconsistent metrics. So let's simply compare the
+		// number of labels in this case. That will still yield
+		// reproducible sorting.
+		return len(s[i].Label) < len(s[j].Label)
+	}
+	for n, lp := range s[i].Label {
+		vi := lp.GetValue()
+		vj := s[j].Label[n].GetValue()
+		if vi != vj {
+			return vi < vj
+		}
+	}
+
+	// We should never arrive here. Multiple metrics with the same
+	// label set in the same scrape will lead to undefined ingestion
+	// behavior. However, as above, we have to provide stable sorting
+	// here, even for inconsistent metrics. So sort equal metrics
+	// by their timestamp, with missing timestamps (implying "now")
+	// coming last.
+	if s[i].TimestampMs == nil {
+		return false
+	}
+	if s[j].TimestampMs == nil {
+		return true
+	}
+	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+	for _, mf := range metricFamiliesByName {
+		sort.Sort(metricSorter(mf.Metric))
+	}
+	names := make([]string, 0, len(metricFamiliesByName))
+	for name, mf := range metricFamiliesByName {
+		if len(mf.Metric) > 0 {
+			names = append(names, name)
+		}
+	}
+	sort.Strings(names)
+	result := make([]*dto.MetricFamily, 0, len(names))
+	for _, name := range names {
+		result = append(result, metricFamiliesByName[name])
+	}
+	return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// doesn't yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal
+// to the calculated dimHash.
+func checkMetricConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	metricHashes map[uint64]struct{},
+	dimHashes map[string]uint64,
+) error {
+	// Type consistency with metric family.
+	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+		return fmt.Errorf(
+			"collected metric %s %s is not a %s",
+			metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+		)
+	}
+
+	for _, labelPair := range dtoMetric.GetLabel() {
+		if !utf8.ValidString(*labelPair.Value) {
+			return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value)
+		}
+	}
+
+	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
+	h := hashNew()
+	h = hashAdd(h, metricFamily.GetName())
+	h = hashAddByte(h, separatorByte)
+	dh := hashNew()
+	// Make sure label pairs are sorted. We depend on it for the consistency
+	// check.
+ sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + dh = hashAdd(dh, lp.GetName()) + dh = hashAddByte(dh, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { + if dimHash != dh { + return fmt.Errorf( + "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", + metricFamily.GetName(), dtoMetric, + ) + } + } else { + dimHashes[metricFamily.GetName()] = dh + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 000000000..f7dc85b96 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,609 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. 
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations. However, the default behavior will change in the
+// upcoming v0.10 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+	Metric
+	Collector
+
+	// Observe adds a single observation to the summary.
+	Observe(float64)
+}
+
+// DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
+var (
+	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+	errQuantileLabelNotAllowed = fmt.Errorf(
+		"%q is not allowed as label name in summaries", quantileLabel,
+	)
+)
+
+// Default values for SummaryOpts.
+const (
+	// DefMaxAge is the default duration for which observations stay
+	// relevant.
+	DefMaxAge time.Duration = 10 * time.Minute
+	// DefAgeBuckets is the default number of buckets used to calculate the
+	// age of observations.
+	DefAgeBuckets = 5
+	// DefBufCap is the standard buffer size for collecting Summary observations.
+	DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. While all other fields
+// are optional and can safely be left at their zero value, it is recommended to
+// explicitly set the Objectives field to the desired value as the default value
+// will change in the upcoming v0.10 of the library.
+type SummaryOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Summary (created by joining these components with
+	// "_"). Only Name is mandatory; the others merely help to structure
+	// the name. Note that the fully-qualified name of the Summary must be
+	// a valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Summary. Mandatory!
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+	ConstLabels Labels
+
+	// Objectives defines the quantile rank estimates with their respective
+	// absolute error.
+	// If Objectives[q] = e, then the value reported for q
+	// will be the φ-quantile value for some φ between q-e and q+e. The
+	// default value is DefObjectives. It is used if Objectives is left at
+	// its zero value (i.e. nil). To create a Summary without Objectives,
+	// set it to an empty map (i.e. map[float64]float64{}).
+	//
+	// Deprecated: Note that the current value of DefObjectives is
+	// deprecated. It will be replaced by an empty map in v0.10 of the
+	// library. Please explicitly set Objectives to the desired value.
+	Objectives map[float64]float64
+
+	// MaxAge defines the duration for which an observation stays relevant
+	// for the summary. Must be positive. The default value is DefMaxAge.
+	MaxAge time.Duration
+
+	// AgeBuckets is the number of buckets used to exclude observations that
+	// are older than MaxAge from the summary. A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size. The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+	BufCap uint32
+}
+
+// Great fuck-up with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
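+//
+// A hedged example, with the metric name and objectives chosen purely for
+// illustration:
+//
+//	temps := NewSummary(SummaryOpts{
+//		Name:       "pond_temperature_celsius",
+//		Help:       "The temperature of the frog pond.",
+//		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//	})
+//	temps.Observe(30.2)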
+func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if opts.Objectives == nil { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. + + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. + s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. 
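+// It swaps the hot and cold buffers while holding mtx and then flushes the
+// cold buffer in a background goroutine, so the observing goroutine is
+// unblocked as soon as the swap has happened.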
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction. But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set.
+	for now.After(s.hotBufExpTime) {
+		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+	}
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+	return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+	return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+	*metricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &SummaryVec{
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newSummary(desc, opts, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments.
+// Consider GetMetricWith(Labels) as an alternative to avoid that type of
+// mistake. For higher label numbers, the latter has a much more readable
+// (albeit more verbose) syntax, but it comes with a performance overhead (for
+// creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+	s, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &SummaryVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
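+//
+// A short currying sketch (metric and label names assumed for illustration):
+//
+//	latency := NewSummaryVec(SummaryOpts{
+//		Name: "request_duration_seconds",
+//		Help: "Request latency by handler and method.",
+//	}, []string{"handler", "method"})
+//	getOnly := latency.MustCurryWith(Labels{"method": "GET"})
+//	// Only the "handler" label remains to be filled in.
+//	getOnly.WithLabelValues("home").Observe(0.32)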
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+type constSummary struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	quantiles  map[float64]float64
+	labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+	sum.SampleCount = proto.Uint64(s.count)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for rank, q := range s.quantiles {
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 000000000..b8fc5f18c
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
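+//
+// A hedged sketch of the Gauge use case mentioned in the ObserverFunc
+// documentation (the gauge name is an illustrative assumption):
+//
+//	g := NewGauge(GaugeOpts{
+//		Name: "last_run_duration_seconds",
+//		Help: "Duration of the last run.",
+//	})
+//	timer := NewTimer(ObserverFunc(g.Set))
+//	defer timer.ObserveDuration()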
+type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. ObserveDuration is +// usually called with a defer statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() { + if t.observer != nil { + t.observer.Observe(time.Since(t.begin).Seconds()) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 000000000..0f9ce63f4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 000000000..543b57c27 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,160 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 000000000..cea158249 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,469 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. +// It also handles label currying. It uses basicMetricVec internally. +type metricVec struct { + *metricMap + + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. + hashAdd func(h uint64, s string) uint64 + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized metricVec. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { + return &metricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). 
It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. 
+ } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. 
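The currying and deletion semantics documented above can be seen end to end in a short sketch. A CounterVec is used here, but the same applies to the other vector types; the metric name is hypothetical.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_requests_total",
		Help: "Requests by method and status code.",
	}, []string{"method", "code"})

	// Currying fixes the "method" label; the returned vector only takes
	// the remaining "code" label. Both vectors share the same metrics.
	getRequests := requests.MustCurryWith(prometheus.Labels{"method": "GET"})
	getRequests.WithLabelValues("200").Inc()

	// Deletion through the uncurried vector uses the full label set.
	// Delete(Labels) is harder to get wrong than DeleteLabelValues,
	// which relies purely on argument order.
	fmt.Println(requests.Delete(prometheus.Labels{"method": "GET", "code": "200"})) // true
	fmt.Println(requests.DeleteLabelValues("GET", "200"))                           // false, already gone
}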
+func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + + i := findMetricWithLabelValues(metrics, lvs, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + metrics, ok := m.metrics[h] + if !ok { + return false + } + i := findMetricWithLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) + if !ok { + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) + if !ok { + lvs := extractLabelValues(m.desc, labels, curry) + metric = m.newMetric(lvs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. +func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithHashAndLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. 
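The get-or-create methods above follow a read-then-recheck locking discipline: look up under the read lock first, and re-check under the write lock before creating, since another goroutine may have won the race in between. A stand-alone sketch of the same pattern, with a hypothetical cache type:

package main

import (
	"fmt"
	"sync"
)

type lookupCache struct {
	mtx sync.RWMutex
	m   map[string]int
}

func (c *lookupCache) getOrCreate(key string, create func() int) int {
	// Fast path: most lookups succeed under the cheaper read lock.
	c.mtx.RLock()
	v, ok := c.m[key]
	c.mtx.RUnlock()
	if ok {
		return v
	}

	c.mtx.Lock()
	defer c.mtx.Unlock()
	// Re-check: another goroutine may have created the value between
	// releasing the read lock and acquiring the write lock.
	if v, ok := c.m[key]; ok {
		return v
	}
	v = create()
	c.m[key] = v
	return v
}

func main() {
	c := &lookupCache{m: map[string]int{}}
	fmt.Println(c.getOrCreate("a", func() int { return 1 }))
	fmt.Println(c.getOrCreate("a", func() int { return 2 })) // still 1
}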
+func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 000000000..20110e410 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. 
(http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/README.md b/vendor/github.com/prometheus/client_model/README.md new file mode 100644 index 000000000..a710042db --- /dev/null +++ b/vendor/github.com/prometheus/client_model/README.md @@ -0,0 +1,26 @@ +# Background +Under most circumstances, manually downloading this repository should never +be required. + +# Prerequisites +# Base +* [Google Protocol Buffers](https://developers.google.com/protocol-buffers) + +## Java +* [Apache Maven](http://maven.apache.org) +* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository + +## Go +* [Go](http://golang.org) +* [goprotobuf](https://code.google.com/p/goprotobuf) + +## Ruby +* [Ruby](https://www.ruby-lang.org) +* [bundler](https://rubygems.org/gems/bundler) + +# Building + $ make + +# Getting Started + * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go). + * All of the core developers are accessible via the [Prometheus Developers Mailinglist](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 000000000..b065f8683 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,364 @@ +// Code generated by protoc-gen-go. +// source: metrics.proto +// DO NOT EDIT! + +/* +Package io_prometheus_client is a generated protocol buffer package. + +It is generated from these files: + metrics.proto + +It has these top-level messages: + LabelPair + Gauge + Counter + Quantile + Summary + Untyped + Histogram + Bucket + Metric + MetricFamily +*/ +package io_prometheus_client + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
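The generated getters that follow are nil-safe: called on a nil message or an unset optional field, they return the zero value instead of panicking. A short sketch of that behavior, assuming the vendored import paths for the dto and proto packages:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	m := &dto.Metric{
		Label: []*dto.LabelPair{
			{Name: proto.String("method"), Value: proto.String("GET")},
		},
		Counter: &dto.Counter{Value: proto.Float64(42)},
	}
	// GetCounter returns the set *Counter; GetValue dereferences it.
	fmt.Println(m.GetCounter().GetValue()) // 42
	// Gauge is unset, so GetGauge returns nil, and GetValue on the nil
	// *Gauge safely returns 0.
	fmt.Println(m.GetGauge().GetValue()) // 0
}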
+var _ = proto.Marshal +var _ = math.Inf + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + 
} + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + 
return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 000000000..636a2c1a5 --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md new file mode 100644 index 000000000..11a584945 --- /dev/null +++ b/vendor/github.com/prometheus/common/README.md @@ -0,0 +1,12 @@ +# Common +[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common) + +This repository contains Go libraries that are shared across Prometheus +components and libraries. 
+ +* **config**: Common configuration structures +* **expfmt**: Decoding and encoding for the exposition format +* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus) +* **model**: Shared data structures +* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context` +* **version**: Version informations and metric diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 000000000..a7a42d5ef --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +// DecodeOptions contains options used by the Decoder and in sample extraction. +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. + Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. +func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const textType = "text/plain" + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. 
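A minimal sketch of consuming the text exposition format with the Decoder defined in this file; the input is a hypothetical scrape body, and decoding runs until io.EOF once all metric families are consumed.

package main

import (
	"fmt"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := strings.NewReader(`# TYPE http_requests_total counter
http_requests_total{method="get"} 1027
`)
	dec := expfmt.NewDecoder(in, expfmt.FmtText)

	var mf dto.MetricFamily
	for {
		if err := dec.Decode(&mf); err != nil {
			break // io.EOF after the last family; other errors end the loop too.
		}
		fmt.Println(mf.GetName(), mf.GetType(), len(mf.GetMetric()))
	}
}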
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protocol. +type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics. + fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. +func (sd *SampleDecoder) Decode(s *model.Vector) error { + err := sd.Dec.Decode(&sd.f) + if err != nil { + return err + } + *s, err = extractSamples(&sd.f, sd.Opts) + return err +} + +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occured. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) + for _, f := range fams { + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...) 
+ } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. 
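+			// (The +Inf bucket by definition contains every observation,
+			// so its cumulative count equals the overall sample count
+			// reused below from the _count sample.)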
+ lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 000000000..11839ed65 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. 
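+//
+// A hedged usage sketch (w, req and metricFamilies are assumed to be
+// supplied by the caller; the format would typically come from
+// Negotiate(req.Header)):
+//
+//	enc := NewEncoder(w, Negotiate(req.Header))
+//	for _, mf := range metricFamilies {
+//		if err := enc.Encode(mf); err != nil {
+//			return err
+//		}
+//	}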
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 000000000..371ac7503 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 000000000..dc2eedeef --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+//	go-fuzz-build github.com/prometheus/common/expfmt
+//	go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+	parser := TextParser{}
+	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 000000000..f11321cd0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,303 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+	var written int
+
+	// Fail-fast checks.
+	if len(in.Metric) == 0 {
+		return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+	}
+	name := in.GetName()
+	if name == "" {
+		return written, fmt.Errorf("MetricFamily has no name: %s", in)
+	}
+
+	// Comments, first HELP, then TYPE.
+	if in.Help != nil {
+		n, err := fmt.Fprintf(
+			out, "# HELP %s %s\n",
+			name, escapeString(*in.Help, false),
+		)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	metricType := in.GetType()
+	n, err := fmt.Fprintf(
+		out, "# TYPE %s %s\n",
+		name, strings.ToLower(metricType.String()),
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	// Finally the samples, one line for each.
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Counter.GetValue(), + out, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Gauge.GetValue(), + out, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Untyped.GetValue(), + out, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + name, metric, + model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + q.GetValue(), + out, + ) + written += n + if err != nil { + return written, err + } + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Summary.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Summary.GetSampleCount()), + out, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, q := range metric.Histogram.Bucket { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, fmt.Sprint(q.GetUpperBound()), + float64(q.GetCumulativeCount()), + out, + ) + written += n + if err != nil { + return written, err + } + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, "+Inf", + float64(metric.Histogram.GetSampleCount()), + out, + ) + if err != nil { + return written, err + } + written += n + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Histogram.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Histogram.GetSampleCount()), + out, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. 
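+//
+// For illustration, a produced sample line looks like
+//
+//	http_requests_total{method="post",code="200"} 1027 1395066363000
+//
+// where the trailing timestamp is only written if metric.TimestampMs is set.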
+func writeSample( + name string, + metric *dto.Metric, + additionalLabelName, additionalLabelValue string, + value float64, + out io.Writer, +) (int, error) { + var written int + n, err := fmt.Fprint(out, name) + written += n + if err != nil { + return written, err + } + n, err = labelPairsToText( + metric.Label, + additionalLabelName, additionalLabelValue, + out, + ) + written += n + if err != nil { + return written, err + } + n, err = fmt.Fprintf(out, " %v", value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + n, err = out.Write([]byte{'\n'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. +func labelPairsToText( + in []*dto.LabelPair, + additionalLabelName, additionalLabelValue string, + out io.Writer, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var written int + separator := '{' + for _, lp := range in { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, lp.GetName(), escapeString(lp.GetValue(), true), + ) + written += n + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, additionalLabelName, + escapeString(additionalLabelValue, true), + ) + written += n + if err != nil { + return written, err + } + } + n, err := out.Write([]byte{'}'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +var ( + escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) + escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +func escapeString(v string, includeDoubleQuote bool) string { + if includeDoubleQuote { + return escapeWithDoubleQuote.Replace(v) + } + + return escape.Replace(v) +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 000000000..54bcfde29 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,757 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package expfmt
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+	Line int
+	Msg  string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+	return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+	metricFamiliesByName map[string]*dto.MetricFamily
+	buf                  *bufio.Reader // Where the parsed input is read through.
+	err                  error         // Most recent error.
+	lineCount            int           // Tracks the line count for error messages.
+	currentByte          byte          // The most recent byte read.
+	currentToken         bytes.Buffer  // Re-used each time a token has to be gathered from multiple bytes.
+	currentMF            *dto.MetricFamily
+	currentMetric        *dto.Metric
+	currentLabelPair     *dto.LabelPair
+
+	// The remaining member variables are only used for summaries/histograms.
+	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+	// Summary specific.
+	summaries       map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+	currentQuantile float64
+	// Histogram specific.
+	histograms    map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+	currentBucket float64
+	// These tell us if the currently processed line ends in '_count' or
+	// '_sum' respectively and belongs to a summary/histogram, representing the sample
+	// count and sum of that summary/histogram.
+	currentIsSummaryCount, currentIsSummarySum     bool
+	currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. The same is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently.
If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. 
+ return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. 
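+	// (The signature below is computed over p.currentLabels, which holds
+	// '__name__' and all 'real' labels but excludes 'quantile'/'le', so
+	// e.g. all quantile lines of one summary map to the same dto.Metric.)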
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. 
+	}
+	if p.currentToken.Len() > 0 {
+		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+		return nil
+	}
+	return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+	if p.currentMF.Help != nil {
+		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the docstring.
+	if p.readTokenUntilNewline(true); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	p.currentMF.Help = proto.String(p.currentToken.String())
+	return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+	if p.currentMF.Type != nil {
+		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the type.
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+	if !ok {
+		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMF.Type = dto.MetricType(metricType).Enum()
+	return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+	p.err = ParseError{
+		Line: p.lineCount,
+		Msg:  msg,
+	}
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+			return
+		}
+	}
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+	if isBlankOrTab(p.currentByte) {
+		p.skipBlankTab()
+	}
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+	p.currentToken.Reset()
+	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
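+//
+// For example, with recognizeEscapeSequence set, the input
+//
+//	first line\nsecond\\third
+//
+// is gathered as "first line", a real line feed, "second", a literal
+// backslash, and "third".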
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
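+	// (E.g. for a line named "rpc_latency_count", summaryMetricName below
+	// strips the suffix and yields "rpc_latency"; if a SUMMARY family of
+	// that name is already known, this sample carries its sample count.
+	// The metric name is illustrative only.)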
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 000000000..7723656d5 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 000000000..648b38cb6 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 000000000..35e739c7a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package model
+
+import (
+	"fmt"
+	"time"
+)
+
+type AlertStatus string
+
+const (
+	AlertFiring   AlertStatus = "firing"
+	AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+	// Label value pairs for purpose of aggregation, matching, and disposition
+	// dispatching. This must minimally include an "alertname" label.
+	Labels LabelSet `json:"labels"`
+
+	// Extra key/value information which does not define alert identity.
+	Annotations LabelSet `json:"annotations"`
+
+	// The known time range for this alert. Both ends are optional.
+	StartsAt     time.Time `json:"startsAt,omitempty"`
+	EndsAt       time.Time `json:"endsAt,omitempty"`
+	GeneratorURL string    `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+	return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+	return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+	if a.Resolved() {
+		return s + "[resolved]"
+	}
+	return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+	return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+	if a.EndsAt.IsZero() {
+		return false
+	}
+	return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+	if a.Resolved() {
+		return AlertResolved
+	}
+	return AlertFiring
+}
+
+// Validate checks whether the alert data is consistent and returns an error
+// if it is not.
+func (a *Alert) Validate() error {
+	if a.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+		return fmt.Errorf("start time must be before end time")
+	}
+	if err := a.Labels.Validate(); err != nil {
+		return fmt.Errorf("invalid label set: %s", err)
+	}
+	if len(a.Labels) == 0 {
+		return fmt.Errorf("at least one label pair required")
+	}
+	if err := a.Annotations.Validate(); err != nil {
+		return fmt.Errorf("invalid annotations: %s", err)
+	}
+	return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int      { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+	if as[i].StartsAt.Before(as[j].StartsAt) {
+		return true
+	}
+	if as[i].EndsAt.Before(as[j].EndsAt) {
+		return true
+	}
+	return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+	for _, a := range as {
+		if !a.Resolved() {
+			return true
+		}
+	}
+	return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 000000000..fc4de4106 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 000000000..038fc1c90 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 000000000..41051a01a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+	AlertNameLabel = "alertname"
+
+	// ExportedLabelPrefix is the prefix to prepend to the label names present in
+	// exported metrics if a label of the same name is added by the server.
+	ExportedLabelPrefix = "exported_"
+
+	// MetricNameLabel is the label name indicating the metric name of a
+	// timeseries.
+	MetricNameLabel = "__name__"
+
+	// SchemeLabel is the name of the label that holds the scheme on which to
+	// scrape a target.
+	SchemeLabel = "__scheme__"
+
+	// AddressLabel is the name of the label that holds the address of
+	// a scrape target.
+	AddressLabel = "__address__"
+
+	// MetricsPathLabel is the name of the label that holds the path on which to
+	// scrape a target.
+	MetricsPathLabel = "__metrics_path__"
+
+	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
+	// label names.
+	ReservedLabelPrefix = "__"
+
+	// MetaLabelPrefix is a prefix for labels that provide meta information.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series.
+	MetaLabelPrefix = "__meta_"
+
+	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series. This is reserved for use in
+	// Prometheus configuration files by users.
+	TmpLabelPrefix = "__tmp_"
+
+	// ParamLabelPrefix is a prefix for labels that provide URL parameters
+	// used to scrape a target.
+	ParamLabelPrefix = "__param_"
+
+	// JobLabel is the label name indicating the job from which a timeseries
+	// was scraped.
+	JobLabel = "job"
+
+	// InstanceLabel is the label name used for the instance label.
+	InstanceLabel = "instance"
+
+	// BucketLabel is used for the label that defines the upper bound of a
+	// bucket of a histogram ("le" -> "less or equal").
+	BucketLabel = "le"
+
+	// QuantileLabel is used for the label that defines the quantile in a
+	// summary.
+	QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+	if len(ln) == 0 {
+		return false
+	}
+	for i, b := range ln {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+			return false
+		}
+	}
+	return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+	return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+	return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+	labelStrings := make([]string, 0, len(l))
+	for _, label := range l {
+		labelStrings = append(labelStrings, string(label))
+	}
+	return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+	return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+	return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+	return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+	Name  LabelName
+	Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+	return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+	switch {
+	case l[i].Name > l[j].Name:
+		return false
+	case l[i].Name < l[j].Name:
+		return true
+	case l[i].Value > l[j].Value:
+		return false
+	case l[i].Value < l[j].Value:
+		return true
+	default:
+		return false
+	}
+}
+
+func (l LabelPairs) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 000000000..6eda08a73
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+	for ln, lv := range ls {
+		if !ln.IsValid() {
+			return fmt.Errorf("invalid name %q", ln)
+		}
+		if !lv.IsValid() {
+			return fmt.Errorf("invalid value %q", lv)
+		}
+	}
+	return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+	if len(ls) != len(o) {
+		return false
+	}
+	for ln, lv := range ls {
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if olv != lv {
+			return false
+		}
+	}
+	return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in ls, then ls is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+	if len(ls) < len(o) {
+		return true
+	}
+	if len(ls) > len(o) {
+		return false
+	}
+
+	lns := make(LabelNames, 0, len(ls)+len(o))
+	for ln := range ls {
+		lns = append(lns, ln)
+	}
+	for ln := range o {
+		lns = append(lns, ln)
+	}
+	// It's probably not worth it to de-dup lns.
+	sort.Sort(lns)
+	for _, ln := range lns {
+		mlv, ok := ls[ln]
+		if !ok {
+			return true
+		}
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if mlv < olv {
+			return true
+		}
+		if mlv > olv {
+			return false
+		}
+	}
+	return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 000000000..f7250909b --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,103 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + separator = []byte{0} + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 000000000..a7b969170 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 000000000..8762b13c6 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
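To see how Metric, its validity check, and the fingerprinting defined in signature.go below fit together, here is a small sketch against the vendored github.com/prometheus/common/model package (the fingerprint value itself is not asserted):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "GET",
		"code":                "200",
	}

	// String renders the exposition-style form, with label pairs sorted:
	// http_requests_total{code="200", method="GET"}
	fmt.Println(m)

	// Fingerprint hashes the sorted label pairs into a stable 64-bit
	// identity for the series (see signature.go below).
	fmt.Println(m.Fingerprint())

	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("2xx"))                 // false: must not start with a digit
}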
+
+package model
+
+import (
+	"sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+	// cache the signature of an empty label set.
+	emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make([]string, 0, len(labels))
+	for labelName := range labels {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Strings(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, labelName)
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, labels[labelName])
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	labelNames := make(LabelNames, 0, len(ls))
+	for labelName := range ls {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(ls[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more susceptible to
+// hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	var result uint64
+	for labelName, labelValue := range ls {
+		sum := hashNew()
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(labelValue))
+		result ^= sum
+	}
+	return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	sort.Sort(LabelNames(labels))
+
+	sum := hashNew()
+	for _, label := range labels {
+		sum = hashAdd(sum, string(label))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[label]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+	if len(m) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		if _, exclude := labels[labelName]; !exclude {
+			labelNames = append(labelNames, labelName)
+		}
+	}
+	if len(labelNames) == 0 {
+		return emptyLabelSignature
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 000000000..7538e2997
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"time"
+)
+
+// Matcher describes how to match the value of a given label.
+type Matcher struct {
+	Name    LabelName `json:"name"`
+	Value   string    `json:"value"`
+	IsRegex bool      `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+	type plain Matcher
+	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+		return err
+	}
+
+	if len(m.Name) == 0 {
+		return fmt.Errorf("label name in matcher must not be empty")
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Validate returns an error iff any field of the matcher has an invalid value.
+func (m *Matcher) Validate() error {
+	if !m.Name.IsValid() {
+		return fmt.Errorf("invalid name %q", m.Name)
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return fmt.Errorf("invalid regular expression %q", m.Value)
+		}
+	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+		return fmt.Errorf("invalid value %q", m.Value)
+	}
+	return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus eco-system.
+type Silence struct {
+	ID uint64 `json:"id,omitempty"`
+
+	Matchers []*Matcher `json:"matchers"`
+
+	StartsAt time.Time `json:"startsAt"`
+	EndsAt   time.Time `json:"endsAt"`
+
+	CreatedAt time.Time `json:"createdAt,omitempty"`
+	CreatedBy string    `json:"createdBy"`
+	Comment   string    `json:"comment,omitempty"`
+}
+
+// Validate returns an error iff any field of the silence has an invalid value.
+func (s *Silence) Validate() error {
+	if len(s.Matchers) == 0 {
+		return fmt.Errorf("at least one matcher required")
+	}
+	for _, m := range s.Matchers {
+		if err := m.Validate(); err != nil {
+			return fmt.Errorf("invalid matcher: %s", err)
+		}
+	}
+	if s.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if s.EndsAt.IsZero() {
+		return fmt.Errorf("end time missing")
+	}
+	if s.EndsAt.Before(s.StartsAt) {
+		return fmt.Errorf("start time must be before end time")
+	}
+	if s.CreatedBy == "" {
+		return fmt.Errorf("creator information missing")
+	}
+	if s.Comment == "" {
+		return fmt.Errorf("comment missing")
+	}
+	if s.CreatedAt.IsZero() {
+		return fmt.Errorf("creation timestamp missing")
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 000000000..74ed5a9f7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,264 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work.
+	minimumTick = time.Millisecond
+	// second is the Time duration equivalent to one second.
+	second = int64(time.Second / minimumTick)
+	// The number of nanoseconds per minimum tick.
+	nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+	// Earliest is the earliest Time representable. Handy for
+	// initializing a high watermark.
+	Earliest = Time(math.MinInt64)
+	// Latest is the latest Time representable. Handy for initializing
+	// a low watermark.
+	Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+	Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+	return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+	return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+	return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+	return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+	return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+	return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+	return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + *t = Time(v + va) + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. 
+func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + if ms == 0 { + return "0s" + } + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 000000000..c9ed3ffd8 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. 
+	ZeroSample = Sample{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return fmt.Errorf("sample value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = SampleValue(f)
+	return nil
+}
+
+// Equal returns true if the values of v and o are equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+	if v == o {
+		return true
+	}
+	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+	Timestamp Time
+	Value     SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Value)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+	Metric    Metric      `json:"metric"`
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+	if s == o {
+		return true
+	}
+
+	if !s.Metric.Equal(o.Metric) {
+		return false
+	}
+	if !s.Timestamp.Equal(o.Timestamp) {
+		return false
+	}
+
+	return s.Value.Equal(o.Value)
+}
+
+func (s Sample) String() string {
+	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+		Timestamp: s.Timestamp,
+		Value:     s.Value,
+	})
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	s.Metric = v.Metric
+	s.Timestamp = v.Value.Timestamp
+	s.Value = v.Value.Value
+
+	return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+	return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+	switch {
+	case s[i].Metric.Before(s[j].Metric):
+		return true
+	case s[j].Metric.Before(s[i].Metric):
+		return false
+	case s[i].Timestamp.Before(s[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+func (s Samples) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, sample := range s {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+	Metric Metric       `json:"metric"`
+	Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+	vals := make([]string, len(ss.Values))
+	for i, v := range ss.Values {
+		vals[i] = v.String()
+	}
+	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+	Type() ValueType
+	String() string
+}
+
+func (Matrix) Type() ValueType  { return ValMatrix }
+func (Vector) Type() ValueType  { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+	ValNone ValueType = iota
+	ValScalar
+	ValVector
+	ValMatrix
+	ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+	return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	switch s {
+	case "":
+		*et = ValNone
+	case "scalar":
+		*et = ValScalar
+	case "vector":
+		*et = ValVector
+	case "matrix":
+		*et = ValMatrix
+	case "string":
+		*et = ValString
+	default:
+		return fmt.Errorf("unknown value type %q", s)
+	}
+	return nil
+}
+
+func (e ValueType) String() string {
+	switch e {
+	case ValNone:
+		return ""
+	case ValScalar:
+		return "scalar"
+	case ValVector:
+		return "vector"
+	case ValMatrix:
+		return "matrix"
+	case ValString:
+		return "string"
+	}
+	panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+	return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+	return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. +type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
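The model package's custom duration notation and millisecond-resolution timestamps, defined earlier in this patch, round-trip as in this short sketch. It is a hedged example that relies only on ParseDuration, Duration.String, and TimeFromUnix as vendored above:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/common/model"
)

func main() {
	// ParseDuration accepts a single integer plus one unit: y, w, d, h, m, s, ms.
	// A year is fixed at 365d, a week at 7d, a day at 24h.
	d, err := model.ParseDuration("90m")
	if err != nil {
		log.Fatal(err)
	}

	// String picks the largest unit that divides the value evenly, so the
	// notation round-trips: 90m stays 90m rather than becoming 1h30m.
	fmt.Println(d) // 90m

	// model.Time stores milliseconds since the Unix epoch.
	t := model.TimeFromUnix(1519171200)
	fmt.Println(t.Time().UTC()) // 2018-02-21 00:00:00 +0000 UTC
}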
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 000000000..53c5e9aa1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md new file mode 100644 index 000000000..209549471 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/README.md @@ -0,0 +1,11 @@ +# procfs + +This procfs package provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. + +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 000000000..d3a826807 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,95 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// NewBuddyInfo reads the buddyinfo statistics. +func NewBuddyInfo() ([]BuddyInfo, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewBuddyInfo() +} + +// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 000000000..e2acd6d40 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 000000000..36c1586a1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,69 @@ +package procfs + +import ( + "fmt" + "os" + "path" + + "github.com/prometheus/procfs/nfs" + "github.com/prometheus/procfs/xfs" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. 
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
+}
+
+// Path returns the path of the given subsystem relative to the procfs root.
+func (fs FS) Path(p ...string) string {
+	return path.Join(append([]string{string(fs)}, p...)...)
+}
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+	f, err := os.Open(fs.Path("fs/xfs/stat"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return xfs.ParseStats(f)
+}
+
+// NFSdClientRPCStats retrieves NFS client RPC statistics.
+func (fs FS) NFSdClientRPCStats() (*nfs.ClientRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfs"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseClientRPCStats(f)
+}
+
+// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfsd"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseServerRPCStats(f)
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
new file mode 100644
index 000000000..1ad21c91a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import "strconv"
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+	us := make([]uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, u)
+	}
+
+	return us, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 000000000..5761b4570
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,246 @@
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+	// Total count of connections.
+	Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+	// Total incoming traffic.
+ IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The remote (real) IP address. + RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The remote (real) port. + RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return IPVSStats{}, err + } + + return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 000000000..d7a248c0d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,138 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device consists of. + DisksTotal int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. 
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := fs.Path("mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) + if err != nil { + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + mdStates := []MDStat{} + lines := strings.Split(string(content), "\n") + for i, l := range lines { + if l == "" { + continue + } + if l[0] == ' ' { + continue + } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + continue + } + + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) + } + mdName := mainLine[0] + activityState := mainLine[2] + + if len(lines) <= i+3 { + return mdStates, fmt.Errorf( + "error parsing %s: too few lines for md device %s", + mdStatusFilePath, + mdName, + ) + } + + active, total, size, err := evalStatusline(lines[i+1]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + // j is the line number of the syncing-line. + j := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + j = i + 3 + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. + syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 000000000..6b2b0ba9d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,556 @@ +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. 
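Editor's note: before the mountstats implementation below, a short usage sketch for the mdstat parser vendored just above — assuming an FS constructed from the default `/proc` mount; the field names come from the MDStat struct in mdstat.go:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatalf("could not open /proc: %s", err)
	}

	// ParseMDStat reports one MDStat entry per md device in /proc/mdstat.
	devices, err := fs.ParseMDStat()
	if err != nil {
		log.Fatalf("could not parse mdstat: %s", err)
	}
	for _, md := range devices {
		fmt.Printf("%s [%s]: disks %d/%d, blocks synced %d/%d\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal)
	}
}
```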
+ +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10Len = 10 + fieldTransport11Len = 13 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. + Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The age of the NFS mount. + Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. + Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. 
+	VFSSetattr uint64
+	// Number of pending writes that have been forcefully flushed to the server.
+	VFSFlush uint64
+	// Number of times fsync() has been called on directories and files.
+	VFSFsync uint64
+	// Number of times locking has been attempted on a file.
+	VFSLock uint64
+	// Number of times files have been closed and released.
+	VFSFileRelease uint64
+	// Unknown. Possibly unused.
+	CongestionWait uint64
+	// Number of times files have been truncated.
+	Truncation uint64
+	// Number of times a file has been grown due to writes beyond its existing end.
+	WriteExtension uint64
+	// Number of times a file was removed while still open by another process.
+	SillyRename uint64
+	// Number of times the NFS server gave less data than expected while reading.
+	ShortRead uint64
+	// Number of times the NFS server wrote less data than expected while writing.
+	ShortWrite uint64
+	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
+	// offline storage.
+	JukeboxDelay uint64
+	// Number of NFS v4.1+ pNFS reads.
+	PNFSRead uint64
+	// Number of NFS v4.1+ pNFS writes.
+	PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+	// The name of the operation.
+	Operation string
+	// Number of requests performed for this operation.
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueTime time.Duration
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseTime time.Duration
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTime time.Duration
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// between sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+ MaximumRPCSlotsUsed uint64 + // A running counter, incremented on each request as the current size of the + // sending queue. + CumulativeSendingQueue uint64 + // A running counter, incremented on each request as the current size of the + // pending queue. + CumulativePendingQueue uint64 +} + +// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice +// of Mount structures containing detailed information about each mount. +// If available, statistics for each mount are parsed as well. +func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? + if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. 
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[2:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. +func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. 
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. +func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Number of expected fields in each per-operation statistics set + numFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) != numFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, numFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + ops = append(ops, NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, + CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, + CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + }) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + switch statVersion { + case statVersion10: + if len(ss) != fieldTransport10Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + } + case statVersion11: + if len(ss) != fieldTransport11Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + } + default: + return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. 
+	ns := make([]uint64, fieldTransport11Len)
+	for i, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns[i] = n
+	}
+
+	return &NFSTransportStats{
+		Port:                     ns[0],
+		Bind:                     ns[1],
+		Connect:                  ns[2],
+		ConnectIdleTime:          ns[3],
+		IdleTime:                 time.Duration(ns[4]) * time.Second,
+		Sends:                    ns[5],
+		Receives:                 ns[6],
+		BadTransactionIDs:        ns[7],
+		CumulativeActiveRequests: ns[8],
+		CumulativeBacklog:        ns[9],
+		MaximumRPCSlotsUsed:      ns[10],
+		CumulativeSendingQueue:   ns[11],
+		CumulativePendingQueue:   ns[12],
+	}, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 000000000..f8c184efe
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,203 @@
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+	Name         string `json:"name"`          // The name of the interface.
+	RxBytes      uint64 `json:"rx_bytes"`      // Cumulative count of bytes received.
+	RxPackets    uint64 `json:"rx_packets"`    // Cumulative count of packets received.
+	RxErrors     uint64 `json:"rx_errors"`     // Cumulative count of receive errors encountered.
+	RxDropped    uint64 `json:"rx_dropped"`    // Cumulative count of packets dropped while receiving.
+	RxFIFO       uint64 `json:"rx_fifo"`       // Cumulative count of FIFO buffer errors.
+	RxFrame      uint64 `json:"rx_frame"`      // Cumulative count of packet framing errors.
+	RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
+	RxMulticast  uint64 `json:"rx_multicast"`  // Cumulative count of multicast frames received by the device driver.
+	TxBytes      uint64 `json:"tx_bytes"`      // Cumulative count of bytes transmitted.
+	TxPackets    uint64 `json:"tx_packets"`    // Cumulative count of packets transmitted.
+	TxErrors     uint64 `json:"tx_errors"`     // Cumulative count of transmit errors encountered.
+	TxDropped    uint64 `json:"tx_dropped"`    // Cumulative count of packets dropped while transmitting.
+	TxFIFO       uint64 `json:"tx_fifo"`       // Cumulative count of FIFO buffer errors.
+	TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
+	TxCarrier    uint64 `json:"tx_carrier"`    // Cumulative count of carrier losses detected by the device driver.
+	TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
+}
+
+// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
+// are interface names.
+type NetDev map[string]NetDevLine
+
+// NewNetDev returns kernel/system statistics read from /proc/net/dev.
+func NewNetDev() (NetDev, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return nil, err
+	}
+
+	return fs.NewNetDev()
+}
+
+// NewNetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NewNetDev() (NetDev, error) {
+	return newNetDev(fs.Path("net/dev"))
+}
+
+// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NewNetDev() (NetDev, error) {
+	return newNetDev(p.path("net/dev"))
+}
+
+// newNetDev creates a new NetDev from the contents of the given file.
+func newNetDev(file string) (NetDev, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return NetDev{}, err
+	}
+	defer f.Close()
+
+	nd := NetDev{}
+	s := bufio.NewScanner(f)
+	for n := 0; s.Scan(); n++ {
+		// Skip the 2 header lines.
+		if n < 2 {
+			continue
+		}
+
+		line, err := nd.parseLine(s.Text())
+		if err != nil {
+			return nd, err
+		}
+
+		nd[line.Name] = *line
+	}
+
+	return nd, s.Err()
+}
+
+// parseLine parses a single line from the /proc/net/dev file. Header lines
+// must be filtered prior to calling this method.
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+	parts := strings.SplitN(rawLine, ":", 2)
+	if len(parts) != 2 {
+		return nil, errors.New("invalid net/dev line, missing colon")
+	}
+	fields := strings.Fields(strings.TrimSpace(parts[1]))
+
+	var err error
+	line := &NetDevLine{}
+
+	// Interface Name
+	line.Name = strings.TrimSpace(parts[0])
+	if line.Name == "" {
+		return nil, errors.New("invalid net/dev line, empty interface name")
+	}
+
+	// RX
+	line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// TX
+	line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted comma-separated list of interface names.
+func (nd NetDev) Total() NetDevLine {
+	total := NetDevLine{}
+
+	names := make([]string, 0, len(nd))
+	for _, ifc := range nd {
+		names = append(names, ifc.Name)
+		total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+		total.RxErrors += ifc.RxErrors
+		total.RxDropped += ifc.RxDropped
+		total.RxFIFO += ifc.RxFIFO
+		total.RxFrame += ifc.RxFrame
+		total.RxCompressed += ifc.RxCompressed
+		total.RxMulticast += ifc.RxMulticast
+		total.TxBytes += ifc.TxBytes
+		total.TxPackets += ifc.TxPackets
+		total.TxErrors += ifc.TxErrors
+		total.TxDropped += ifc.TxDropped
+		total.TxFIFO += ifc.TxFIFO
+		total.TxCollisions += ifc.TxCollisions
+		total.TxCarrier += ifc.TxCarrier
+		total.TxCompressed += ifc.TxCompressed
+	}
+	sort.Strings(names)
+	total.Name = strings.Join(names, ", ")
+
+	return total
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go
new file mode 100644
index 000000000..e2185b782
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package nfs implements parsing of /proc/net/rpc/nfsd.
+// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
+package nfs
+
+// ReplyCache models the "rc" line.
+type ReplyCache struct {
+	Hits    uint64
+	Misses  uint64
+	NoCache uint64
+}
+
+// FileHandles models the "fh" line.
+type FileHandles struct {
+	Stale        uint64
+	TotalLookups uint64
+	AnonLookups  uint64
+	DirNoCache   uint64
+	NoDirNoCache uint64
+}
+
+// InputOutput models the "io" line.
+type InputOutput struct {
+	Read  uint64
+	Write uint64
+}
+
+// Threads models the "th" line.
+type Threads struct {
+	Threads uint64
+	FullCnt uint64
+}
+
+// ReadAheadCache models the "ra" line.
+type ReadAheadCache struct {
+	CacheSize      uint64
+	CacheHistogram []uint64
+	NotFound       uint64
+}
+
+// Network models the "net" line.
+type Network struct {
+	NetCount   uint64
+	UDPCount   uint64
+	TCPCount   uint64
+	TCPConnect uint64
+}
+
+// ClientRPC models the nfs "rpc" line.
+type ClientRPC struct {
+	RPCCount        uint64
+	Retransmissions uint64
+	AuthRefreshes   uint64
+}
+
+// ServerRPC models the nfsd "rpc" line.
+type ServerRPC struct {
+	RPCCount uint64
+	BadCnt   uint64
+	BadFmt   uint64
+	BadAuth  uint64
+	BadcInt  uint64
+}
+
+// V2Stats models the "proc2" line.
+type V2Stats struct {
+	Null     uint64
+	GetAttr  uint64
+	SetAttr  uint64
+	Root     uint64
+	Lookup   uint64
+	ReadLink uint64
+	Read     uint64
+	WrCache  uint64
+	Write    uint64
+	Create   uint64
+	Remove   uint64
+	Rename   uint64
+	Link     uint64
+	SymLink  uint64
+	MkDir    uint64
+	RmDir    uint64
+	ReadDir  uint64
+	FsStat   uint64
+}
+
+// V3Stats models the "proc3" line.
+type V3Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Lookup uint64 + Access uint64 + ReadLink uint64 + Read uint64 + Write uint64 + Create uint64 + MkDir uint64 + SymLink uint64 + MkNod uint64 + Remove uint64 + RmDir uint64 + Rename uint64 + Link uint64 + ReadDir uint64 + ReadDirPlus uint64 + FsStat uint64 + FsInfo uint64 + PathConf uint64 + Commit uint64 +} + +// ClientV4Stats models the nfs "proc4" line. +type ClientV4Stats struct { + Null uint64 + Read uint64 + Write uint64 + Commit uint64 + Open uint64 + OpenConfirm uint64 + OpenNoattr uint64 + OpenDowngrade uint64 + Close uint64 + Setattr uint64 + FsInfo uint64 + Renew uint64 + SetClientId uint64 + SetClientIdConfirm uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Access uint64 + Getattr uint64 + Lookup uint64 + LookupRoot uint64 + Remove uint64 + Rename uint64 + Link uint64 + Symlink uint64 + Create uint64 + Pathconf uint64 + StatFs uint64 + ReadLink uint64 + ReadDir uint64 + ServerCaps uint64 + DelegReturn uint64 + GetAcl uint64 + SetAcl uint64 + FsLocations uint64 + ReleaseLockowner uint64 + Secinfo uint64 + FsidPresent uint64 + ExchangeId uint64 + CreateSession uint64 + DestroySession uint64 + Sequence uint64 + GetLeaseTime uint64 + ReclaimComplete uint64 + LayoutGet uint64 + GetDeviceInfo uint64 + LayoutCommit uint64 + LayoutReturn uint64 + SecinfoNoName uint64 + TestStateId uint64 + FreeStateId uint64 + GetDeviceList uint64 + BindConnToSession uint64 + DestroyClientId uint64 + Seek uint64 + Allocate uint64 + DeAllocate uint64 + LayoutStats uint64 + Clone uint64 +} + +// ServerV4Stats models the nfsd "proc4" line. +type ServerV4Stats struct { + Null uint64 + Compound uint64 +} + +// V4Ops models the "proc4ops" line: NFSv4 operations +// Variable list, see: +// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) +// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) +// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) +type V4Ops struct { + //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? + Op0Unused uint64 + Op1Unused uint64 + Op2Future uint64 + Access uint64 + Close uint64 + Commit uint64 + Create uint64 + DelegPurge uint64 + DelegReturn uint64 + GetAttr uint64 + GetFH uint64 + Link uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Lookup uint64 + LookupRoot uint64 + Nverify uint64 + Open uint64 + OpenAttr uint64 + OpenConfirm uint64 + OpenDgrd uint64 + PutFH uint64 + PutPubFH uint64 + PutRootFH uint64 + Read uint64 + ReadDir uint64 + ReadLink uint64 + Remove uint64 + Rename uint64 + Renew uint64 + RestoreFH uint64 + SaveFH uint64 + SecInfo uint64 + SetAttr uint64 + Verify uint64 + Write uint64 + RelLockOwner uint64 +} + +// RPCStats models all stats from /proc/net/rpc/nfs. +type ClientRPCStats struct { + Network Network + ClientRPC ClientRPC + V2Stats V2Stats + V3Stats V3Stats + ClientV4Stats ClientV4Stats +} + +// ServerRPCStats models all stats from /proc/net/rpc/nfsd. 
+type ServerRPCStats struct { + ReplyCache ReplyCache + FileHandles FileHandles + InputOutput InputOutput + Threads Threads + ReadAheadCache ReadAheadCache + Network Network + ServerRPC ServerRPC + V2Stats V2Stats + V3Stats V3Stats + ServerV4Stats ServerV4Stats + V4Ops V4Ops +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go new file mode 100644 index 000000000..3aa32563a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -0,0 +1,308 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "fmt" +) + +func parseReplyCache(v []uint64) (ReplyCache, error) { + if len(v) != 3 { + return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) + } + + return ReplyCache{ + Hits: v[0], + Misses: v[1], + NoCache: v[2], + }, nil +} + +func parseFileHandles(v []uint64) (FileHandles, error) { + if len(v) != 5 { + return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) + } + + return FileHandles{ + Stale: v[0], + TotalLookups: v[1], + AnonLookups: v[2], + DirNoCache: v[3], + NoDirNoCache: v[4], + }, nil +} + +func parseInputOutput(v []uint64) (InputOutput, error) { + if len(v) != 2 { + return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) + } + + return InputOutput{ + Read: v[0], + Write: v[1], + }, nil +} + +func parseThreads(v []uint64) (Threads, error) { + if len(v) != 2 { + return Threads{}, fmt.Errorf("invalid Threads line %q", v) + } + + return Threads{ + Threads: v[0], + FullCnt: v[1], + }, nil +} + +func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { + if len(v) != 12 { + return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) + } + + return ReadAheadCache{ + CacheSize: v[0], + CacheHistogram: v[1:11], + NotFound: v[11], + }, nil +} + +func parseNetwork(v []uint64) (Network, error) { + if len(v) != 4 { + return Network{}, fmt.Errorf("invalid Network line %q", v) + } + + return Network{ + NetCount: v[0], + UDPCount: v[1], + TCPCount: v[2], + TCPConnect: v[3], + }, nil +} + +func parseServerRPC(v []uint64) (ServerRPC, error) { + if len(v) != 5 { + return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ServerRPC{ + RPCCount: v[0], + BadCnt: v[1], + BadFmt: v[2], + BadAuth: v[3], + BadcInt: v[4], + }, nil +} + +func parseClientRPC(v []uint64) (ClientRPC, error) { + if len(v) != 3 { + return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ClientRPC{ + RPCCount: v[0], + Retransmissions: v[1], + AuthRefreshes: v[2], + }, nil +} + +func parseV2Stats(v []uint64) (V2Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 18 { + return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) + } + + return V2Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Root: v[4], + Lookup: v[5], + ReadLink: v[6], + Read: v[7], + WrCache: v[8], + Write: v[9], + Create: v[10], + Remove: v[11], + Rename: v[12], + Link: v[13], + SymLink: v[14], + MkDir: 
v[15], + RmDir: v[16], + ReadDir: v[17], + FsStat: v[18], + }, nil +} + +func parseV3Stats(v []uint64) (V3Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 22 { + return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) + } + + return V3Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Lookup: v[4], + Access: v[5], + ReadLink: v[6], + Read: v[7], + Write: v[8], + Create: v[9], + MkDir: v[10], + SymLink: v[11], + MkNod: v[12], + Remove: v[13], + RmDir: v[14], + Rename: v[15], + Link: v[16], + ReadDir: v[17], + ReadDirPlus: v[18], + FsStat: v[19], + FsInfo: v[20], + PathConf: v[21], + Commit: v[22], + }, nil +} + +func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 59 { + return ClientV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ClientV4Stats{ + Null: v[1], + Read: v[2], + Write: v[3], + Commit: v[4], + Open: v[5], + OpenConfirm: v[6], + OpenNoattr: v[7], + OpenDowngrade: v[8], + Close: v[9], + Setattr: v[10], + FsInfo: v[11], + Renew: v[12], + SetClientId: v[13], + SetClientIdConfirm: v[14], + Lock: v[15], + Lockt: v[16], + Locku: v[17], + Access: v[18], + Getattr: v[19], + Lookup: v[20], + LookupRoot: v[21], + Remove: v[22], + Rename: v[23], + Link: v[24], + Symlink: v[25], + Create: v[26], + Pathconf: v[27], + StatFs: v[28], + ReadLink: v[29], + ReadDir: v[30], + ServerCaps: v[31], + DelegReturn: v[32], + GetAcl: v[33], + SetAcl: v[34], + FsLocations: v[35], + ReleaseLockowner: v[36], + Secinfo: v[37], + FsidPresent: v[38], + ExchangeId: v[39], + CreateSession: v[40], + DestroySession: v[41], + Sequence: v[42], + GetLeaseTime: v[43], + ReclaimComplete: v[44], + LayoutGet: v[45], + GetDeviceInfo: v[46], + LayoutCommit: v[47], + LayoutReturn: v[48], + SecinfoNoName: v[49], + TestStateId: v[50], + FreeStateId: v[51], + GetDeviceList: v[52], + BindConnToSession: v[53], + DestroyClientId: v[54], + Seek: v[55], + Allocate: v[56], + DeAllocate: v[57], + LayoutStats: v[58], + Clone: v[59], + }, nil +} + +func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 2 { + return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ServerV4Stats{ + Null: v[1], + Compound: v[2], + }, nil +} + +func parseV4Ops(v []uint64) (V4Ops, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 39 { + return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) + } + + stats := V4Ops{ + Op0Unused: v[1], + Op1Unused: v[2], + Op2Future: v[3], + Access: v[4], + Close: v[5], + Commit: v[6], + Create: v[7], + DelegPurge: v[8], + DelegReturn: v[9], + GetAttr: v[10], + GetFH: v[11], + Link: v[12], + Lock: v[13], + Lockt: v[14], + Locku: v[15], + Lookup: v[16], + LookupRoot: v[17], + Nverify: v[18], + Open: v[19], + OpenAttr: v[20], + OpenConfirm: v[21], + OpenDgrd: v[22], + PutFH: v[23], + PutPubFH: v[24], + PutRootFH: v[25], + Read: v[26], + ReadDir: v[27], + ReadLink: v[28], + Remove: v[29], + Rename: v[30], + Renew: v[31], + RestoreFH: v[32], + SaveFH: v[33], + SecInfo: v[34], + SetAttr: v[35], + Verify: v[36], + Write: v[37], + RelLockOwner: v[38], + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go new file mode 100644 index 000000000..b5c0b15f3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache 
License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs.
+func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
+	stats := &ClientRPCStats{}
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(scanner.Text())
+		// require at least a label and one value
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("invalid NFS metric line %q", line)
+		}
+
+		values, err := util.ParseUint64s(parts[1:])
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+		}
+
+		switch metricLine := parts[0]; metricLine {
+		case "net":
+			stats.Network, err = parseNetwork(values)
+		case "rpc":
+			stats.ClientRPC, err = parseClientRPC(values)
+		case "proc2":
+			stats.V2Stats, err = parseV2Stats(values)
+		case "proc3":
+			stats.V3Stats, err = parseV3Stats(values)
+		case "proc4":
+			stats.ClientV4Stats, err = parseClientV4Stats(values)
+		default:
+			return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning NFS file: %s", err)
+	}
+
+	return stats, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
new file mode 100644
index 000000000..57bb4a358
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
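Editor's note: a minimal sketch of driving the ParseClientRPCStats parser vendored just above — assuming direct access to /proc/net/rpc/nfs; the ClientRPC fields referenced here are defined in nfs.go:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	// /proc/net/rpc/nfs holds the NFS *client* RPC counters.
	f, err := os.Open("/proc/net/rpc/nfs")
	if err != nil {
		log.Fatalf("could not open NFS client stats: %s", err)
	}
	defer f.Close()

	stats, err := nfs.ParseClientRPCStats(f)
	if err != nil {
		log.Fatalf("could not parse NFS client stats: %s", err)
	}
	fmt.Printf("rpc calls: %d, retransmissions: %d, auth refreshes: %d\n",
		stats.ClientRPC.RPCCount, stats.ClientRPC.Retransmissions,
		stats.ClientRPC.AuthRefreshes)
}
```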
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd +func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { + stats := &ServerRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFSd metric line %q", line) + } + label := parts[0] + + var values []uint64 + var err error + if label == "th" { + if len(parts) < 3 { + return nil, fmt.Errorf("invalid NFSd th metric line %q", line) + } + values, err = util.ParseUint64s(parts[1:3]) + } else { + values, err = util.ParseUint64s(parts[1:]) + } + if err != nil { + return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "rc": + stats.ReplyCache, err = parseReplyCache(values) + case "fh": + stats.FileHandles, err = parseFileHandles(values) + case "io": + stats.InputOutput, err = parseInputOutput(values) + case "th": + stats.Threads, err = parseThreads(values) + case "ra": + stats.ReadAheadCache, err = parseReadAheadCache(values) + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ServerRPC, err = parseServerRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ServerV4Stats, err = parseServerV4Stats(values) + case "proc4ops": + stats.V4Ops, err = parseV4Ops(values) + default: + return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFSd file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 000000000..8717e1fe0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,224 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. 
+func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. +func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. 
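+// An illustrative sketch of use (assumes p was obtained via NewProc; the
+// Mount type and its Device field come from the package's mountstats parser):
+//
+//	mounts, err := p.MountStats()
+//	if err == nil {
+//		for _, m := range mounts {
+//			fmt.Println(m.Device)
+//		}
+//	}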
+func (p Proc) MountStats() ([]*Mount, error) {
+	f, err := os.Open(p.path("mountstats"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseMountStats(f)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+	d, err := os.Open(p.path("fd"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+	}
+
+	return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+	return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 000000000..e7f6674d0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,52 @@
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+	// Chars read.
+	RChar uint64
+	// Chars written.
+	WChar uint64
+	// Read syscalls.
+	SyscR uint64
+	// Write syscalls.
+	SyscW uint64
+	// Bytes read.
+	ReadBytes uint64
+	// Bytes written.
+	WriteBytes uint64
+	// Bytes written, but taking into account truncation. See
+	// Documentation/filesystems/proc.txt in the kernel sources for
+	// detailed explanation.
+	CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+	pio := ProcIO{}
+
+	f, err := os.Open(p.path("io"))
+	if err != nil {
+		return pio, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return pio, err
+	}
+
+	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+		"read_bytes: %d\nwrite_bytes: %d\n" +
+		"cancelled_write_bytes: %d\n"
+
+	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+
+	return pio, err
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 000000000..b684a5b55
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,137 @@
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+	// CPU time limit in seconds.
+	CPUTime int64
+	// Maximum size of files that the process may create.
+	FileSize int64
+	// Maximum size of the process's data segment (initialized data,
+	// uninitialized data, and heap).
+	DataSize int64
+	// Maximum size of the process stack in bytes.
+	StackSize int64
+	// Maximum size of a core file.
+	CoreFileSize int64
+	// Limit of the process's resident set in pages.
+	ResidentSet int64
+	// Maximum number of processes that can be created for the real user ID of
+	// the calling process.
+	Processes int64
+	// Value one greater than the maximum file descriptor number that can be
+	// opened by this process.
+	OpenFiles int64
+	// Maximum number of bytes of memory that may be locked into RAM.
+	LockedMemory int64
+	// Maximum size of the process's virtual memory address space in bytes.
+	AddressSpace int64
+	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
+	// this process may establish.
+	FileLocks int64
+	// Limit of signals that may be queued for the real user ID of the calling
+	// process.
+	PendingSignals int64
+	// Limit on the number of bytes that can be allocated for POSIX message
+	// queues for the real user ID of the calling process.
+	MsqqueueSize int64
+	// Limit of the nice priority set using setpriority(2) or nice(2).
+	NicePriority int64
+	// Limit of the real-time priority set using sched_setscheduler(2) or
+	// sched_setparam(2).
+	RealtimePriority int64
+	// Limit (in microseconds) on the amount of CPU time that a process
+	// scheduled under a real-time scheduling policy may consume without making
+	// a blocking system call.
+	RealtimeTimeout int64
+}
+
+const (
+	limitsFields    = 3
+	limitsUnlimited = "unlimited"
+)
+
+var (
+	limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+	f, err := os.Open(p.path("limits"))
+	if err != nil {
+		return ProcLimits{}, err
+	}
+	defer f.Close()
+
+	var (
+		l = ProcLimits{}
+		s = bufio.NewScanner(f)
+	)
+	for s.Scan() {
+		fields := limitsDelimiter.Split(s.Text(), limitsFields)
+		if len(fields) != limitsFields {
+			return ProcLimits{}, fmt.Errorf(
+				"couldn't parse %s line %s", f.Name(), s.Text())
+		}
+
+		switch fields[0] {
+		case "Max cpu time":
+			l.CPUTime, err = parseInt(fields[1])
+		case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+		case "Max data size":
+			l.DataSize, err = parseInt(fields[1])
+		case "Max stack size":
+			l.StackSize, err = parseInt(fields[1])
+		case "Max core file size":
+			l.CoreFileSize, err = parseInt(fields[1])
+		case "Max resident set":
+			l.ResidentSet, err = parseInt(fields[1])
+		case "Max processes":
+			l.Processes, err = parseInt(fields[1])
+		case "Max open files":
+			l.OpenFiles, err = parseInt(fields[1])
+		case "Max locked memory":
+			l.LockedMemory, err = parseInt(fields[1])
+		case "Max address space":
+			l.AddressSpace, err = parseInt(fields[1])
+		case "Max file locks":
+			l.FileLocks, err = parseInt(fields[1])
+		case "Max pending signals":
+			l.PendingSignals, err = parseInt(fields[1])
+		case "Max msgqueue size":
+			l.MsqqueueSize, err = parseInt(fields[1])
+		case "Max nice priority":
+			l.NicePriority, err = parseInt(fields[1])
+		case "Max realtime priority":
+			l.RealtimePriority, err = parseInt(fields[1])
+		case "Max realtime timeout":
+			l.RealtimeTimeout, err = parseInt(fields[1])
+		}
+		if err != nil {
+			return ProcLimits{}, err
+		}
+	}
+
+	return l, s.Err()
+}
+
+func parseInt(s string) (int64, error) {
+	if s == limitsUnlimited {
+		return -1, nil
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+	}
+	return i, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 000000000..befdd2690
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+	Type  string // Namespace type.
+	Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// NewNamespaces reads from /proc/[pid]/ns/* to get the namespaces of which the
+// process is a member.
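+// An illustrative sketch (assumes p was obtained via NewProc):
+//
+//	ns, err := p.NewNamespaces()
+//	if err == nil {
+//		fmt.Println(ns["net"].Inode)
+//	}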
+func (p Proc) NewNamespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 000000000..724e271b9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,175 @@ +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. 
+	STime uint
+	// Amount of time that this process's waited-for children have been
+	// scheduled in user mode, measured in clock ticks.
+	CUTime uint
+	// Amount of time that this process's waited-for children have been
+	// scheduled in kernel mode, measured in clock ticks.
+	CSTime uint
+	// For processes running a real-time scheduling policy, this is the negated
+	// scheduling priority, minus one.
+	Priority int
+	// The nice value, a value in the range 19 (low priority) to -20 (high
+	// priority).
+	Nice int
+	// Number of threads in this process.
+	NumThreads int
+	// The time the process started after system boot, expressed in clock
+	// ticks.
+	Starttime uint64
+	// Virtual memory size in bytes.
+	VSize int
+	// Resident set size in pages.
+	RSS int
+
+	fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+	f, err := os.Open(p.path("stat"))
+	if err != nil {
+		return ProcStat{}, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	var (
+		ignore int
+
+		s = ProcStat{PID: p.PID, fs: p.fs}
+		l = bytes.Index(data, []byte("("))
+		r = bytes.LastIndex(data, []byte(")"))
+	)
+
+	if l < 0 || r < 0 {
+		return ProcStat{}, fmt.Errorf(
+			"unexpected format, couldn't extract comm: %s",
+			data,
+		)
+	}
+
+	s.Comm = string(data[l+1 : r])
+	_, err = fmt.Fscan(
+		bytes.NewBuffer(data[r+2:]),
+		&s.State,
+		&s.PPID,
+		&s.PGRP,
+		&s.Session,
+		&s.TTY,
+		&s.TPGID,
+		&s.Flags,
+		&s.MinFlt,
+		&s.CMinFlt,
+		&s.MajFlt,
+		&s.CMajFlt,
+		&s.UTime,
+		&s.STime,
+		&s.CUTime,
+		&s.CSTime,
+		&s.Priority,
+		&s.Nice,
+		&s.NumThreads,
+		&ignore,
+		&s.Starttime,
+		&s.VSize,
+		&s.RSS,
+	)
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+	return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+	return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process's start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+	stat, err := s.fs.NewStat()
+	if err != nil {
+		return 0, err
+	}
+	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+	return float64(s.UTime+s.STime) / userHZ
+}
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 000000000..701f4df64
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,219 @@
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// CPUStat shows how much time the cpu spent in various stages.
+type CPUStat struct {
+	User      float64
+	Nice      float64
+	System    float64
+	Idle      float64
+	Iowait    float64
+	IRQ       float64
+	SoftIRQ   float64
+	Steal     float64
+	Guest     float64
+	GuestNice float64
+}
+
+// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading /proc/softirqs
+type SoftIRQStat struct {
+	Hi          uint64
+	Timer       uint64
+	NetTx       uint64
+	NetRx       uint64
+	Block       uint64
+	BlockIoPoll uint64
+	Tasklet     uint64
+	Sched       uint64
+	Hrtimer     uint64
+	Rcu         uint64
+}
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+	// Boot time in seconds since the Epoch.
+	BootTime uint64
+	// Summed up cpu statistics.
+	CPUTotal CPUStat
+	// Per-CPU statistics.
+	CPU []CPUStat
+	// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
+	IRQTotal uint64
+	// Number of times a numbered IRQ was triggered.
+	IRQ []uint64
+	// Number of times a context switch happened.
+	ContextSwitches uint64
+	// Number of times a process was created.
+	ProcessCreated uint64
+	// Number of processes currently running.
+	ProcessesRunning uint64
+	// Number of processes currently blocked (waiting for IO).
+	ProcessesBlocked uint64
+	// Number of times a softirq was scheduled.
+	SoftIRQTotal uint64
+	// Detailed softirq statistics.
+	SoftIRQ SoftIRQStat
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Stat{}, err
+	}
+
+	return fs.NewStat()
+}
+
+// parseCPUStat parses a cpu statistics line and returns the CPUStat struct
+// plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+	cpuStat := CPUStat{}
+	var cpu string
+
+	count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+		&cpu,
+		&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+		&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+		&cpuStat.Guest, &cpuStat.GuestNice)
+
+	if err != nil && err != io.EOF {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+	}
+	if count == 0 {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+	}
+
+	cpuStat.User /= userHZ
+	cpuStat.Nice /= userHZ
+	cpuStat.System /= userHZ
+	cpuStat.Idle /= userHZ
+	cpuStat.Iowait /= userHZ
+	cpuStat.IRQ /= userHZ
+	cpuStat.SoftIRQ /= userHZ
+	cpuStat.Steal /= userHZ
+	cpuStat.Guest /= userHZ
+	cpuStat.GuestNice /= userHZ
+
+	if cpu == "cpu" {
+		return cpuStat, -1, nil
+	}
+
+	cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+	if err != nil {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+	}
+
+	return cpuStat, cpuID, nil
+}
+
+// parseSoftIRQStat parses a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+	softIRQStat := SoftIRQStat{}
+	var total uint64
+	var prefix string
+
+	_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+		&prefix, &total,
+		&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+		&softIRQStat.Block, &softIRQStat.BlockIoPoll,
+		&softIRQStat.Tasklet, &softIRQStat.Sched,
+		&softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+	if err != nil {
+		return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+	}
+
+	return softIRQStat, total, nil
+}
+
+// NewStat returns information about current kernel/system statistics.
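+// An illustrative sketch of typical use:
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		return err
+//	}
+//	stat, err := fs.NewStat()
+//	fmt.Println(stat.BootTime, stat.CPUTotal.User)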
+func (fs FS) NewStat() (Stat, error) {
+	// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+
+	f, err := os.Open(fs.Path("stat"))
+	if err != nil {
+		return Stat{}, err
+	}
+	defer f.Close()
+
+	stat := Stat{}
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(scanner.Text())
+		// require at least <label> <value>
+		if len(parts) < 2 {
+			continue
+		}
+		switch {
+		case parts[0] == "btime":
+			if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+			}
+		case parts[0] == "intr":
+			if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+			}
+			numberedIRQs := parts[2:]
+			stat.IRQ = make([]uint64, len(numberedIRQs))
+			for i, count := range numberedIRQs {
+				if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+				}
+			}
+		case parts[0] == "ctxt":
+			if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+			}
+		case parts[0] == "processes":
+			if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+			}
+		case parts[0] == "procs_running":
+			if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+			}
+		case parts[0] == "procs_blocked":
+			if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+			}
+		case parts[0] == "softirq":
+			softIRQStats, total, err := parseSoftIRQStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			stat.SoftIRQTotal = total
+			stat.SoftIRQ = softIRQStats
+		case strings.HasPrefix(parts[0], "cpu"):
+			cpuStat, cpuID, err := parseCPUStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			if cpuID == -1 {
+				stat.CPUTotal = cpuStat
+			} else {
+				for int64(len(stat.CPU)) <= cpuID {
+					stat.CPU = append(stat.CPU, CPUStat{})
+				}
+				stat.CPU[cpuID] = cpuStat
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+	}
+
+	return stat, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 000000000..ffe9df50d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
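+// Each line of that file holds one counter name and its integer value, for
+// example:
+//
+//	XfrmInError                 0
+//	XfrmInBufferError           0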
+type XfrmStat struct {
+	// All errors which are not matched by others
+	XfrmInError int
+	// No buffer is left
+	XfrmInBufferError int
+	// Header error
+	XfrmInHdrError int
+	// No state found
+	// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+	XfrmInNoStates int
+	// Transformation protocol specific error
+	// e.g. SA Key is wrong
+	XfrmInStateProtoError int
+	// Transformation mode specific error
+	XfrmInStateModeError int
+	// Sequence error
+	// e.g. sequence number is out of window
+	XfrmInStateSeqError int
+	// State is expired
+	XfrmInStateExpired int
+	// State has mismatch option
+	// e.g. UDP encapsulation type is mismatched
+	XfrmInStateMismatch int
+	// State is invalid
+	XfrmInStateInvalid int
+	// No matching template for states
+	// e.g. Inbound SAs are correct but SP rule is wrong
+	XfrmInTmplMismatch int
+	// No policy is found for states
+	// e.g. Inbound SAs are correct but no SP is found
+	XfrmInNoPols int
+	// Policy discards
+	XfrmInPolBlock int
+	// Policy error
+	XfrmInPolError int
+	// All errors which are not matched by others
+	XfrmOutError int
+	// Bundle generation error
+	XfrmOutBundleGenError int
+	// Bundle check error
+	XfrmOutBundleCheckError int
+	// No state was found
+	XfrmOutNoStates int
+	// Transformation protocol specific error
+	XfrmOutStateProtoError int
+	// Transformation mode specific error
+	XfrmOutStateModeError int
+	// Sequence error
+	// i.e. sequence number overflow
+	XfrmOutStateSeqError int
+	// State is expired
+	XfrmOutStateExpired int
+	// Policy discards
+	XfrmOutPolBlock int
+	// Policy is dead
+	XfrmOutPolDead int
+	// Policy error
+	XfrmOutPolError       int
+	XfrmFwdHdrError       int
+	XfrmOutStateInvalid   int
+	XfrmAcquireError      int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return XfrmStat{}, err
+	}
+
+	return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
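+// An illustrative sketch (using the package-level helper above):
+//
+//	stat, err := NewXfrmStat()
+//	if err == nil {
+//		fmt.Println(stat.XfrmInError)
+//	}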
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+	file, err := os.Open(fs.Path("net/xfrm_stat"))
+	if err != nil {
+		return XfrmStat{}, err
+	}
+	defer file.Close()
+
+	var (
+		x = XfrmStat{}
+		s = bufio.NewScanner(file)
+	)
+
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+
+		if len(fields) != 2 {
+			return XfrmStat{}, fmt.Errorf(
+				"couldn't parse %s line %s", file.Name(), s.Text())
+		}
+
+		name := fields[0]
+		value, err := strconv.Atoi(fields[1])
+		if err != nil {
+			return XfrmStat{}, err
+		}
+
+		switch name {
+		case "XfrmInError":
+			x.XfrmInError = value
+		case "XfrmInBufferError":
+			x.XfrmInBufferError = value
+		case "XfrmInHdrError":
+			x.XfrmInHdrError = value
+		case "XfrmInNoStates":
+			x.XfrmInNoStates = value
+		case "XfrmInStateProtoError":
+			x.XfrmInStateProtoError = value
+		case "XfrmInStateModeError":
+			x.XfrmInStateModeError = value
+		case "XfrmInStateSeqError":
+			x.XfrmInStateSeqError = value
+		case "XfrmInStateExpired":
+			x.XfrmInStateExpired = value
+		case "XfrmInStateInvalid":
+			x.XfrmInStateInvalid = value
+		case "XfrmInTmplMismatch":
+			x.XfrmInTmplMismatch = value
+		case "XfrmInNoPols":
+			x.XfrmInNoPols = value
+		case "XfrmInPolBlock":
+			x.XfrmInPolBlock = value
+		case "XfrmInPolError":
+			x.XfrmInPolError = value
+		case "XfrmOutError":
+			x.XfrmOutError = value
+		case "XfrmInStateMismatch":
+			x.XfrmInStateMismatch = value
+		case "XfrmOutBundleGenError":
+			x.XfrmOutBundleGenError = value
+		case "XfrmOutBundleCheckError":
+			x.XfrmOutBundleCheckError = value
+		case "XfrmOutNoStates":
+			x.XfrmOutNoStates = value
+		case "XfrmOutStateProtoError":
+			x.XfrmOutStateProtoError = value
+		case "XfrmOutStateModeError":
+			x.XfrmOutStateModeError = value
+		case "XfrmOutStateSeqError":
+			x.XfrmOutStateSeqError = value
+		case "XfrmOutStateExpired":
+			x.XfrmOutStateExpired = value
+		case "XfrmOutPolBlock":
+			x.XfrmOutPolBlock = value
+		case "XfrmOutPolDead":
+			x.XfrmOutPolDead = value
+		case "XfrmOutPolError":
+			x.XfrmOutPolError = value
+		case "XfrmFwdHdrError":
+			x.XfrmFwdHdrError = value
+		case "XfrmOutStateInvalid":
+			x.XfrmOutStateInvalid = value
+		case "XfrmAcquireError":
+			x.XfrmAcquireError = value
+		}
+
+	}
+
+	return x, s.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
new file mode 100644
index 000000000..2bc0ef342
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -0,0 +1,330 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ParseStats parses a Stats from an input io.Reader, using the format
+// found in /proc/fs/xfs/stat.
+func ParseStats(r io.Reader) (*Stats, error) {
+	const (
+		// Fields parsed into stats structures.
+		fieldExtentAlloc = "extent_alloc"
+		fieldAbt         = "abt"
+		fieldBlkMap      = "blk_map"
+		fieldBmbt        = "bmbt"
+		fieldDir         = "dir"
+		fieldTrans       = "trans"
+		fieldIg          = "ig"
+		fieldLog         = "log"
+		fieldRw          = "rw"
+		fieldAttr        = "attr"
+		fieldIcluster    = "icluster"
+		fieldVnodes      = "vnodes"
+		fieldBuf         = "buf"
+		fieldXpc         = "xpc"
+
+		// Unimplemented at this time due to lack of documentation.
+		fieldPushAil = "push_ail"
+		fieldXstrat  = "xstrat"
+		fieldAbtb2   = "abtb2"
+		fieldAbtc2   = "abtc2"
+		fieldBmbt2   = "bmbt2"
+		fieldIbt2    = "ibt2"
+		fieldFibt2   = "fibt2"
+		fieldQm      = "qm"
+		fieldDebug   = "debug"
+	)
+
+	var xfss Stats
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Expect at least a string label and a single integer value, ex:
+		//   - abt 0
+		//   - rw 1 2
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) < 2 {
+			continue
+		}
+		label := ss[0]
+
+		// Extended precision counters are uint64 values.
+		if label == fieldXpc {
+			us, err := util.ParseUint64s(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
+			if err != nil {
+				return nil, err
+			}
+
+			continue
+		}
+
+		// All other counters are uint32 values.
+		us, err := util.ParseUint32s(ss[1:])
+		if err != nil {
+			return nil, err
+		}
+
+		switch label {
+		case fieldExtentAlloc:
+			xfss.ExtentAllocation, err = extentAllocationStats(us)
+		case fieldAbt:
+			xfss.AllocationBTree, err = btreeStats(us)
+		case fieldBlkMap:
+			xfss.BlockMapping, err = blockMappingStats(us)
+		case fieldBmbt:
+			xfss.BlockMapBTree, err = btreeStats(us)
+		case fieldDir:
+			xfss.DirectoryOperation, err = directoryOperationStats(us)
+		case fieldTrans:
+			xfss.Transaction, err = transactionStats(us)
+		case fieldIg:
+			xfss.InodeOperation, err = inodeOperationStats(us)
+		case fieldLog:
+			xfss.LogOperation, err = logOperationStats(us)
+		case fieldRw:
+			xfss.ReadWrite, err = readWriteStats(us)
+		case fieldAttr:
+			xfss.AttributeOperation, err = attributeOperationStats(us)
+		case fieldIcluster:
+			xfss.InodeClustering, err = inodeClusteringStats(us)
+		case fieldVnodes:
+			xfss.Vnode, err = vnodeStats(us)
+		case fieldBuf:
+			xfss.Buffer, err = bufferStats(us)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &xfss, s.Err()
+}
+
+// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
+func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
+	if l := len(us); l != 4 {
+		return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
+	}
+
+	return ExtentAllocationStats{
+		ExtentsAllocated: us[0],
+		BlocksAllocated:  us[1],
+		ExtentsFreed:     us[2],
+		BlocksFreed:      us[3],
+	}, nil
+}
+
+// btreeStats builds a BTreeStats from a slice of uint32s.
+func btreeStats(us []uint32) (BTreeStats, error) {
+	if l := len(us); l != 4 {
+		return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
+	}
+
+	return BTreeStats{
+		Lookups:         us[0],
+		Compares:        us[1],
+		RecordsInserted: us[2],
+		RecordsDeleted:  us[3],
+	}, nil
+}
+
+// blockMappingStats builds a BlockMappingStats from a slice of uint32s.
+func blockMappingStats(us []uint32) (BlockMappingStats, error) {
+	if l := len(us); l != 7 {
+		return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
+	}
+
+	return BlockMappingStats{
+		Reads:                us[0],
+		Writes:               us[1],
+		Unmaps:               us[2],
+		ExtentListInsertions: us[3],
+		ExtentListDeletions:  us[4],
+		ExtentListLookups:    us[5],
+		ExtentListCompares:   us[6],
+	}, nil
+}
+
+// directoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
+func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
+	if l := len(us); l != 4 {
+		return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
+	}
+
+	return DirectoryOperationStats{
+		Lookups:  us[0],
+		Creates:  us[1],
+		Removes:  us[2],
+		Getdents: us[3],
+	}, nil
+}
+
+// transactionStats builds a TransactionStats from a slice of uint32s.
+func transactionStats(us []uint32) (TransactionStats, error) {
+	if l := len(us); l != 3 {
+		return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
+	}
+
+	return TransactionStats{
+		Sync:  us[0],
+		Async: us[1],
+		Empty: us[2],
+	}, nil
+}
+
+// inodeOperationStats builds an InodeOperationStats from a slice of uint32s.
+func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
+	if l := len(us); l != 7 {
+		return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
+	}
+
+	return InodeOperationStats{
+		Attempts:        us[0],
+		Found:           us[1],
+		Recycle:         us[2],
+		Missed:          us[3],
+		Duplicate:       us[4],
+		Reclaims:        us[5],
+		AttributeChange: us[6],
+	}, nil
+}
+
+// logOperationStats builds a LogOperationStats from a slice of uint32s.
+func logOperationStats(us []uint32) (LogOperationStats, error) {
+	if l := len(us); l != 5 {
+		return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
+	}
+
+	return LogOperationStats{
+		Writes:            us[0],
+		Blocks:            us[1],
+		NoInternalBuffers: us[2],
+		Force:             us[3],
+		ForceSleep:        us[4],
+	}, nil
+}
+
+// readWriteStats builds a ReadWriteStats from a slice of uint32s.
+func readWriteStats(us []uint32) (ReadWriteStats, error) {
+	if l := len(us); l != 2 {
+		return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
+	}
+
+	return ReadWriteStats{
+		Read:  us[0],
+		Write: us[1],
+	}, nil
+}
+
+// attributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
+func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
+	if l := len(us); l != 4 {
+		return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
+	}
+
+	return AttributeOperationStats{
+		Get:    us[0],
+		Set:    us[1],
+		Remove: us[2],
+		List:   us[3],
+	}, nil
+}
+
+// inodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
+func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
+	if l := len(us); l != 3 {
+		return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
+	}
+
+	return InodeClusteringStats{
+		Iflush:     us[0],
+		Flush:      us[1],
+		FlushInode: us[2],
+	}, nil
+}
+
+// vnodeStats builds a VnodeStats from a slice of uint32s.
+func vnodeStats(us []uint32) (VnodeStats, error) {
+	// The attribute "Free" appears to not be available on older XFS
+	// stats versions. Therefore, 7 or 8 elements may appear in
+	// this slice.
+	l := len(us)
+	if l != 7 && l != 8 {
+		return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
+	}
+
+	s := VnodeStats{
+		Active:   us[0],
+		Allocate: us[1],
+		Get:      us[2],
+		Hold:     us[3],
+		Release:  us[4],
+		Reclaim:  us[5],
+		Remove:   us[6],
+	}
+
+	// Skip adding free, unless it is present. The zero value will
+	// be used in place of an actual count.
+	if l == 7 {
+		return s, nil
+	}
+
+	s.Free = us[7]
+	return s, nil
+}
+
+// bufferStats builds a BufferStats from a slice of uint32s.
+func bufferStats(us []uint32) (BufferStats, error) {
+	if l := len(us); l != 9 {
+		return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
+	}
+
+	return BufferStats{
+		Get:             us[0],
+		Create:          us[1],
+		GetLocked:       us[2],
+		GetLockedWaited: us[3],
+		BusyLocked:      us[4],
+		MissLocked:      us[5],
+		PageRetries:     us[6],
+		PageFound:       us[7],
+		GetRead:         us[8],
+	}, nil
+}
+
+// extendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.
+func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
+	if l := len(us); l != 3 {
+		return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
+	}
+
+	return ExtendedPrecisionStats{
+		FlushBytes: us[0],
+		WriteBytes: us[1],
+		ReadBytes:  us[2],
+	}, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go
new file mode 100644
index 000000000..d86794b7c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package xfs provides access to statistics exposed by the XFS filesystem.
+package xfs
+
+// Stats contains XFS filesystem runtime statistics, parsed from
+// /proc/fs/xfs/stat.
+//
+// The names and meanings of each statistic were taken from
+// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
+// kernel source. Most counters are uint32s (same data types used in
+// xfs_stats.h), but some of the "extended precision stats" are uint64s.
+type Stats struct {
+	// The name of the filesystem used to source these statistics.
+	// If empty, this indicates aggregated statistics for all XFS
+	// filesystems on the host.
+	Name string
+
+	ExtentAllocation   ExtentAllocationStats
+	AllocationBTree    BTreeStats
+	BlockMapping       BlockMappingStats
+	BlockMapBTree      BTreeStats
+	DirectoryOperation DirectoryOperationStats
+	Transaction        TransactionStats
+	InodeOperation     InodeOperationStats
+	LogOperation       LogOperationStats
+	ReadWrite          ReadWriteStats
+	AttributeOperation AttributeOperationStats
+	InodeClustering    InodeClusteringStats
+	Vnode              VnodeStats
+	Buffer             BufferStats
+	ExtendedPrecision  ExtendedPrecisionStats
+}
+
+// ExtentAllocationStats contains statistics regarding XFS extent allocations.
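+// For example, an "extent_alloc 1 2 3 4" line in /proc/fs/xfs/stat is mapped
+// by extentAllocationStats in parse.go to ExtentsAllocated=1,
+// BlocksAllocated=2, ExtentsFreed=3 and BlocksFreed=4.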
+type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. +type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. +type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. +type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. +type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. +type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. +type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +} diff --git a/vendor/github.com/stevvooe/ttrpc/LICENSE b/vendor/github.com/stevvooe/ttrpc/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/stevvooe/ttrpc/README.md b/vendor/github.com/stevvooe/ttrpc/README.md
new file mode 100644
index 000000000..b246e578b
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/README.md
@@ -0,0 +1,52 @@
+# ttrpc
+
+[![Build Status](https://travis-ci.org/stevvooe/ttrpc.svg?branch=master)](https://travis-ci.org/stevvooe/ttrpc)
+
+GRPC for low-memory environments.
+
+The existing grpc-go project carries a lot of memory overhead, both from its
+package imports and at runtime. While this is fine for many services with
+low-density requirements, it can be a problem when running a large number of
+services on a single machine or on a machine with a small amount of memory.
+
+Using the same GRPC definitions, this project reduces the binary size and
+protocol overhead required. We do this by eliding the `net/http`, `net/http2`
+and `grpc` packages used by grpc, replacing them with a lightweight framing
+protocol. The result is smaller binaries that use less resident memory, with
+the same ease of use as GRPC.
+
+Please note that while this project supports generating either end of the
+protocol, the generated service definitions will be incompatible with regular
+GRPC services, as they do not speak the same protocol.
+
+# Usage
+
+Create a gogo vanity binary (see
+[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an
+example) with the ttrpc plugin enabled.
+
+It's recommended to use [`protobuild`](https://github.com/stevvooe/protobuild)
+to build the protobufs for this project, but this will also work with protoc
+directly, if required.
+
+# Differences from GRPC
+
+- The protocol stack has been replaced with a lighter protocol that doesn't
+  require http, http2 and tls.
+- The client and server interfaces are identical, whereas in GRPC there are
+  separate client and server interfaces.
+- The Go stdlib context package is used instead.
+- No support for streams yet.
+
+# Status
+
+Very new. YMMV.
+
+TODO:
+
+- [X] Plumb error codes and GRPC status
+- [X] Remove use of any type and dependency on typeurl package
+- [X] Ensure that protocol can support streaming in the future
+- [ ] Document protocol layout
+- [ ] Add testing under concurrent load
+- [ ] Verify connection error handling
diff --git a/vendor/github.com/stevvooe/ttrpc/channel.go b/vendor/github.com/stevvooe/ttrpc/channel.go
new file mode 100644
index 000000000..9493d6862
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/channel.go
@@ -0,0 +1,138 @@
+package ttrpc
+
+import (
+	"bufio"
+	"context"
+	"encoding/binary"
+	"io"
+	"net"
+	"sync"
+
+	"github.com/pkg/errors"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	messageHeaderLength = 10
+	messageLengthMax    = 4 << 20
+)
+
+type messageType uint8
+
+const (
+	messageTypeRequest  messageType = 0x1
+	messageTypeResponse messageType = 0x2
+)
+
+// messageHeader represents the fixed-length message header of 10 bytes sent
+// with every request.
+type messageHeader struct {
+	Length   uint32      // length excluding this header. b[:4]
+	StreamID uint32      // identifies which request stream message is a part of. b[4:8]
+	Type     messageType // message type b[8]
+	Flags    uint8       // reserved b[9]
+}
+
+func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) {
+	_, err := io.ReadFull(r, p[:messageHeaderLength])
+	if err != nil {
+		return messageHeader{}, err
+	}
+
+	return messageHeader{
+		Length:   binary.BigEndian.Uint32(p[:4]),
+		StreamID: binary.BigEndian.Uint32(p[4:8]),
+		Type:     messageType(p[8]),
+		Flags:    p[9],
+	}, nil
+}
+
+func writeMessageHeader(w io.Writer, p []byte, mh messageHeader) error {
+	binary.BigEndian.PutUint32(p[:4], mh.Length)
+	binary.BigEndian.PutUint32(p[4:8], mh.StreamID)
+	p[8] = byte(mh.Type)
+	p[9] = mh.Flags
+
+	_, err := w.Write(p[:])
+	return err
+}
+
+var buffers sync.Pool
+
+type channel struct {
+	conn  net.Conn
+	bw    *bufio.Writer
+	br    *bufio.Reader
+	hrbuf [messageHeaderLength]byte // avoid alloc when reading header
+	hwbuf [messageHeaderLength]byte
+}
+
+func newChannel(conn net.Conn) *channel {
+	return &channel{
+		conn: conn,
+		bw:   bufio.NewWriter(conn),
+		br:   bufio.NewReader(conn),
+	}
+}
+
+// recv a message from the channel. The returned buffer contains the message.
+//
+// If a valid grpc status is returned, the message header returned will be
+// valid and the caller should send it along to the correct consumer. The
+// bytes on the underlying channel will be discarded.
+func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) {
+	mh, err := readMessageHeader(ch.hrbuf[:], ch.br)
+	if err != nil {
+		return messageHeader{}, nil, err
+	}
+
+	if mh.Length > uint32(messageLengthMax) {
+		if _, err := ch.br.Discard(int(mh.Length)); err != nil {
+			return mh, nil, errors.Wrapf(err, "failed to discard after receiving oversized message")
+		}
+
+		return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceeds maximum message size of %v", mh.Length, messageLengthMax)
+	}
+
+	p := ch.getmbuf(int(mh.Length))
+	if _, err := io.ReadFull(ch.br, p); err != nil {
+		return messageHeader{}, nil, errors.Wrapf(err, "failed reading message")
+	}
+
+	return mh, p, nil
+}
+
+func (ch *channel) send(ctx context.Context, streamID uint32, t messageType, p []byte) error {
+	if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil {
+		return err
+	}
+
+	_, err := ch.bw.Write(p)
+	if err != nil {
+		return err
+	}
+
+	return ch.bw.Flush()
+}
+
+func (ch *channel) getmbuf(size int) []byte {
+	// we can't use the standard New method on pool because we want to allocate
+	// based on size.
+	b, ok := buffers.Get().(*[]byte)
+	if !ok || cap(*b) < size {
+		// TODO(stevvooe): It may be better to allocate these in fixed length
+		// buckets to reduce fragmentation but it's not clear that would help
+		// with performance. An ilogb approach or similar would work well.
+		bb := make([]byte, size)
+		b = &bb
+	} else {
+		*b = (*b)[:size]
+	}
+	return *b
+}
+
+func (ch *channel) putmbuf(p []byte) {
+	buffers.Put(&p)
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/client.go b/vendor/github.com/stevvooe/ttrpc/client.go
new file mode 100644
index 000000000..f04718167
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/client.go
@@ -0,0 +1,264 @@
+package ttrpc
+
+import (
+	"context"
+	"io"
+	"net"
+	"os"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/containerd/containerd/log"
+	"github.com/gogo/protobuf/proto"
+	"github.com/pkg/errors"
+	"google.golang.org/grpc/status"
+)
+
+// ErrClosed is returned by client methods when the underlying connection is
+// closed.
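+// For example, a Call issued after Close, or after the connection has been
+// torn down, fails with ErrClosed.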
+var ErrClosed = errors.New("ttrpc: closed") + +type Client struct { + codec codec + conn net.Conn + channel *channel + calls chan *callRequest + + closed chan struct{} + closeOnce sync.Once + closeFunc func() + done chan struct{} + err error +} + +func NewClient(conn net.Conn) *Client { + c := &Client{ + codec: codec{}, + conn: conn, + channel: newChannel(conn), + calls: make(chan *callRequest), + closed: make(chan struct{}), + done: make(chan struct{}), + closeFunc: func() {}, + } + + go c.run() + return c +} + +type callRequest struct { + ctx context.Context + req *Request + resp *Response // response will be written back here + errs chan error // error written here on completion +} + +func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error { + payload, err := c.codec.Marshal(req) + if err != nil { + return err + } + + var ( + creq = &Request{ + Service: service, + Method: method, + Payload: payload, + } + + cresp = &Response{} + ) + + if err := c.dispatch(ctx, creq, cresp); err != nil { + return err + } + + if err := c.codec.Unmarshal(cresp.Payload, resp); err != nil { + return err + } + + if cresp.Status == nil { + return errors.New("no status provided on response") + } + + return status.ErrorProto(cresp.Status) +} + +func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { + errs := make(chan error, 1) + call := &callRequest{ + req: req, + resp: resp, + errs: errs, + } + + select { + case c.calls <- call: + case <-c.done: + return c.err + } + + select { + case err := <-errs: + return filterCloseErr(err) + case <-c.done: + return c.err + } +} + +func (c *Client) Close() error { + c.closeOnce.Do(func() { + close(c.closed) + }) + + return nil +} + +// OnClose allows a close func to be called when the server is closed +func (c *Client) OnClose(closer func()) { + c.closeFunc = closer +} + +type message struct { + messageHeader + p []byte + err error +} + +func (c *Client) run() { + var ( + streamID uint32 = 1 + waiters = make(map[uint32]*callRequest) + calls = c.calls + incoming = make(chan *message) + shutdown = make(chan struct{}) + shutdownErr error + ) + + go func() { + defer close(shutdown) + + // start one more goroutine to recv messages without blocking. + for { + mh, p, err := c.channel.recv(context.TODO()) + if err != nil { + _, ok := status.FromError(err) + if !ok { + // treat all errors that are not an rpc status as terminal. + // all others poison the connection. 
+ shutdownErr = err + return + } + } + select { + case incoming <- &message{ + messageHeader: mh, + p: p[:mh.Length], + err: err, + }: + case <-c.done: + return + } + } + }() + + defer c.conn.Close() + defer close(c.done) + defer c.closeFunc() + + for { + select { + case call := <-calls: + if err := c.send(call.ctx, streamID, messageTypeRequest, call.req); err != nil { + call.errs <- err + continue + } + + waiters[streamID] = call + streamID += 2 // enforce odd client initiated request ids + case msg := <-incoming: + call, ok := waiters[msg.StreamID] + if !ok { + log.L.Errorf("ttrpc: received message for unknown channel %v", msg.StreamID) + continue + } + + call.errs <- c.recv(call.resp, msg) + delete(waiters, msg.StreamID) + case <-shutdown: + if shutdownErr != nil { + shutdownErr = filterCloseErr(shutdownErr) + } else { + shutdownErr = ErrClosed + } + + shutdownErr = errors.Wrapf(shutdownErr, "ttrpc: client shutting down") + + c.err = shutdownErr + for _, waiter := range waiters { + waiter.errs <- shutdownErr + } + c.Close() + return + case <-c.closed: + if c.err == nil { + c.err = ErrClosed + } + // broadcast the shutdown error to the remaining waiters. + for _, waiter := range waiters { + waiter.errs <- c.err + } + return + } + } +} + +func (c *Client) send(ctx context.Context, streamID uint32, mtype messageType, msg interface{}) error { + p, err := c.codec.Marshal(msg) + if err != nil { + return err + } + + return c.channel.send(ctx, streamID, mtype, p) +} + +func (c *Client) recv(resp *Response, msg *message) error { + if msg.err != nil { + return msg.err + } + + if msg.Type != messageTypeResponse { + return errors.New("unkown message type received") + } + + defer c.channel.putmbuf(msg.p) + return proto.Unmarshal(msg.p, resp) +} + +// filterCloseErr rewrites EOF and EPIPE errors to ErrClosed. Use when +// returning from call or handling errors from main read loop. +// +// This purposely ignores errors with a wrapped cause. 
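+//
+// Concretely: io.EOF, errors whose text contains "use of closed network
+// connection", and syscall.EPIPE surfaced through a *net.OpError on a write
+// all map to ErrClosed; any other error is returned unchanged.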
+func filterCloseErr(err error) error { + if err == nil { + return nil + } + + if err == io.EOF { + return ErrClosed + } + + if strings.Contains(err.Error(), "use of closed network connection") { + return ErrClosed + } + + // if we have an epipe on a write, we cast to errclosed + if oerr, ok := err.(*net.OpError); ok && oerr.Op == "write" { + if serr, ok := oerr.Err.(*os.SyscallError); ok && serr.Err == syscall.EPIPE { + return ErrClosed + } + } + + return err +} diff --git a/vendor/github.com/stevvooe/ttrpc/codec.go b/vendor/github.com/stevvooe/ttrpc/codec.go new file mode 100644 index 000000000..7956a7222 --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/codec.go @@ -0,0 +1,26 @@ +package ttrpc + +import ( + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" +) + +type codec struct{} + +func (c codec) Marshal(msg interface{}) ([]byte, error) { + switch v := msg.(type) { + case proto.Message: + return proto.Marshal(v) + default: + return nil, errors.Errorf("ttrpc: cannot marshal unknown type: %T", msg) + } +} + +func (c codec) Unmarshal(p []byte, msg interface{}) error { + switch v := msg.(type) { + case proto.Message: + return proto.Unmarshal(p, v) + default: + return errors.Errorf("ttrpc: cannot unmarshal into unknown type: %T", msg) + } +} diff --git a/vendor/github.com/stevvooe/ttrpc/config.go b/vendor/github.com/stevvooe/ttrpc/config.go new file mode 100644 index 000000000..23bc603a3 --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/config.go @@ -0,0 +1,23 @@ +package ttrpc + +import "github.com/pkg/errors" + +type serverConfig struct { + handshaker Handshaker +} + +type ServerOpt func(*serverConfig) error + +// WithServerHandshaker can be passed to NewServer to ensure that the +// handshaker is called before every connection attempt. +// +// Only one handshaker is allowed per server. +func WithServerHandshaker(handshaker Handshaker) ServerOpt { + return func(c *serverConfig) error { + if c.handshaker != nil { + return errors.New("only one handshaker allowed per server") + } + c.handshaker = handshaker + return nil + } +} diff --git a/vendor/github.com/stevvooe/ttrpc/handshake.go b/vendor/github.com/stevvooe/ttrpc/handshake.go new file mode 100644 index 000000000..a08ae8ee4 --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/handshake.go @@ -0,0 +1,34 @@ +package ttrpc + +import ( + "context" + "net" +) + +// Handshaker defines the interface for connection handshakes performed on the +// server or client when first connecting. +type Handshaker interface { + // Handshake should confirm or decorate a connection that may be incoming + // to a server or outgoing from a client. + // + // If this returns without an error, the caller should use the connection + // in place of the original connection. + // + // The second return value can contain credential specific data, such as + // unix socket credentials or TLS information. + // + // While we currently only have implementations on the server-side, this + // interface should be sufficient to implement similar handshakes on the + // client-side. 
+ Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) +} + +type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) + +func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { + return fn(ctx, conn) +} + +func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { + return conn, nil, nil +} diff --git a/vendor/github.com/stevvooe/ttrpc/server.go b/vendor/github.com/stevvooe/ttrpc/server.go new file mode 100644 index 000000000..fd29b719e --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/server.go @@ -0,0 +1,441 @@ +package ttrpc + +import ( + "context" + "io" + "math/rand" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/containerd/containerd/log" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + ErrServerClosed = errors.New("ttrpc: server closed") +) + +type Server struct { + config *serverConfig + services *serviceSet + codec codec + + mu sync.Mutex + listeners map[net.Listener]struct{} + connections map[*serverConn]struct{} // all connections to current state + done chan struct{} // marks point at which we stop serving requests +} + +func NewServer(opts ...ServerOpt) (*Server, error) { + config := &serverConfig{} + for _, opt := range opts { + if err := opt(config); err != nil { + return nil, err + } + } + + return &Server{ + config: config, + services: newServiceSet(), + done: make(chan struct{}), + listeners: make(map[net.Listener]struct{}), + connections: make(map[*serverConn]struct{}), + }, nil +} + +func (s *Server) Register(name string, methods map[string]Method) { + s.services.register(name, methods) +} + +func (s *Server) Serve(l net.Listener) error { + s.addListener(l) + defer s.closeListener(l) + + var ( + ctx = context.Background() + backoff time.Duration + handshaker = s.config.handshaker + ) + + if handshaker == nil { + handshaker = handshakerFunc(noopHandshake) + } + + for { + conn, err := l.Accept() + if err != nil { + select { + case <-s.done: + return ErrServerClosed + default: + } + + if terr, ok := err.(interface { + Temporary() bool + }); ok && terr.Temporary() { + if backoff == 0 { + backoff = time.Millisecond + } else { + backoff *= 2 + } + + if max := time.Second; backoff > max { + backoff = max + } + + sleep := time.Duration(rand.Int63n(int64(backoff))) + log.L.WithError(err).Errorf("ttrpc: failed accept; backoff %v", sleep) + time.Sleep(sleep) + continue + } + + return err + } + + backoff = 0 + + approved, handshake, err := handshaker.Handshake(ctx, conn) + if err != nil { + log.L.WithError(err).Errorf("ttrpc: refusing connection after handshake") + conn.Close() + continue + } + + sc := s.newConn(approved, handshake) + go sc.run(ctx) + } +} + +func (s *Server) Shutdown(ctx context.Context) error { + s.mu.Lock() + lnerr := s.closeListeners() + select { + case <-s.done: + default: + // protected by mutex + close(s.done) + } + s.mu.Unlock() + + ticker := time.NewTicker(200 * time.Millisecond) + defer ticker.Stop() + for { + if s.closeIdleConns() { + return lnerr + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +// Close the server without waiting for active connections. 
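+//
+// Unlike Shutdown, which closes the listeners and then polls (every 200ms)
+// until all remaining connections are idle, Close also tears down live
+// connections immediately.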
+func (s *Server) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + + select { + case <-s.done: + default: + // protected by mutex + close(s.done) + } + + err := s.closeListeners() + for c := range s.connections { + c.close() + delete(s.connections, c) + } + + return err +} + +func (s *Server) addListener(l net.Listener) { + s.mu.Lock() + defer s.mu.Unlock() + s.listeners[l] = struct{}{} +} + +func (s *Server) closeListener(l net.Listener) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.closeListenerLocked(l) +} + +func (s *Server) closeListenerLocked(l net.Listener) error { + defer delete(s.listeners, l) + return l.Close() +} + +func (s *Server) closeListeners() error { + var err error + for l := range s.listeners { + if cerr := s.closeListenerLocked(l); cerr != nil && err == nil { + err = cerr + } + } + return err +} + +func (s *Server) addConnection(c *serverConn) { + s.mu.Lock() + defer s.mu.Unlock() + + s.connections[c] = struct{}{} +} + +func (s *Server) closeIdleConns() bool { + s.mu.Lock() + defer s.mu.Unlock() + quiescent := true + for c := range s.connections { + st, ok := c.getState() + if !ok || st != connStateIdle { + quiescent = false + continue + } + c.close() + delete(s.connections, c) + } + return quiescent +} + +type connState int + +const ( + connStateActive = iota + 1 // outstanding requests + connStateIdle // no requests + connStateClosed // closed connection +) + +func (cs connState) String() string { + switch cs { + case connStateActive: + return "active" + case connStateIdle: + return "idle" + case connStateClosed: + return "closed" + default: + return "unknown" + } +} + +func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn { + c := &serverConn{ + server: s, + conn: conn, + handshake: handshake, + shutdown: make(chan struct{}), + } + c.setState(connStateIdle) + s.addConnection(c) + return c +} + +type serverConn struct { + server *Server + conn net.Conn + handshake interface{} // data from handshake, not used for now + state atomic.Value + + shutdownOnce sync.Once + shutdown chan struct{} // forced shutdown, used by close +} + +func (c *serverConn) getState() (connState, bool) { + cs, ok := c.state.Load().(connState) + return cs, ok +} + +func (c *serverConn) setState(newstate connState) { + c.state.Store(newstate) +} + +func (c *serverConn) close() error { + c.shutdownOnce.Do(func() { + close(c.shutdown) + }) + + return nil +} + +func (c *serverConn) run(sctx context.Context) { + type ( + request struct { + id uint32 + req *Request + } + + response struct { + id uint32 + resp *Response + } + ) + + var ( + ch = newChannel(c.conn) + ctx, cancel = context.WithCancel(sctx) + active int + state connState = connStateIdle + responses = make(chan response) + requests = make(chan request) + recvErr = make(chan error, 1) + shutdown = c.shutdown + done = make(chan struct{}) + ) + + defer c.conn.Close() + defer cancel() + defer close(done) + + go func(recvErr chan error) { + defer close(recvErr) + sendImmediate := func(id uint32, st *status.Status) bool { + select { + case responses <- response{ + // even though we've had an invalid stream id, we send it + // back on the same stream id so the client knows which + // stream id was bad. 
+ id: id, + resp: &Response{ + Status: st.Proto(), + }, + }: + return true + case <-c.shutdown: + return false + case <-done: + return false + } + } + + for { + select { + case <-c.shutdown: + return + case <-done: + return + default: // proceed + } + + mh, p, err := ch.recv(ctx) + if err != nil { + status, ok := status.FromError(err) + if !ok { + recvErr <- err + return + } + + // in this case, we send an error for that particular message + // when the status is defined. + if !sendImmediate(mh.StreamID, status) { + return + } + + continue + } + + if mh.Type != messageTypeRequest { + // we must ignore this for future compat. + continue + } + + var req Request + if err := c.server.codec.Unmarshal(p, &req); err != nil { + ch.putmbuf(p) + if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) { + return + } + continue + } + ch.putmbuf(p) + + if mh.StreamID%2 != 1 { + // enforce odd client initiated identifiers. + if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) { + return + } + continue + } + + // Forward the request to the main loop. We don't wait on s.done + // because we have already accepted the client request. + select { + case requests <- request{ + id: mh.StreamID, + req: &req, + }: + case <-done: + return + } + } + }(recvErr) + + for { + newstate := state + switch { + case active > 0: + newstate = connStateActive + shutdown = nil + case active == 0: + newstate = connStateIdle + shutdown = c.shutdown // only enable this branch in idle mode + } + + if newstate != state { + c.setState(newstate) + state = newstate + } + + select { + case request := <-requests: + active++ + go func(id uint32) { + p, status := c.server.services.call(ctx, request.req.Service, request.req.Method, request.req.Payload) + resp := &Response{ + Status: status.Proto(), + Payload: p, + } + + select { + case responses <- response{ + id: id, + resp: resp, + }: + case <-done: + } + }(request.id) + case response := <-responses: + p, err := c.server.codec.Marshal(response.resp) + if err != nil { + log.L.WithError(err).Error("failed marshaling response") + return + } + + if err := ch.send(ctx, response.id, messageTypeResponse, p); err != nil { + log.L.WithError(err).Error("failed sending message on channel") + return + } + + active-- + case err := <-recvErr: + // TODO(stevvooe): Not wildly clear what we should do in this + // branch. Basically, it means that we are no longer receiving + // requests due to a terminal error. + recvErr = nil // connection is now "closing" + if err != nil && err != io.EOF { + log.L.WithError(err).Error("error receiving message") + } + case <-shutdown: + return + } + } +} diff --git a/vendor/github.com/stevvooe/ttrpc/services.go b/vendor/github.com/stevvooe/ttrpc/services.go new file mode 100644 index 000000000..b9a749e3d --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/services.go @@ -0,0 +1,134 @@ +package ttrpc + +import ( + "context" + "io" + "os" + "path" + + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type Method func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) + +type ServiceDesc struct { + Methods map[string]Method + + // TODO(stevvooe): Add stream support. 
+} + +type serviceSet struct { + services map[string]ServiceDesc +} + +func newServiceSet() *serviceSet { + return &serviceSet{ + services: make(map[string]ServiceDesc), + } +} + +func (s *serviceSet) register(name string, methods map[string]Method) { + if _, ok := s.services[name]; ok { + panic(errors.Errorf("duplicate service %v registered", name)) + } + + s.services[name] = ServiceDesc{ + Methods: methods, + } +} + +func (s *serviceSet) call(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, *status.Status) { + p, err := s.dispatch(ctx, serviceName, methodName, p) + st, ok := status.FromError(err) + if !ok { + st = status.New(convertCode(err), err.Error()) + } + + return p, st +} + +func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, error) { + method, err := s.resolve(serviceName, methodName) + if err != nil { + return nil, err + } + + unmarshal := func(obj interface{}) error { + switch v := obj.(type) { + case proto.Message: + if err := proto.Unmarshal(p, v); err != nil { + return status.Errorf(codes.Internal, "ttrpc: error unmarshaling payload: %v", err.Error()) + } + default: + return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v) + } + return nil + } + + resp, err := method(ctx, unmarshal) + if err != nil { + return nil, err + } + + switch v := resp.(type) { + case proto.Message: + r, err := proto.Marshal(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "ttrpc: error marshaling payload: %v", err.Error()) + } + + return r, nil + default: + return nil, status.Errorf(codes.Internal, "ttrpc: error unsupported response type: %T", v) + } +} + +func (s *serviceSet) resolve(service, method string) (Method, error) { + srv, ok := s.services[service] + if !ok { + return nil, status.Errorf(codes.NotFound, "service %v", service) + } + + mthd, ok := srv.Methods[method] + if !ok { + return nil, status.Errorf(codes.NotFound, "method %v", method) + } + + return mthd, nil +} + +// convertCode maps stdlib go errors into grpc space. +// +// This is ripped from the grpc-go code base. 
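+//
+// For example: io.EOF maps to codes.OutOfRange, os.ErrInvalid to
+// codes.InvalidArgument, and context.Canceled to codes.Canceled; anything
+// unrecognized falls through to codes.Unknown.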
+func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled: + return codes.Canceled + case context.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + +func fullPath(service, method string) string { + return "/" + path.Join("/", service, method) +} diff --git a/vendor/github.com/stevvooe/ttrpc/types.go b/vendor/github.com/stevvooe/ttrpc/types.go new file mode 100644 index 000000000..a522b0cf2 --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/types.go @@ -0,0 +1,26 @@ +package ttrpc + +import ( + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" +) + +type Request struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3"` + Method string `protobuf:"bytes,2,opt,name=method,proto3"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"` +} + +func (r *Request) Reset() { *r = Request{} } +func (r *Request) String() string { return fmt.Sprintf("%+#v", r) } +func (r *Request) ProtoMessage() {} + +type Response struct { + Status *spb.Status `protobuf:"bytes,1,opt,name=status,proto3"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3"` +} + +func (r *Response) Reset() { *r = Response{} } +func (r *Response) String() string { return fmt.Sprintf("%+#v", r) } +func (r *Response) ProtoMessage() {} diff --git a/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go b/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go new file mode 100644 index 000000000..812d927dc --- /dev/null +++ b/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go @@ -0,0 +1,92 @@ +package ttrpc + +import ( + "context" + "net" + "os" + "syscall" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +type UnixCredentialsFunc func(*unix.Ucred) error + +func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { + uc, err := requireUnixSocket(conn) + if err != nil { + return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: require unix socket") + } + + rs, err := uc.SyscallConn() + if err != nil { + return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: (net.UnixConn).SyscallConn failed") + } + var ( + ucred *unix.Ucred + ucredErr error + ) + if err := rs.Control(func(fd uintptr) { + ucred, ucredErr = unix.GetsockoptUcred(int(fd), unix.SOL_SOCKET, unix.SO_PEERCRED) + }); err != nil { + return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: (*syscall.RawConn).Control failed") + } + + if ucredErr != nil { + return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials") + } + + if err := fn(ucred); err != nil { + return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: credential check failed") + } + + return uc, ucred, nil +} + +// UnixSocketRequireUidGid requires specific *effective* UID/GID, rather than the real UID/GID. +// +// For example, if a daemon binary is owned by the root (UID 0) with SUID bit but running as an +// unprivileged user (UID 1001), the effective UID becomes 0, and the real UID becomes 1001. 
+// So calling this function with uid=0 allows a connection from effective UID 0 but rejects +// a connection from effective UID 1001. +// +// See socket(7), SO_PEERCRED: "The returned credentials are those that were in effect at the time of the call to connect(2) or socketpair(2)." +func UnixSocketRequireUidGid(uid, gid int) UnixCredentialsFunc { + return func(ucred *unix.Ucred) error { + return requireUidGid(ucred, uid, gid) + } +} + +func UnixSocketRequireRoot() UnixCredentialsFunc { + return UnixSocketRequireUidGid(0, 0) +} + +// UnixSocketRequireSameUser resolves the current effective unix user and returns a +// UnixCredentialsFunc that will validate incoming unix connections against the +// current credentials. +// +// This is useful when using abstract sockets that are accessible by all users. +func UnixSocketRequireSameUser() UnixCredentialsFunc { + euid, egid := os.Geteuid(), os.Getegid() + return UnixSocketRequireUidGid(euid, egid) +} + +func requireRoot(ucred *unix.Ucred) error { + return requireUidGid(ucred, 0, 0) +} + +func requireUidGid(ucred *unix.Ucred, uid, gid int) error { + if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) { + return errors.Wrap(syscall.EPERM, "ttrpc: invalid credentials") + } + return nil +} + +func requireUnixSocket(conn net.Conn) (*net.UnixConn, error) { + uc, ok := conn.(*net.UnixConn) + if !ok { + return nil, errors.New("a unix socket connection is required") + } + + return uc, nil +} diff --git a/vendor/github.com/urfave/cli/LICENSE b/vendor/github.com/urfave/cli/LICENSE new file mode 100644 index 000000000..42a597e29 --- /dev/null +++ b/vendor/github.com/urfave/cli/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Jeremy Saenz & Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
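// A minimal, illustrative sketch (not part of the vendored sources) of wiring
// together the ttrpc Server and Client added above. The "Example" service,
// "Echo" method, socket path, and EchoPayload type are all hypothetical;
// generated protoc-gen-gogottrpc stubs would normally supply the service
// glue. EchoPayload imitates the hand-rolled pattern of ttrpc's own Request
// and Response types in types.go: protobuf struct tags plus the proto.Message
// methods are all the reflection-based gogo codec needs.
package main

import (
	"context"
	"fmt"
	"net"

	"github.com/stevvooe/ttrpc"
)

type EchoPayload struct {
	Msg string `protobuf:"bytes,1,opt,name=msg,proto3"`
}

func (p *EchoPayload) Reset()         { *p = EchoPayload{} }
func (p *EchoPayload) String() string { return p.Msg }
func (p *EchoPayload) ProtoMessage()  {}

func main() {
	const sock = "/tmp/ttrpc-example.sock" // hypothetical socket path

	srv, err := ttrpc.NewServer() // optionally ttrpc.WithServerHandshaker(...)
	if err != nil {
		panic(err)
	}

	// Service and method names are plain strings; generated code would
	// normally perform this registration.
	srv.Register("Example", map[string]ttrpc.Method{
		"Echo": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
			var req EchoPayload
			if err := unmarshal(&req); err != nil {
				return nil, err
			}
			return &EchoPayload{Msg: "echo: " + req.Msg}, nil
		},
	})

	l, err := net.Listen("unix", sock)
	if err != nil {
		panic(err)
	}
	defer l.Close()
	go srv.Serve(l)

	conn, err := net.Dial("unix", sock)
	if err != nil {
		panic(err)
	}
	client := ttrpc.NewClient(conn)
	defer client.Close()

	// Call frames the request with the 10-byte big-endian header defined in
	// channel.go (length, stream ID, type, flags) and blocks on the reply.
	var resp EchoPayload
	if err := client.Call(context.Background(), "Example", "Echo",
		&EchoPayload{Msg: "hi"}, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Msg) // echo: hi

	srv.Shutdown(context.Background())
}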
diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md new file mode 100644 index 000000000..34055fe74 --- /dev/null +++ b/vendor/github.com/urfave/cli/README.md @@ -0,0 +1,1391 @@ +cli +=== + +[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli) +[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli) +[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) +[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) +[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) +[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) / +[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc) + +**Notice:** This is the library formerly known as +`github.com/codegangsta/cli` -- Github will automatically redirect requests +to this repository, but we recommend updating your references for clarity. + +cli is a simple, fast, and fun package for building command line apps in Go. The +goal is to enable developers to write fast and distributable command line +applications in an expressive way. + + + +- [Overview](#overview) +- [Installation](#installation) + * [Supported platforms](#supported-platforms) + * [Using the `v2` branch](#using-the-v2-branch) + * [Pinning to the `v1` releases](#pinning-to-the-v1-releases) +- [Getting Started](#getting-started) +- [Examples](#examples) + * [Arguments](#arguments) + * [Flags](#flags) + + [Placeholder Values](#placeholder-values) + + [Alternate Names](#alternate-names) + + [Ordering](#ordering) + + [Values from the Environment](#values-from-the-environment) + + [Values from alternate input sources (YAML, TOML, and others)](#values-from-alternate-input-sources-yaml-toml-and-others) + + [Precedence](#precedence) + * [Subcommands](#subcommands) + * [Subcommands categories](#subcommands-categories) + * [Exit code](#exit-code) + * [Bash Completion](#bash-completion) + + [Enabling](#enabling) + + [Distribution](#distribution) + + [Customization](#customization) + * [Generated Help Text](#generated-help-text) + + [Customization](#customization-1) + * [Version Flag](#version-flag) + + [Customization](#customization-2) + + [Full API Example](#full-api-example) +- [Contribution Guidelines](#contribution-guidelines) + + + +## Overview + +Command line apps are usually so tiny that there is absolutely no reason why +your code should *not* be self-documenting. Things like generating help text and +parsing command flags/options should not hinder productivity when writing a +command line app. + +**This is where cli comes into play.** cli makes command line programming fun, +organized, and expressive! + +## Installation + +Make sure you have a working Go environment. Go version 1.2+ is supported. [See +the install instructions for Go](http://golang.org/doc/install.html). 
+ +To install cli, simply run: +``` +$ go get github.com/urfave/cli +``` + +Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can +be easily used: +``` +export PATH=$PATH:$GOPATH/bin +``` + +### Supported platforms + +cli is tested against multiple versions of Go on Linux, and against the latest +released version of Go on OS X and Windows. For full details, see +[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml). + +### Using the `v2` branch + +**Warning**: The `v2` branch is currently unreleased and considered unstable. + +There is currently a long-lived branch named `v2` that is intended to land as +the new `master` branch once development there has settled down. The current +`master` branch (mirrored as `v1`) is being manually merged into `v2` on +an irregular human-based schedule, but generally if one wants to "upgrade" to +`v2` *now* and accept the volatility (read: "awesomeness") that comes along with +that, please use whatever version pinning of your preference, such as via +`gopkg.in`: + +``` +$ go get gopkg.in/urfave/cli.v2 +``` + +``` go +... +import ( + "gopkg.in/urfave/cli.v2" // imports as package "cli" +) +... +``` + +### Pinning to the `v1` releases + +Similarly to the section above describing use of the `v2` branch, if one wants +to avoid any unexpected compatibility pains once `v2` becomes `master`, then +pinning to `v1` is an acceptable option, e.g.: + +``` +$ go get gopkg.in/urfave/cli.v1 +``` + +``` go +... +import ( + "gopkg.in/urfave/cli.v1" // imports as package "cli" +) +... +``` + +This will pull the latest tagged `v1` release (e.g. `v1.18.1` at the time of writing). + +## Getting Started + +One of the philosophies behind cli is that an API should be playful and full of +discovery. So a cli app can be as little as one line of code in `main()`. + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.NewApp().Run(os.Args) +} +``` + +This app will run and show help text, but is not very useful. Let's give an +action to execute and some help documentation: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "boom" + app.Usage = "make an explosive entrance" + app.Action = func(c *cli.Context) error { + fmt.Println("boom! I say!") + return nil + } + + app.Run(os.Args) +} +``` + +Running this already gives you a ton of functionality, plus support for things +like subcommands and flags, which are covered below. + +## Examples + +Being a programmer can be a lonely job. Thankfully by the power of automation +that is not the case! Let's create a greeter app to fend off our demons of +loneliness! + +Start by creating a directory named `greet`, and within it, add a file, +`greet.go` with the following code in it: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "greet" + app.Usage = "fight the loneliness!" + app.Action = func(c *cli.Context) error { + fmt.Println("Hello friend!") + return nil + } + + app.Run(os.Args) +} +``` + +Install our command to the `$GOPATH/bin` directory: + +``` +$ go install +``` + +Finally run our new command: + +``` +$ greet +Hello friend! +``` + +cli also generates neat help text: + +``` +$ greet help +NAME: + greet - fight the loneliness! + +USAGE: + greet [global options] command [command options] [arguments...] 
+ +VERSION: + 0.0.0 + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS + --version Shows version information +``` + +### Arguments + +You can lookup arguments by calling the `Args` function on `cli.Context`, e.g.: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Action = func(c *cli.Context) error { + fmt.Printf("Hello %q", c.Args().Get(0)) + return nil + } + + app.Run(os.Args) +} +``` + +### Flags + +Setting and querying flags is simple. + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + }, + } + + app.Action = func(c *cli.Context) error { + name := "Nefertiti" + if c.NArg() > 0 { + name = c.Args().Get(0) + } + if c.String("lang") == "spanish" { + fmt.Println("Hola", name) + } else { + fmt.Println("Hello", name) + } + return nil + } + + app.Run(os.Args) +} +``` + +You can also set a destination variable for a flag, to which the content will be +scanned. + + +``` go +package main + +import ( + "os" + "fmt" + + "github.com/urfave/cli" +) + +func main() { + var language string + + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + Destination: &language, + }, + } + + app.Action = func(c *cli.Context) error { + name := "someone" + if c.NArg() > 0 { + name = c.Args()[0] + } + if language == "spanish" { + fmt.Println("Hola", name) + } else { + fmt.Println("Hello", name) + } + return nil + } + + app.Run(os.Args) +} +``` + +See full list of flags at http://godoc.org/github.com/urfave/cli + +#### Placeholder Values + +Sometimes it's useful to specify a flag's value within the usage string itself. +Such placeholders are indicated with back quotes. + +For example this: + + +```go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag{ + cli.StringFlag{ + Name: "config, c", + Usage: "Load configuration from `FILE`", + }, + } + + app.Run(os.Args) +} +``` + +Will result in help output like: + +``` +--config FILE, -c FILE Load configuration from FILE +``` + +Note that only the first placeholder is used. Subsequent back-quoted words will +be left as-is. + +#### Alternate Names + +You can set alternate (or short) names for flags by providing a comma-delimited +list for the `Name`. e.g. + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang, l", + Value: "english", + Usage: "language for the greeting", + }, + } + + app.Run(os.Args) +} +``` + +That flag can then be set with `--lang spanish` or `-l spanish`. Note that +giving two different forms of the same flag in the same command invocation is an +error. + +#### Ordering + +Flags for the application and commands are shown in the order they are defined. +However, it's possible to sort them from outside this library by using `FlagsByName` +or `CommandsByName` with `sort`. 
+
+For example this:
+
+``` go
+package main
+
+import (
+  "os"
+  "sort"
+
+  "github.com/urfave/cli"
+)
+
+func main() {
+  app := cli.NewApp()
+
+  app.Flags = []cli.Flag {
+    cli.StringFlag{
+      Name: "lang, l",
+      Value: "english",
+      Usage: "Language for the greeting",
+    },
+    cli.StringFlag{
+      Name: "config, c",
+      Usage: "Load configuration from `FILE`",
+    },
+  }
+
+  app.Commands = []cli.Command{
+    {
+      Name:    "complete",
+      Aliases: []string{"c"},
+      Usage:   "complete a task on the list",
+      Action:  func(c *cli.Context) error {
+        return nil
+      },
+    },
+    {
+      Name:    "add",
+      Aliases: []string{"a"},
+      Usage:   "add a task to the list",
+      Action:  func(c *cli.Context) error {
+        return nil
+      },
+    },
+  }
+
+  sort.Sort(cli.FlagsByName(app.Flags))
+  sort.Sort(cli.CommandsByName(app.Commands))
+
+  app.Run(os.Args)
+}
+```
+
+Will result in help output like:
+
+```
+--config FILE, -c FILE  Load configuration from FILE
+--lang value, -l value  Language for the greeting (default: "english")
+```
+
+#### Values from the Environment
+
+You can also have the default value for a flag set from the environment via
+`EnvVar`, e.g.:
+
+``` go
+package main
+
+import (
+  "os"
+
+  "github.com/urfave/cli"
+)
+
+func main() {
+  app := cli.NewApp()
+
+  app.Flags = []cli.Flag {
+    cli.StringFlag{
+      Name: "lang, l",
+      Value: "english",
+      Usage: "language for the greeting",
+      EnvVar: "APP_LANG",
+    },
+  }
+
+  app.Run(os.Args)
+}
+```
+
+The `EnvVar` may also be given as a comma-delimited "cascade", where the first
+environment variable that resolves is used as the default.
+
+``` go
+package main
+
+import (
+  "os"
+
+  "github.com/urfave/cli"
+)
+
+func main() {
+  app := cli.NewApp()
+
+  app.Flags = []cli.Flag {
+    cli.StringFlag{
+      Name: "lang, l",
+      Value: "english",
+      Usage: "language for the greeting",
+      EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
+    },
+  }
+
+  app.Run(os.Args)
+}
+```
+
+#### Values from alternate input sources (YAML, TOML, and others)
+
+There is a separate package altsrc that adds support for getting flag values
+from other file input sources.
+
+Currently supported input source formats:
+* YAML
+* TOML
+
+To get values for a flag from an alternate input source, wrap an existing
+cli.Flag as shown below:
+
+``` go
+  altsrc.NewIntFlag(cli.IntFlag{Name: "test"})
+```
+
+These flags must also be initialized. Below is an example that initializes a
+YAML input source:
+
+``` go
+  command.Before = altsrc.InitInputSourceWithContext(command.Flags, altsrc.NewYamlSourceFromFlagFunc("load"))
+```
+
+The code above will use the "load" string as a flag name to get the file name
+of a yaml file from the cli.Context. It will then use that file name to
+initialize the yaml input source for any flags that are defined on that
+command. As a note, the "load" flag must also be defined on the command's
+flags in order for this code snippet to work.
+
+Currently only the above-specified formats are supported, but developers can
+add support for other input sources by implementing
+altsrc.InputSourceContext for their given sources.
+ +Here is a more complete sample of a command using YAML support: + + +``` go +package notmain + +import ( + "fmt" + "os" + + "github.com/urfave/cli" + "github.com/urfave/cli/altsrc" +) + +func main() { + app := cli.NewApp() + + flags := []cli.Flag{ + altsrc.NewIntFlag(cli.IntFlag{Name: "test"}), + cli.StringFlag{Name: "load"}, + } + + app.Action = func(c *cli.Context) error { + fmt.Println("yaml ist rad") + return nil + } + + app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load")) + app.Flags = flags + + app.Run(os.Args) +} +``` + +#### Precedence + +The precedence for flag value sources is as follows (highest to lowest): + +0. Command line flag value from user +0. Environment variable (if specified) +0. Configuration file (if specified) +0. Default defined on the flag + +### Subcommands + +Subcommands can be defined for a more git-like command line app. + + +```go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Commands = []cli.Command{ + { + Name: "add", + Aliases: []string{"a"}, + Usage: "add a task to the list", + Action: func(c *cli.Context) error { + fmt.Println("added task: ", c.Args().First()) + return nil + }, + }, + { + Name: "complete", + Aliases: []string{"c"}, + Usage: "complete a task on the list", + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil + }, + }, + { + Name: "template", + Aliases: []string{"t"}, + Usage: "options for task templates", + Subcommands: []cli.Command{ + { + Name: "add", + Usage: "add a new template", + Action: func(c *cli.Context) error { + fmt.Println("new task template: ", c.Args().First()) + return nil + }, + }, + { + Name: "remove", + Usage: "remove an existing template", + Action: func(c *cli.Context) error { + fmt.Println("removed task template: ", c.Args().First()) + return nil + }, + }, + }, + }, + } + + app.Run(os.Args) +} +``` + +### Subcommands categories + +For additional organization in apps that have many subcommands, you can +associate a category for each command to group them together in the help +output. + +E.g. + +```go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Commands = []cli.Command{ + { + Name: "noop", + }, + { + Name: "add", + Category: "Template actions", + }, + { + Name: "remove", + Category: "Template actions", + }, + } + + app.Run(os.Args) +} +``` + +Will include: + +``` +COMMANDS: + noop + + Template actions: + add + remove +``` + +### Exit code + +Calling `App.Run` will not automatically call `os.Exit`, which means that by +default the exit code will "fall through" to being `0`. An explicit exit code +may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a +`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.: + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Flags = []cli.Flag{ + cli.BoolTFlag{ + Name: "ginger-crouton", + Usage: "is it in the soup?", + }, + } + app.Action = func(ctx *cli.Context) error { + if !ctx.Bool("ginger-crouton") { + return cli.NewExitError("it is not in the soup", 86) + } + return nil + } + + app.Run(os.Args) +} +``` + +### Bash Completion + +You can enable completion commands by setting the `EnableBashCompletion` +flag on the `App` object. 
By default, this setting will only auto-complete to +show an app's subcommands, but you can write your own completion methods for +the App or its subcommands. + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + tasks := []string{"cook", "clean", "laundry", "eat", "sleep", "code"} + + app := cli.NewApp() + app.EnableBashCompletion = true + app.Commands = []cli.Command{ + { + Name: "complete", + Aliases: []string{"c"}, + Usage: "complete a task on the list", + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil + }, + BashComplete: func(c *cli.Context) { + // This will complete if no args are passed + if c.NArg() > 0 { + return + } + for _, t := range tasks { + fmt.Println(t) + } + }, + }, + } + + app.Run(os.Args) +} +``` + +#### Enabling + +Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while +setting the `PROG` variable to the name of your program: + +`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` + +#### Distribution + +Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename +it to the name of the program you wish to add autocomplete support for (or +automatically install it there if you are distributing a package). Don't forget +to source the file to make it active in the current shell. + +``` +sudo cp src/bash_autocomplete /etc/bash_completion.d/ +source /etc/bash_completion.d/ +``` + +Alternatively, you can just document that users should source the generic +`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set +to the name of their program (as above). + +#### Customization + +The default bash completion flag (`--generate-bash-completion`) is defined as +`cli.BashCompletionFlag`, and may be redefined if desired, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.BashCompletionFlag = cli.BoolFlag{ + Name: "compgen", + Hidden: true, + } + + app := cli.NewApp() + app.EnableBashCompletion = true + app.Commands = []cli.Command{ + { + Name: "wat", + }, + } + app.Run(os.Args) +} +``` + +### Generated Help Text + +The default help flag (`-h/--help`) is defined as `cli.HelpFlag` and is checked +by the cli internals in order to print generated help text for the app, command, +or subcommand, and break execution. + +#### Customization + +All of the help text generation may be customized, and at multiple levels. The +templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and +`SubcommandHelpTemplate` which may be reassigned or augmented, and full override +is possible by assigning a compatible func to the `cli.HelpPrinter` variable, +e.g.: + + +``` go +package main + +import ( + "fmt" + "io" + "os" + + "github.com/urfave/cli" +) + +func main() { + // EXAMPLE: Append to an existing template + cli.AppHelpTemplate = fmt.Sprintf(`%s + +WEBSITE: http://awesometown.example.com + +SUPPORT: support@awesometown.example.com + +`, cli.AppHelpTemplate) + + // EXAMPLE: Override a template + cli.AppHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + {{if len .Authors}} +AUTHOR: + {{range .Authors}}{{ . 
}}{{end}} + {{end}}{{if .Commands}} +COMMANDS: +{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t"}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}} +GLOBAL OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}}{{if .Copyright }} +COPYRIGHT: + {{.Copyright}} + {{end}}{{if .Version}} +VERSION: + {{.Version}} + {{end}} +` + + // EXAMPLE: Replace the `HelpPrinter` func + cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { + fmt.Println("Ha HA. I pwnd the help!!1") + } + + cli.NewApp().Run(os.Args) +} +``` + +The default flag may be customized to something other than `-h/--help` by +setting `cli.HelpFlag`, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.HelpFlag = cli.BoolFlag{ + Name: "halp, haaaaalp", + Usage: "HALP", + EnvVar: "SHOW_HALP,HALPPLZ", + } + + cli.NewApp().Run(os.Args) +} +``` + +### Version Flag + +The default version flag (`-v/--version`) is defined as `cli.VersionFlag`, which +is checked by the cli internals in order to print the `App.Version` via +`cli.VersionPrinter` and break execution. + +#### Customization + +The default flag may be customized to something other than `-v/--version` by +setting `cli.VersionFlag`, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.VersionFlag = cli.BoolFlag{ + Name: "print-version, V", + Usage: "print only the version", + } + + app := cli.NewApp() + app.Name = "partay" + app.Version = "19.99.0" + app.Run(os.Args) +} +``` + +Alternatively, the version printer at `cli.VersionPrinter` may be overridden, e.g.: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +var ( + Revision = "fafafaf" +) + +func main() { + cli.VersionPrinter = func(c *cli.Context) { + fmt.Printf("version=%s revision=%s\n", c.App.Version, Revision) + } + + app := cli.NewApp() + app.Name = "partay" + app.Version = "19.99.0" + app.Run(os.Args) +} +``` + +#### Full API Example + +**Notice**: This is a contrived (functioning) example meant strictly for API +demonstration purposes. Use of one's imagination is encouraged. 
+ + +``` go +package main + +import ( + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "time" + + "github.com/urfave/cli" +) + +func init() { + cli.AppHelpTemplate += "\nCUSTOMIZED: you bet ur muffins\n" + cli.CommandHelpTemplate += "\nYMMV\n" + cli.SubcommandHelpTemplate += "\nor something\n" + + cli.HelpFlag = cli.BoolFlag{Name: "halp"} + cli.BashCompletionFlag = cli.BoolFlag{Name: "compgen", Hidden: true} + cli.VersionFlag = cli.BoolFlag{Name: "print-version, V"} + + cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { + fmt.Fprintf(w, "best of luck to you\n") + } + cli.VersionPrinter = func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "version=%s\n", c.App.Version) + } + cli.OsExiter = func(c int) { + fmt.Fprintf(cli.ErrWriter, "refusing to exit %d\n", c) + } + cli.ErrWriter = ioutil.Discard + cli.FlagStringer = func(fl cli.Flag) string { + return fmt.Sprintf("\t\t%s", fl.GetName()) + } +} + +type hexWriter struct{} + +func (w *hexWriter) Write(p []byte) (int, error) { + for _, b := range p { + fmt.Printf("%x", b) + } + fmt.Printf("\n") + + return len(p), nil +} + +type genericType struct{ + s string +} + +func (g *genericType) Set(value string) error { + g.s = value + return nil +} + +func (g *genericType) String() string { + return g.s +} + +func main() { + app := cli.NewApp() + app.Name = "kənˈtrīv" + app.Version = "19.99.0" + app.Compiled = time.Now() + app.Authors = []cli.Author{ + cli.Author{ + Name: "Example Human", + Email: "human@example.com", + }, + } + app.Copyright = "(c) 1999 Serious Enterprise" + app.HelpName = "contrive" + app.Usage = "demonstrate available API" + app.UsageText = "contrive - demonstrating the available API" + app.ArgsUsage = "[args and such]" + app.Commands = []cli.Command{ + cli.Command{ + Name: "doo", + Aliases: []string{"do"}, + Category: "motion", + Usage: "do the doo", + UsageText: "doo - does the dooing", + Description: "no really, there is a lot of dooing to be done", + ArgsUsage: "[arrgh]", + Flags: []cli.Flag{ + cli.BoolFlag{Name: "forever, forevvarr"}, + }, + Subcommands: cli.Commands{ + cli.Command{ + Name: "wop", + Action: wopAction, + }, + }, + SkipFlagParsing: false, + HideHelp: false, + Hidden: false, + HelpName: "doo!", + BashComplete: func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "--better\n") + }, + Before: func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "brace for impact\n") + return nil + }, + After: func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "did we lose anyone?\n") + return nil + }, + Action: func(c *cli.Context) error { + c.Command.FullName() + c.Command.HasName("wop") + c.Command.Names() + c.Command.VisibleFlags() + fmt.Fprintf(c.App.Writer, "dodododododoodododddooooododododooo\n") + if c.Bool("forever") { + c.Command.Run(c) + } + return nil + }, + OnUsageError: func(c *cli.Context, err error, isSubcommand bool) error { + fmt.Fprintf(c.App.Writer, "for shame\n") + return err + }, + }, + } + app.Flags = []cli.Flag{ + cli.BoolFlag{Name: "fancy"}, + cli.BoolTFlag{Name: "fancier"}, + cli.DurationFlag{Name: "howlong, H", Value: time.Second * 3}, + cli.Float64Flag{Name: "howmuch"}, + cli.GenericFlag{Name: "wat", Value: &genericType{}}, + cli.Int64Flag{Name: "longdistance"}, + cli.Int64SliceFlag{Name: "intervals"}, + cli.IntFlag{Name: "distance"}, + cli.IntSliceFlag{Name: "times"}, + cli.StringFlag{Name: "dance-move, d"}, + cli.StringSliceFlag{Name: "names, N"}, + cli.UintFlag{Name: "age"}, + cli.Uint64Flag{Name: "bigage"}, + } + app.EnableBashCompletion = true + app.HideHelp = false 
+ app.HideVersion = false + app.BashComplete = func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "lipstick\nkiss\nme\nlipstick\nringo\n") + } + app.Before = func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "HEEEERE GOES\n") + return nil + } + app.After = func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "Phew!\n") + return nil + } + app.CommandNotFound = func(c *cli.Context, command string) { + fmt.Fprintf(c.App.Writer, "Thar be no %q here.\n", command) + } + app.OnUsageError = func(c *cli.Context, err error, isSubcommand bool) error { + if isSubcommand { + return err + } + + fmt.Fprintf(c.App.Writer, "WRONG: %#v\n", err) + return nil + } + app.Action = func(c *cli.Context) error { + cli.DefaultAppComplete(c) + cli.HandleExitCoder(errors.New("not an exit coder, though")) + cli.ShowAppHelp(c) + cli.ShowCommandCompletions(c, "nope") + cli.ShowCommandHelp(c, "also-nope") + cli.ShowCompletions(c) + cli.ShowSubcommandHelp(c) + cli.ShowVersion(c) + + categories := c.App.Categories() + categories.AddCommand("sounds", cli.Command{ + Name: "bloop", + }) + + for _, category := range c.App.Categories() { + fmt.Fprintf(c.App.Writer, "%s\n", category.Name) + fmt.Fprintf(c.App.Writer, "%#v\n", category.Commands) + fmt.Fprintf(c.App.Writer, "%#v\n", category.VisibleCommands()) + } + + fmt.Printf("%#v\n", c.App.Command("doo")) + if c.Bool("infinite") { + c.App.Run([]string{"app", "doo", "wop"}) + } + + if c.Bool("forevar") { + c.App.RunAsSubcommand(c) + } + c.App.Setup() + fmt.Printf("%#v\n", c.App.VisibleCategories()) + fmt.Printf("%#v\n", c.App.VisibleCommands()) + fmt.Printf("%#v\n", c.App.VisibleFlags()) + + fmt.Printf("%#v\n", c.Args().First()) + if len(c.Args()) > 0 { + fmt.Printf("%#v\n", c.Args()[1]) + } + fmt.Printf("%#v\n", c.Args().Present()) + fmt.Printf("%#v\n", c.Args().Tail()) + + set := flag.NewFlagSet("contrive", 0) + nc := cli.NewContext(c.App, set, c) + + fmt.Printf("%#v\n", nc.Args()) + fmt.Printf("%#v\n", nc.Bool("nope")) + fmt.Printf("%#v\n", nc.BoolT("nerp")) + fmt.Printf("%#v\n", nc.Duration("howlong")) + fmt.Printf("%#v\n", nc.Float64("hay")) + fmt.Printf("%#v\n", nc.Generic("bloop")) + fmt.Printf("%#v\n", nc.Int64("bonk")) + fmt.Printf("%#v\n", nc.Int64Slice("burnks")) + fmt.Printf("%#v\n", nc.Int("bips")) + fmt.Printf("%#v\n", nc.IntSlice("blups")) + fmt.Printf("%#v\n", nc.String("snurt")) + fmt.Printf("%#v\n", nc.StringSlice("snurkles")) + fmt.Printf("%#v\n", nc.Uint("flub")) + fmt.Printf("%#v\n", nc.Uint64("florb")) + fmt.Printf("%#v\n", nc.GlobalBool("global-nope")) + fmt.Printf("%#v\n", nc.GlobalBoolT("global-nerp")) + fmt.Printf("%#v\n", nc.GlobalDuration("global-howlong")) + fmt.Printf("%#v\n", nc.GlobalFloat64("global-hay")) + fmt.Printf("%#v\n", nc.GlobalGeneric("global-bloop")) + fmt.Printf("%#v\n", nc.GlobalInt("global-bips")) + fmt.Printf("%#v\n", nc.GlobalIntSlice("global-blups")) + fmt.Printf("%#v\n", nc.GlobalString("global-snurt")) + fmt.Printf("%#v\n", nc.GlobalStringSlice("global-snurkles")) + + fmt.Printf("%#v\n", nc.FlagNames()) + fmt.Printf("%#v\n", nc.GlobalFlagNames()) + fmt.Printf("%#v\n", nc.GlobalIsSet("wat")) + fmt.Printf("%#v\n", nc.GlobalSet("wat", "nope")) + fmt.Printf("%#v\n", nc.NArg()) + fmt.Printf("%#v\n", nc.NumFlags()) + fmt.Printf("%#v\n", nc.Parent()) + + nc.Set("wat", "also-nope") + + ec := cli.NewExitError("ohwell", 86) + fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode()) + fmt.Printf("made it!\n") + return ec + } + + if os.Getenv("HEXY") != "" { + app.Writer = &hexWriter{} + app.ErrWriter = &hexWriter{} + } + + app.Metadata = 
map[string]interface{}{ + "layers": "many", + "explicable": false, + "whatever-values": 19.99, + } + + app.Run(os.Args) +} + +func wopAction(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, ":wave: over here, eh\n") + return nil +} +``` + +## Contribution Guidelines + +Feel free to put up a pull request to fix a bug or maybe add a feature. I will +give it a code review and make sure that it does not break backwards +compatibility. If I or any other collaborators agree that it is in line with +the vision of the project, we will work with you to get the code into +a mergeable state and merge it into the master branch. + +If you have contributed something significant to the project, we will most +likely add you as a collaborator. As a collaborator you are given the ability +to merge others pull requests. It is very important that new code does not +break existing code, so be careful about what code you do choose to merge. + +If you feel like you have contributed to the project but have not yet been +added as a collaborator, we probably forgot to add you, please open an issue. diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go new file mode 100644 index 000000000..60599b046 --- /dev/null +++ b/vendor/github.com/urfave/cli/app.go @@ -0,0 +1,509 @@ +package cli + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "time" +) + +var ( + changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md" + appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) + runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) + + contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." + + errInvalidActionType = NewExitError("ERROR invalid Action type. "+ + fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) +) + +// App is the main structure of a cli application. It is recommended that +// an app be created with the cli.NewApp() function +type App struct { + // The name of the program. Defaults to path.Base(os.Args[0]) + Name string + // Full name of command for help, defaults to Name + HelpName string + // Description of the program. + Usage string + // Text to override the USAGE section of help + UsageText string + // Description of the program argument format. 
+ ArgsUsage string + // Version of the program + Version string + // Description of the program + Description string + // List of commands to execute + Commands []Command + // List of flags to parse + Flags []Flag + // Boolean to enable bash completion commands + EnableBashCompletion bool + // Boolean to hide built-in help command + HideHelp bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + // Populated on app startup; only gettable through the Categories() method + categories CommandCategories + // An action to execute when the bash-completion flag is set + BashComplete BashCompleteFunc + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run and have finished + // It is run even if Action() panics + After AfterFunc + + // The action to execute when no subcommands are specified + // Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}` + // *Note*: support for the deprecated `Action` signature will be removed in a future version + Action interface{} + + // Execute this function if the proper command cannot be found + CommandNotFound CommandNotFoundFunc + // Execute this function if a usage error occurs + OnUsageError OnUsageErrorFunc + // Compilation date + Compiled time.Time + // List of all authors who contributed + Authors []Author + // Copyright of the binary if any + Copyright string + // Name of Author (Note: Use App.Authors, this is deprecated) + Author string + // Email of Author (Note: Use App.Authors, this is deprecated) + Email string + // Writer is the writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // Execute this function to handle ExitErrors. If not provided, HandleExitCoder is provided to + // function as a default, so this is optional. + ExitErrHandler ExitErrHandlerFunc + // Other custom info + Metadata map[string]interface{} + // Carries a function which returns app specific info. + ExtraInfo func() map[string]string + // CustomAppHelpTemplate the text template for app help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomAppHelpTemplate string + + didSetup bool +} + +// Tries to find out when this binary was compiled. +// Returns the current time if it fails to find it. +func compileTime() time.Time { + info, err := os.Stat(os.Args[0]) + if err != nil { + return time.Now() + } + return info.ModTime() +} + +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. +func NewApp() *App { + return &App{ + Name: filepath.Base(os.Args[0]), + HelpName: filepath.Base(os.Args[0]), + Usage: "A new cli application", + UsageText: "", + Version: "0.0.0", + BashComplete: DefaultAppComplete, + Action: helpCommand.Action, + Compiled: compileTime(), + Writer: os.Stdout, + } +} + +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened.
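
As a quick illustration of the `App` fields and `NewApp` defaults documented above, here is a minimal sketch (not part of the vendored file; the program name `greet` and the `lang` flag are invented for the example):

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp() // Name/HelpName default to filepath.Base(os.Args[0])
	app.Name = "greet"
	app.Usage = "say a greeting"
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "lang, l", Value: "english", Usage: "greeting language"},
	}
	// The current, non-deprecated Action signature: func(*cli.Context) error.
	app.Action = func(c *cli.Context) error {
		fmt.Fprintf(c.App.Writer, "hello (%s)\n", c.String("lang"))
		return nil
	}
	app.Run(os.Args) // Run calls Setup internally before parsing flags
}
```
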
+func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + + if a.Author != "" || a.Email != "" { + a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } + + a.categories = CommandCategories{} + for _, command := range a.Commands { + a.categories = a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories) + + if a.Metadata == nil { + a.Metadata = make(map[string]interface{}) + } + + if a.Writer == nil { + a.Writer = os.Stdout + } +} + +// Run is the entry point to the cli app. Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + a.Setup() + + // handle the completion flag separately from the flagset since + // completion could be attempted after a flag, but before its value was put + // on the command line. this causes the flagset to interpret the completion + // flag name as the value of the flag before it which is undesirable + // note that we can only do this because the shell autocomplete function + // always appends the completion flag at the end of the command + shellComplete, arguments := checkShellCompleteFlag(a, arguments) + + // parse flags + set, err := flagSet(a.Name, a.Flags) + if err != nil { + return err + } + + set.SetOutput(ioutil.Discard) + err = set.Parse(arguments[1:]) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, nil) + if nerr != nil { + fmt.Fprintln(a.Writer, nerr) + ShowAppHelp(context) + return nerr + } + context.shellComplete = shellComplete + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err := a.OnUsageError(context, err, false) + a.handleExitCoder(context, err) + return err + } + fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + ShowAppHelp(context) + return err + } + + if !a.HideHelp && checkHelp(context) { + ShowAppHelp(context) + return nil + } + + if !a.HideVersion && checkVersion(context) { + ShowVersion(context) + return nil + } + + if a.After != nil { + defer func() { + if afterErr := a.After(context); afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + fmt.Fprintf(a.Writer, "%v\n\n", beforeErr) + ShowAppHelp(context) + a.handleExitCoder(context, beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + if a.Action == nil { + a.Action = helpCommand.Action + } + + // Run default Action + err = HandleAction(a.Action, context) + + a.handleExitCoder(context, err) + return err +} + +// RunAndExitOnError calls .Run() and exits non-zero if an error was returned +// +// Deprecated: instead you should return an error that fulfills cli.ExitCoder +// to cli.App.Run. 
This will cause the application to exit with the given error +// code in the cli.ExitCoder +func (a *App) RunAndExitOnError() { + if err := a.Run(os.Args); err != nil { + fmt.Fprintln(a.errWriter(), err) + OsExiter(1) + } +} + +// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to +// generate command-specific flags +func (a *App) RunAsSubcommand(ctx *Context) (err error) { + // append help to commands + if len(a.Commands) > 0 { + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + // parse flags + set, err := flagSet(a.Name, a.Flags) + if err != nil { + return err + } + + set.SetOutput(ioutil.Discard) + err = set.Parse(ctx.Args().Tail()) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, ctx) + + if nerr != nil { + fmt.Fprintln(a.Writer, nerr) + fmt.Fprintln(a.Writer) + if len(a.Commands) > 0 { + ShowSubcommandHelp(context) + } else { + ShowCommandHelp(ctx, context.Args().First()) + } + return nerr + } + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err = a.OnUsageError(context, err, true) + a.handleExitCoder(context, err) + return err + } + fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + ShowSubcommandHelp(context) + return err + } + + if len(a.Commands) > 0 { + if checkSubcommandHelp(context) { + return nil + } + } else { + if checkCommandHelp(ctx, context.Args().First()) { + return nil + } + } + + if a.After != nil { + defer func() { + afterErr := a.After(context) + if afterErr != nil { + a.handleExitCoder(context, err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + a.handleExitCoder(context, beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + err = HandleAction(a.Action, context) + + a.handleExitCoder(context, err) + return err +} + +// Command returns the named command on App.
Returns nil if the command does not exist +func (a *App) Command(name string) *Command { + for _, c := range a.Commands { + if c.HasName(name) { + return &c + } + } + + return nil +} + +// Categories returns a slice containing all the categories with the commands they contain +func (a *App) Categories() CommandCategories { + return a.categories +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []*CommandCategory { + ret := []*CommandCategory{} + for _, category := range a.categories { + if visible := func() *CommandCategory { + for _, command := range category.Commands { + if !command.Hidden { + return category + } + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []Command { + ret := []Command{} + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + +func (a *App) hasFlag(flag Flag) bool { + for _, f := range a.Flags { + if flag == f { + return true + } + } + + return false +} + +func (a *App) errWriter() io.Writer { + + // When the app ErrWriter is nil use the package level one. + if a.ErrWriter == nil { + return ErrWriter + } + + return a.ErrWriter +} + +func (a *App) appendFlag(flag Flag) { + if !a.hasFlag(flag) { + a.Flags = append(a.Flags, flag) + } +} + +func (a *App) handleExitCoder(context *Context, err error) { + if a.ExitErrHandler != nil { + a.ExitErrHandler(context, err) + } else { + HandleExitCoder(err) + } +} + +// Author represents someone who has contributed to a cli project. +type Author struct { + Name string // The Author's name + Email string // The Author's email +} + +// String makes Author comply with the Stringer interface, to allow an easy print in the templating process +func (a Author) String() string { + e := "" + if a.Email != "" { + e = " <" + a.Email + ">" + } + + return fmt.Sprintf("%v%v", a.Name, e) +} + +// HandleAction attempts to figure out which Action signature was used. If +// it's an ActionFunc or a func with the legacy signature for Action, the func +// is run! +func HandleAction(action interface{}, context *Context) (err error) { + if a, ok := action.(ActionFunc); ok { + return a(context) + } else if a, ok := action.(func(*Context) error); ok { + return a(context) + } else if a, ok := action.(func(*Context)); ok { // deprecated function signature + a(context) + return nil + } + + return errInvalidActionType +} diff --git a/vendor/github.com/urfave/cli/category.go b/vendor/github.com/urfave/cli/category.go new file mode 100644 index 000000000..1a6055023 --- /dev/null +++ b/vendor/github.com/urfave/cli/category.go @@ -0,0 +1,44 @@ +package cli + +// CommandCategories is a slice of *CommandCategory. +type CommandCategories []*CommandCategory + +// CommandCategory is a category containing commands. +type CommandCategory struct { + Name string + Commands Commands +} + +func (c CommandCategories) Less(i, j int) bool { + return c[i].Name < c[j].Name +} + +func (c CommandCategories) Len() int { + return len(c) +} + +func (c CommandCategories) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// AddCommand adds a command to a category.
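
A small sketch (not vendored code) of how the category plumbing fits together: `Setup` groups commands into the private `categories` field via `CommandCategories.AddCommand`, and the help templates then read `Categories`/`VisibleCommands`. The command names here are invented:

```go
package main

import (
	"fmt"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{Name: "add", Category: "maths"},
		{Name: "sub", Category: "maths"},
		{Name: "top", Category: "process", Hidden: true}, // dropped by VisibleCommands
	}
	app.Setup() // populates app.categories via AddCommand
	for _, cat := range app.Categories() {
		fmt.Printf("%s: %d of %d commands visible\n",
			cat.Name, len(cat.VisibleCommands()), len(cat.Commands))
	}
}
```
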
+func (c CommandCategories) AddCommand(category string, command Command) CommandCategories { + for _, commandCategory := range c { + if commandCategory.Name == category { + commandCategory.Commands = append(commandCategory.Commands, command) + return c + } + } + return append(c, &CommandCategory{Name: category, Commands: []Command{command}}) +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (c *CommandCategory) VisibleCommands() []Command { + ret := []Command{} + for _, command := range c.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} diff --git a/vendor/github.com/urfave/cli/cli.go b/vendor/github.com/urfave/cli/cli.go new file mode 100644 index 000000000..90c07eb8e --- /dev/null +++ b/vendor/github.com/urfave/cli/cli.go @@ -0,0 +1,22 @@ +// Package cli provides a minimal framework for creating and organizing command line +// Go applications. cli is designed to be easy to understand and write; the simplest +// cli application can be written as follows: +// func main() { +// cli.NewApp().Run(os.Args) +// } +// +// Of course this application does not do much, so let's make this an actual application: +// func main() { +// app := cli.NewApp() +// app.Name = "greet" +// app.Usage = "say a greeting" +// app.Action = func(c *cli.Context) error { +// println("Greetings") +// return nil +// } +// +// app.Run(os.Args) +// } +package cli + +//go:generate python ./generate-flag-types cli -i flag-types.json -o flag_generated.go diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go new file mode 100644 index 000000000..502fc9f30 --- /dev/null +++ b/vendor/github.com/urfave/cli/command.go @@ -0,0 +1,304 @@ +package cli + +import ( + "fmt" + "io/ioutil" + "sort" + "strings" +) + +// Command is a subcommand for a cli.App. +type Command struct { + // The name of the command + Name string + // short name of the command. Typically one character (deprecated, use `Aliases`) + ShortName string + // A list of aliases for the command + Aliases []string + // A short description of the usage of this command + Usage string + // Custom text to show on USAGE section of help + UsageText string + // A longer explanation of how the command works + Description string + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string + // The function to call when checking for bash command completions + BashComplete BashCompleteFunc + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run and have finished + // It is run even if Action() panics + After AfterFunc + // The function to call when this command is invoked + Action interface{} + // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind + // of deprecation period has passed, maybe? + + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc + // List of child commands + Subcommands Commands + // List of flags to parse + Flags []Flag + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Skip argument reordering which attempts to move flags before arguments, + // but only works if all flags appear after all arguments.
This behavior was + removed in version 2 since it only works under specific conditions, so we + backport it here by exposing it as an option for compatibility. + SkipArgReorder bool + // Boolean to hide built-in help command + HideHelp bool + // Boolean to hide this command from help or completion + Hidden bool + + // Full name of command for help, defaults to full command name, including parent commands. + HelpName string + commandNamePath []string + + // CustomHelpTemplate the text template for the command help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomHelpTemplate string +} + +type CommandsByName []Command + +func (c CommandsByName) Len() int { + return len(c) +} + +func (c CommandsByName) Less(i, j int) bool { + return c[i].Name < c[j].Name +} + +func (c CommandsByName) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// FullName returns the full name of the command. +// For subcommands this ensures that parent commands are part of the command path +func (c Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") +} + +// Commands is a slice of Command +type Commands []Command + +// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags +func (c Command) Run(ctx *Context) (err error) { + if len(c.Subcommands) > 0 { + return c.startApp(ctx) + } + + if !c.HideHelp && (HelpFlag != BoolFlag{}) { + // append help to flags + c.Flags = append( + c.Flags, + HelpFlag, + ) + } + + set, err := flagSet(c.Name, c.Flags) + if err != nil { + return err + } + set.SetOutput(ioutil.Discard) + + if c.SkipFlagParsing { + err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) + } else if !c.SkipArgReorder { + firstFlagIndex := -1 + terminatorIndex := -1 + for index, arg := range ctx.Args() { + if arg == "--" { + terminatorIndex = index + break + } else if arg == "-" { + // Do nothing. A dash alone is not really a flag. + continue + } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { + firstFlagIndex = index + } + } + + if firstFlagIndex > -1 { + args := ctx.Args() + regularArgs := make([]string, len(args[1:firstFlagIndex])) + copy(regularArgs, args[1:firstFlagIndex]) + + var flagArgs []string + if terminatorIndex > -1 { + flagArgs = args[firstFlagIndex:terminatorIndex] + regularArgs = append(regularArgs, args[terminatorIndex:]...)
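+ // flagArgs now holds everything from the first flag up to (but not
+ // including) the "--" terminator, while the terminator and everything
+ // after it are preserved as regular, non-flag arguments.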
+ } else { + flagArgs = args[firstFlagIndex:] + } + + err = set.Parse(append(flagArgs, regularArgs...)) + } else { + err = set.Parse(ctx.Args().Tail()) + } + } else { + err = set.Parse(ctx.Args().Tail()) + } + + nerr := normalizeFlags(c.Flags, set) + if nerr != nil { + fmt.Fprintln(ctx.App.Writer, nerr) + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return nerr + } + + context := NewContext(ctx.App, set, ctx) + context.Command = c + if checkCommandCompletions(context, c.Name) { + return nil + } + + if err != nil { + if c.OnUsageError != nil { + err := c.OnUsageError(context, err, false) + context.App.handleExitCoder(context, err) + return err + } + fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error()) + fmt.Fprintln(context.App.Writer) + ShowCommandHelp(context, c.Name) + return err + } + + if checkCommandHelp(context, c.Name) { + return nil + } + + if c.After != nil { + defer func() { + afterErr := c.After(context) + if afterErr != nil { + context.App.handleExitCoder(context, err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if c.Before != nil { + err = c.Before(context) + if err != nil { + ShowCommandHelp(context, c.Name) + context.App.handleExitCoder(context, err) + return err + } + } + + if c.Action == nil { + c.Action = helpSubcommand.Action + } + + err = HandleAction(c.Action, context) + + if err != nil { + context.App.handleExitCoder(context, err) + } + return err +} + +// Names returns the names including short names and aliases. +func (c Command) Names() []string { + names := []string{c.Name} + + if c.ShortName != "" { + names = append(names, c.ShortName) + } + + return append(names, c.Aliases...) +} + +// HasName returns true if Command.Name or Command.ShortName matches given name +func (c Command) HasName(name string) bool { + for _, n := range c.Names() { + if n == name { + return true + } + } + return false +} + +func (c Command) startApp(ctx *Context) error { + app := NewApp() + app.Metadata = ctx.App.Metadata + // set the name and usage + app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) + if c.HelpName != "" { // keep an explicitly set HelpName, otherwise fall back to the derived name + app.HelpName = c.HelpName + } else { + app.HelpName = app.Name + } + + app.Usage = c.Usage + app.Description = c.Description + app.ArgsUsage = c.ArgsUsage + + // set CommandNotFound + app.CommandNotFound = ctx.App.CommandNotFound + app.CustomAppHelpTemplate = c.CustomHelpTemplate + + // set the flags and commands + app.Commands = c.Subcommands + app.Flags = c.Flags + app.HideHelp = c.HideHelp + + app.Version = ctx.App.Version + app.HideVersion = ctx.App.HideVersion + app.Compiled = ctx.App.Compiled + app.Author = ctx.App.Author + app.Email = ctx.App.Email + app.Writer = ctx.App.Writer + app.ErrWriter = ctx.App.ErrWriter + + app.categories = CommandCategories{} + for _, command := range c.Subcommands { + app.categories = app.categories.AddCommand(command.Category, command) + } + + sort.Sort(app.categories) + + // bash completion + app.EnableBashCompletion = ctx.App.EnableBashCompletion + if c.BashComplete != nil { + app.BashComplete = c.BashComplete + } + + // set the actions + app.Before = c.Before + app.After = c.After + if c.Action != nil { + app.Action = c.Action + } else { + app.Action = helpSubcommand.Action + } + app.OnUsageError = c.OnUsageError + + for index, cc := range app.Commands { + app.Commands[index].commandNamePath = []string{c.Name, cc.Name} + } + + return app.RunAsSubcommand(ctx) +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (c
Command) VisibleFlags() []Flag { + return visibleFlags(c.Flags) +} diff --git a/vendor/github.com/urfave/cli/context.go b/vendor/github.com/urfave/cli/context.go new file mode 100644 index 000000000..012b9b586 --- /dev/null +++ b/vendor/github.com/urfave/cli/context.go @@ -0,0 +1,278 @@ +package cli + +import ( + "errors" + "flag" + "reflect" + "strings" + "syscall" +) + +// Context is a type that is passed through to +// each Handler action in a cli application. Context +// can be used to retrieve context-specific Args and +// parsed command-line options. +type Context struct { + App *App + Command Command + shellComplete bool + flagSet *flag.FlagSet + setFlags map[string]bool + parentContext *Context +} + +// NewContext creates a new context. For use when invoking an App or Command action. +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { + c := &Context{App: app, flagSet: set, parentContext: parentCtx} + + if parentCtx != nil { + c.shellComplete = parentCtx.shellComplete + } + + return c +} + +// NumFlags returns the number of flags set +func (c *Context) NumFlags() int { + return c.flagSet.NFlag() +} + +// Set sets a context flag to a value. +func (c *Context) Set(name, value string) error { + c.setFlags = nil + return c.flagSet.Set(name, value) +} + +// GlobalSet sets a context flag to a value on the global flagset +func (c *Context) GlobalSet(name, value string) error { + globalContext(c).setFlags = nil + return globalContext(c).flagSet.Set(name, value) +} + +// IsSet determines if the flag was actually set +func (c *Context) IsSet(name string) bool { + if c.setFlags == nil { + c.setFlags = make(map[string]bool) + + c.flagSet.Visit(func(f *flag.Flag) { + c.setFlags[f.Name] = true + }) + + c.flagSet.VisitAll(func(f *flag.Flag) { + if _, ok := c.setFlags[f.Name]; ok { + return + } + c.setFlags[f.Name] = false + }) + + // XXX hack to support IsSet for flags with EnvVar + // + // There isn't an easy way to do this with the current implementation since + // whether a flag was set via an environment variable is very difficult to + // determine here. Instead, we intend to introduce a backwards incompatible + // change in version 2 to add `IsSet` to the Flag interface to push the + // responsibility closer to where the information required to determine + // whether a flag is set by non-standard means such as environment + // variables is available.
+ // + // See https://github.com/urfave/cli/issues/294 for additional discussion + flags := c.Command.Flags + if c.Command.Name == "" { // cannot == Command{} since it contains slice types + if c.App != nil { + flags = c.App.Flags + } + } + for _, f := range flags { + eachName(f.GetName(), func(name string) { + if isSet, ok := c.setFlags[name]; isSet || !ok { + return + } + + val := reflect.ValueOf(f) + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + + envVarValue := val.FieldByName("EnvVar") + if !envVarValue.IsValid() { + return + } + + eachName(envVarValue.String(), func(envVar string) { + envVar = strings.TrimSpace(envVar) + if _, ok := syscall.Getenv(envVar); ok { + c.setFlags[name] = true + return + } + }) + }) + } + } + + return c.setFlags[name] +} + +// GlobalIsSet determines if the global flag was actually set +func (c *Context) GlobalIsSet(name string) bool { + ctx := c + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + + for ; ctx != nil; ctx = ctx.parentContext { + if ctx.IsSet(name) { + return true + } + } + return false +} + +// FlagNames returns a slice of flag names used in this context. +func (c *Context) FlagNames() (names []string) { + for _, flag := range c.Command.Flags { + name := strings.Split(flag.GetName(), ",")[0] + if name == "help" { + continue + } + names = append(names, name) + } + return +} + +// GlobalFlagNames returns a slice of global flag names used by the app. +func (c *Context) GlobalFlagNames() (names []string) { + for _, flag := range c.App.Flags { + name := strings.Split(flag.GetName(), ",")[0] + if name == "help" || name == "version" { + continue + } + names = append(names, name) + } + return +} + +// Parent returns the parent context, if any +func (c *Context) Parent() *Context { + return c.parentContext +} + +// value returns the value of the flag corresponding to `name` +func (c *Context) value(name string) interface{} { + return c.flagSet.Lookup(name).Value.(flag.Getter).Get() +} + +// Args contains the app's console arguments +type Args []string + +// Args returns the command line arguments associated with the context. +func (c *Context) Args() Args { + args := Args(c.flagSet.Args()) + return args +} + +// NArg returns the number of the command line arguments.
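
A brief fragment (not vendored code) showing the lookup helpers above inside an action; it assumes an `app` like the earlier sketch, and the flag names `lang` and `debug` are invented:

```go
app.Action = func(c *cli.Context) error {
	// IsSet reports true for flags set on the command line or, per the
	// workaround above, via a flag's EnvVar.
	if c.IsSet("lang") {
		fmt.Println("lang =", c.String("lang"))
	}
	// GlobalBool walks up parentContext links until the flag is found.
	if c.GlobalBool("debug") {
		fmt.Println("first arg:", c.Args().First(), "of", c.NArg())
	}
	return nil
}
```
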
+func (c *Context) NArg() int { + return len(c.Args()) +} + +// Get returns the nth argument, or else a blank string +func (a Args) Get(n int) string { + if len(a) > n { + return a[n] + } + return "" +} + +// First returns the first argument, or else a blank string +func (a Args) First() string { + return a.Get(0) +} + +// Tail returns the rest of the arguments (not the first one) +// or else an empty string slice +func (a Args) Tail() []string { + if len(a) >= 2 { + return []string(a)[1:] + } + return []string{} +} + +// Present checks if there are any arguments present +func (a Args) Present() bool { + return len(a) != 0 +} + +// Swap swaps arguments at the given indexes +func (a Args) Swap(from, to int) error { + if from >= len(a) || to >= len(a) { + return errors.New("index out of range") + } + a[from], a[to] = a[to], a[from] + return nil +} + +func globalContext(ctx *Context) *Context { + if ctx == nil { + return nil + } + + for { + if ctx.parentContext == nil { + return ctx + } + ctx = ctx.parentContext + } +} + +func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil; ctx = ctx.parentContext { + if f := ctx.flagSet.Lookup(name); f != nil { + return ctx.flagSet + } + } + return nil +} + +func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { + switch ff.Value.(type) { + case *StringSlice: + default: + set.Set(name, ff.Value.String()) + } +} + +func normalizeFlags(flags []Flag, set *flag.FlagSet) error { + visited := make(map[string]bool) + set.Visit(func(f *flag.Flag) { + visited[f.Name] = true + }) + for _, f := range flags { + parts := strings.Split(f.GetName(), ",") + if len(parts) == 1 { + continue + } + var ff *flag.Flag + for _, name := range parts { + name = strings.Trim(name, " ") + if visited[name] { + if ff != nil { + return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) + } + ff = set.Lookup(name) + } + } + if ff == nil { + continue + } + for _, name := range parts { + name = strings.Trim(name, " ") + if !visited[name] { + copyFlag(name, ff, set) + } + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/errors.go b/vendor/github.com/urfave/cli/errors.go new file mode 100644 index 000000000..562b2953c --- /dev/null +++ b/vendor/github.com/urfave/cli/errors.go @@ -0,0 +1,115 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OsExiter is the function used when the app exits. If not set defaults to os.Exit. +var OsExiter = os.Exit + +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. +type MultiError struct { + Errors []error +} + +// NewMultiError creates a new MultiError. Pass in one or more errors. +func NewMultiError(err ...error) MultiError { + return MultiError{Errors: err} +} + +// Error implements the error interface. 
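
The `MultiError` type above is what `App.Run` returns when both the action and an `After` hook fail; a tiny fragment (not vendored code, `errors` from the standard library):

```go
err := cli.NewMultiError(
	errors.New("action failed"),
	errors.New("after hook failed"),
)
fmt.Println(err.Error()) // prints both messages, joined by newlines
```
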
+func (m MultiError) Error() string { + errs := make([]string, len(m.Errors)) + for i, err := range m.Errors { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} + +type ErrorFormatter interface { + Format(s fmt.State, verb rune) +} + +// ExitCoder is the interface checked by `App` and `Command` for a custom exit +// code +type ExitCoder interface { + error + ExitCode() int +} + +// ExitError fulfills both the builtin `error` interface and `ExitCoder` +type ExitError struct { + exitCode int + message interface{} +} + +// NewExitError makes a new *ExitError +func NewExitError(message interface{}, exitCode int) *ExitError { + return &ExitError{ + exitCode: exitCode, + message: message, + } +} + +// Error returns the string message, fulfilling the interface required by +// `error` +func (ee *ExitError) Error() string { + return fmt.Sprintf("%v", ee.message) +} + +// ExitCode returns the exit code, fulfilling the interface required by +// `ExitCoder` +func (ee *ExitError) ExitCode() int { + return ee.exitCode +} + +// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if +// so prints the error to stderr (if it is non-empty) and calls OsExiter with the +// given exit code. If the given error is a MultiError, then this func is +// called on all members of the Errors slice and calls OsExiter with the last exit code. +func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + if _, ok := exitErr.(ErrorFormatter); ok { + fmt.Fprintf(ErrWriter, "%+v\n", err) + } else { + fmt.Fprintln(ErrWriter, err) + } + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + code := handleMultiError(multiErr) + OsExiter(code) + return + } +} + +func handleMultiError(multiErr MultiError) int { + code := 1 + for _, merr := range multiErr.Errors { + if multiErr2, ok := merr.(MultiError); ok { + code = handleMultiError(multiErr2) + } else { + fmt.Fprintln(ErrWriter, merr) + if exitErr, ok := merr.(ExitCoder); ok { + code = exitErr.ExitCode() + } + } + } + return code +} diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go new file mode 100644 index 000000000..b17f5b9b6 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag.go @@ -0,0 +1,807 @@ +package cli + +import ( + "flag" + "fmt" + "reflect" + "runtime" + "strconv" + "strings" + "syscall" + "time" +) + +const defaultPlaceholder = "value" + +// BashCompletionFlag enables bash-completion for all commands and subcommands +var BashCompletionFlag Flag = BoolFlag{ + Name: "generate-bash-completion", + Hidden: true, +} + +// VersionFlag prints the version for the application +var VersionFlag Flag = BoolFlag{ + Name: "version, v", + Usage: "print the version", +} + +// HelpFlag prints the help for all commands and subcommands +// Set to the zero value (BoolFlag{}) to disable the flag -- keeps the help subcommand +// unless HideHelp is set to true. +var HelpFlag Flag = BoolFlag{ + Name: "help, h", + Usage: "show help", +} + +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + +// FlagNamePrefixer converts a full flag name and its placeholder into the help +// message flag prefix. This is used by the default FlagStringer. +var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames + +// FlagEnvHinter annotates flag help message with the environment variable +// details.
This is used by the default FlagStringer. +var FlagEnvHinter FlagEnvHintFunc = withEnvHint + +// FlagsByName is a slice of Flag. +type FlagsByName []Flag + +func (f FlagsByName) Len() int { + return len(f) +} + +func (f FlagsByName) Less(i, j int) bool { + return f[i].GetName() < f[j].GetName() +} + +func (f FlagsByName) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// Flag is a common interface related to parsing flags in cli. +// For more advanced flag parsing techniques, it is recommended that +// this interface be implemented. +type Flag interface { + fmt.Stringer + // Apply Flag settings to the given flag set + Apply(*flag.FlagSet) + GetName() string +} + +// errorableFlag is an interface that allows us to return errors during apply +// it allows flags defined in this library to return errors in a fashion backwards compatible +// TODO remove in v2 and modify the existing Flag interface to return errors +type errorableFlag interface { + Flag + + ApplyWithError(*flag.FlagSet) error +} + +func flagSet(name string, flags []Flag) (*flag.FlagSet, error) { + set := flag.NewFlagSet(name, flag.ContinueOnError) + + for _, f := range flags { + //TODO remove in v2 when errorableFlag is removed + if ef, ok := f.(errorableFlag); ok { + if err := ef.ApplyWithError(set); err != nil { + return nil, err + } + } else { + f.Apply(set) + } + } + return set, nil +} + +func eachName(longName string, fn func(string)) { + parts := strings.Split(longName, ",") + for _, name := range parts { + name = strings.Trim(name, " ") + fn(name) + } +} + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +// Ignores parsing errors +func (f GenericFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f GenericFlag) ApplyWithError(set *flag.FlagSet) error { + val := f.Value + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + if err := val.Set(envVal); err != nil { + return fmt.Errorf("could not parse %s as value for flag %s: %s", envVal, f.Name, err) + } + break + } + } + } + + eachName(f.Name, func(name string) { + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// StringSlice is an opaque type for []string to satisfy flag.Value and flag.Getter +type StringSlice []string + +// Set appends the string value to the list of values +func (f *StringSlice) Set(value string) error { + *f = append(*f, value) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *StringSlice) String() string { + return fmt.Sprintf("%s", *f) +} + +// Value returns the slice of strings set by this flag +func (f *StringSlice) Value() []string { + return *f +} + +// Get returns the slice of strings set by this flag +func (f *StringSlice) Get() interface{} { + return *f +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f StringSliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringSliceFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := 
range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + newVal := &StringSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as string value for flag %s: %s", envVal, f.Name, err) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &StringSlice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// IntSlice is an opaque type for []int to satisfy flag.Value and flag.Getter +type IntSlice []int + +// Set parses the value into an integer and appends it to the list of values +func (f *IntSlice) Set(value string) error { + tmp, err := strconv.Atoi(value) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *IntSlice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of ints set by this flag +func (f *IntSlice) Value() []int { + return *f +} + +// Get returns the slice of ints set by this flag +func (f *IntSlice) Get() interface{} { + return *f +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f IntSliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntSliceFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + newVal := &IntSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int slice value for flag %s: %s", envVal, f.Name, err) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &IntSlice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// Int64Slice is an opaque type for []int to satisfy flag.Value and flag.Getter +type Int64Slice []int64 + +// Set parses the value into an integer and appends it to the list of values +func (f *Int64Slice) Set(value string) error { + tmp, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Int64Slice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of ints set by this flag +func (f *Int64Slice) Value() []int64 { + return *f +} + +// Get returns the slice of ints set by this flag +func (f *Int64Slice) Get() interface{} { + return *f +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Int64SliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + newVal := &Int64Slice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int64 
slice value for flag %s: %s", envVal, f.Name, err) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &Int64Slice{} + } + set.Var(f.Value, name, f.Usage) + }) + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f BoolFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolFlag) ApplyWithError(set *flag.FlagSet) error { + val := false + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + if envVal == "" { + val = false + break + } + + envValBool, err := strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + + val = envValBool + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f BoolTFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolTFlag) ApplyWithError(set *flag.FlagSet) error { + val := true + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + if envVal == "" { + val = false + break + } + + envValBool, err := strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + + val = envValBool + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f StringFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + f.Value = envVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + return + } + set.String(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f IntFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + f.Value = int(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + 
return + } + set.Int(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Int64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64Flag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValInt + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Int64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Int64(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f UintFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f UintFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = uint(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Uint64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Uint64Flag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = uint64(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint64(name, f.Value, f.Usage) + }) + + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f DurationFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f DurationFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValDuration, err := time.ParseDuration(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as duration for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValDuration + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Duration(name, f.Value, f.Usage) + }) + + return 
nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Float64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Float64Flag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValFloat, err := strconv.ParseFloat(envVal, 64) + if err != nil { + return fmt.Errorf("could not parse %s as float64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = float64(envValFloat) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Float64(name, f.Value, f.Usage) + }) + + return nil +} + +func visibleFlags(fl []Flag) []Flag { + visible := []Flag{} + for _, flag := range fl { + field := flagValue(flag).FieldByName("Hidden") + if !field.IsValid() || !field.Bool() { + visible = append(visible, flag) + } + } + return visible +} + +func prefixFor(name string) (prefix string) { + if len(name) == 1 { + prefix = "-" + } else { + prefix = "--" + } + + return +} + +// Returns the placeholder, if any, and the unquoted usage string. +func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(fullName, placeholder string) string { + var prefixed string + parts := strings.Split(fullName, ",") + for i, name := range parts { + name = strings.Trim(name, " ") + prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } + if i < len(parts)-1 { + prefixed += ", " + } + } + return prefixed +} + +func withEnvHint(envVar, str string) string { + envText := "" + if envVar != "" { + prefix := "$" + suffix := "" + sep := ", $" + if runtime.GOOS == "windows" { + prefix = "%" + suffix = "%" + sep = "%, %" + } + envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix) + } + return str + envText +} + +func flagValue(f Flag) reflect.Value { + fv := reflect.ValueOf(f) + for fv.Kind() == reflect.Ptr { + fv = reflect.Indirect(fv) + } + return fv +} + +func stringifyFlag(f Flag) string { + fv := flagValue(f) + + switch f.(type) { + case IntSliceFlag: + return FlagEnvHinter(fv.FieldByName("EnvVar").String(), + stringifyIntSliceFlag(f.(IntSliceFlag))) + case Int64SliceFlag: + return FlagEnvHinter(fv.FieldByName("EnvVar").String(), + stringifyInt64SliceFlag(f.(Int64SliceFlag))) + case StringSliceFlag: + return FlagEnvHinter(fv.FieldByName("EnvVar").String(), + stringifyStringSliceFlag(f.(StringSliceFlag))) + } + + placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) + + needsPlaceholder := false + defaultValueString := "" + + if val := fv.FieldByName("Value"); val.IsValid() { + needsPlaceholder = true + defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface()) + + if val.Kind() == reflect.String && val.String() != "" { + defaultValueString = fmt.Sprintf(" (default: %q)", val.String()) + } + } + + if defaultValueString == " (default: )" { + defaultValueString = "" + } + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + +
usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString)) + + return FlagEnvHinter(fv.FieldByName("EnvVar").String(), + fmt.Sprintf("%s\t%s", FlagNamePrefixer(fv.FieldByName("Name").String(), placeholder), usageWithDefault)) +} + +func stringifyIntSliceFlag(f IntSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyInt64SliceFlag(f Int64SliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyStringSliceFlag(f StringSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, fmt.Sprintf("%q", s)) + } + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifySliceFlag(usage, name string, defaultVals []string) string { + placeholder, usage := unquoteUsage(usage) + if placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultVal := "" + if len(defaultVals) > 0 { + defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) + return fmt.Sprintf("%s\t%s", FlagNamePrefixer(name, placeholder), usageWithDefault) +} diff --git a/vendor/github.com/urfave/cli/flag_generated.go b/vendor/github.com/urfave/cli/flag_generated.go new file mode 100644 index 000000000..491b61956 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_generated.go @@ -0,0 +1,627 @@ +package cli + +import ( + "flag" + "strconv" + "time" +) + +// WARNING: This file is generated! 
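
Before the generated per-type code below, a short sketch (not vendored code) of how the flag fields interact with the Apply logic in flag.go; the names `config, c` and `CONFIG_PATH` are invented:

```go
var path string
flags := []cli.Flag{
	cli.StringFlag{
		Name:        "config, c",     // comma-separated long and short names
		EnvVar:      "CONFIG_PATH",   // checked by ApplyWithError before the default is used
		Value:       "/etc/app.conf", // rendered as "(default: ...)" by stringifyFlag
		Destination: &path,           // the parsed value is mirrored here
		Usage:       "load configuration from `FILE`", // backticks set the help placeholder
	},
}
```
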
+
+// BoolFlag is a flag with type bool
+type BoolFlag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Hidden      bool
+	Destination *bool
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f BoolFlag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f BoolFlag) GetName() string {
+	return f.Name
+}
+
+// Bool looks up the value of a local BoolFlag, returns
+// false if not found
+func (c *Context) Bool(name string) bool {
+	return lookupBool(name, c.flagSet)
+}
+
+// GlobalBool looks up the value of a global BoolFlag, returns
+// false if not found
+func (c *Context) GlobalBool(name string) bool {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupBool(name, fs)
+	}
+	return false
+}
+
+func lookupBool(name string, set *flag.FlagSet) bool {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := strconv.ParseBool(f.Value.String())
+		if err != nil {
+			return false
+		}
+		return parsed
+	}
+	return false
+}
+
+// BoolTFlag is a flag with type bool that is true by default
+type BoolTFlag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Hidden      bool
+	Destination *bool
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f BoolTFlag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f BoolTFlag) GetName() string {
+	return f.Name
+}
+
+// BoolT looks up the value of a local BoolTFlag, returns
+// false if not found
+func (c *Context) BoolT(name string) bool {
+	return lookupBoolT(name, c.flagSet)
+}
+
+// GlobalBoolT looks up the value of a global BoolTFlag, returns
+// false if not found
+func (c *Context) GlobalBoolT(name string) bool {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupBoolT(name, fs)
+	}
+	return false
+}
+
+func lookupBoolT(name string, set *flag.FlagSet) bool {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := strconv.ParseBool(f.Value.String())
+		if err != nil {
+			return false
+		}
+		return parsed
+	}
+	return false
+}
+
+// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration)
+type DurationFlag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Hidden      bool
+	Value       time.Duration
+	Destination *time.Duration
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f DurationFlag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f DurationFlag) GetName() string {
+	return f.Name
+}
+
+// Duration looks up the value of a local DurationFlag, returns
+// 0 if not found
+func (c *Context) Duration(name string) time.Duration {
+	return lookupDuration(name, c.flagSet)
+}
+
+// GlobalDuration looks up the value of a global DurationFlag, returns
+// 0 if not found
+func (c *Context) GlobalDuration(name string) time.Duration {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupDuration(name, fs)
+	}
+	return 0
+}
+
+func lookupDuration(name string, set *flag.FlagSet) time.Duration {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := time.ParseDuration(f.Value.String())
+		if err != nil {
+			return 0
+		}
+		return parsed
+	}
+	return 0
+}
+
+// Float64Flag is a flag with type float64
+type Float64Flag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Hidden      bool
+	Value       float64
+	Destination *float64
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f Float64Flag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f Float64Flag) GetName() string {
+	return f.Name
+}
+
+// Float64 looks up the value of a local Float64Flag, returns
+// 0 if not found
+func (c *Context) Float64(name string) float64 {
+	return lookupFloat64(name, c.flagSet)
+}
+
+// GlobalFloat64 looks up the value of a global Float64Flag, returns
+// 0 if not found
+func (c *Context) GlobalFloat64(name string) float64 {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupFloat64(name, fs)
+	}
+	return 0
+}
+
+func lookupFloat64(name string, set *flag.FlagSet) float64 {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := strconv.ParseFloat(f.Value.String(), 64)
+		if err != nil {
+			return 0
+		}
+		return parsed
+	}
+	return 0
+}
+
+// GenericFlag is a flag with type Generic
+type GenericFlag struct {
+	Name   string
+	Usage  string
+	EnvVar string
+	Hidden bool
+	Value  Generic
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f GenericFlag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f GenericFlag) GetName() string {
+	return f.Name
+}
+
+// Generic looks up the value of a local GenericFlag, returns
+// nil if not found
+func (c *Context) Generic(name string) interface{} {
+	return lookupGeneric(name, c.flagSet)
+}
+
+// GlobalGeneric looks up the value of a global GenericFlag, returns
+// nil if not found
+func (c *Context) GlobalGeneric(name string) interface{} {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupGeneric(name, fs)
+	}
+	return nil
+}
+
+func lookupGeneric(name string, set *flag.FlagSet) interface{} {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := f.Value, error(nil)
+		if err != nil {
+			return nil
+		}
+		return parsed
+	}
+	return nil
+}
+
+// Int64Flag is a flag with type int64
+type Int64Flag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Hidden      bool
+	Value       int64
+	Destination *int64
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f Int64Flag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f Int64Flag) GetName() string {
+	return f.Name
+}
+
+// Int64 looks up the value of a local Int64Flag, returns
+// 0 if not found
+func (c *Context) Int64(name string) int64 {
+	return lookupInt64(name, c.flagSet)
+}
+
+// GlobalInt64 looks up the value of a global Int64Flag, returns
+// 0 if not found
+func (c *Context) GlobalInt64(name string) int64 {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupInt64(name, fs)
+	}
+	return 0
+}
+
+func lookupInt64(name string, set *flag.FlagSet) int64 {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := strconv.ParseInt(f.Value.String(), 0, 64)
+		if err != nil {
+			return 0
+		}
+		return parsed
+	}
+	return 0
+}
+
+// IntFlag is a flag with type int
+type IntFlag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Hidden      bool
+	Value       int
+	Destination *int
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f IntFlag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f IntFlag) GetName() string {
+	return f.Name
+}
+
+// Int looks up the value of a local IntFlag, returns
+// 0 if not found
+func (c *Context) Int(name string) int {
+	return lookupInt(name, c.flagSet)
+}
+
+// GlobalInt looks up the value of a global IntFlag, returns
+// 0 if not found
+func (c *Context) GlobalInt(name string) int {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupInt(name, fs)
+	}
+	return 0
+}
+
+func lookupInt(name string, set *flag.FlagSet) int {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := strconv.ParseInt(f.Value.String(), 0, 64)
+		if err != nil {
+			return 0
+		}
+		return int(parsed)
+	}
+	return 0
+}
+
+// IntSliceFlag is a flag with type *IntSlice
+type IntSliceFlag struct {
+	Name   string
+	Usage  string
+	EnvVar string
+	Hidden bool
+	Value  *IntSlice
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f IntSliceFlag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f IntSliceFlag) GetName() string {
+	return f.Name
+}
+
+// IntSlice looks up the value of a local IntSliceFlag, returns
+// nil if not found
+func (c *Context) IntSlice(name string) []int {
+	return lookupIntSlice(name, c.flagSet)
+}
+
+// GlobalIntSlice looks up the value of a global IntSliceFlag, returns
+// nil if not found
+func (c *Context) GlobalIntSlice(name string) []int {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupIntSlice(name, fs)
+	}
+	return nil
+}
+
+func lookupIntSlice(name string, set *flag.FlagSet) []int {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := (f.Value.(*IntSlice)).Value(), error(nil)
+		if err != nil {
+			return nil
+		}
+		return parsed
+	}
+	return nil
+}
+
+// Int64SliceFlag is a flag with type *Int64Slice
+type Int64SliceFlag struct {
+	Name   string
+	Usage  string
+	EnvVar string
+	Hidden bool
+	Value  *Int64Slice
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f Int64SliceFlag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f Int64SliceFlag) GetName() string {
+	return f.Name
+}
+
+// Int64Slice looks up the value of a local Int64SliceFlag, returns
+// nil if not found
+func (c *Context) Int64Slice(name string) []int64 {
+	return lookupInt64Slice(name, c.flagSet)
+}
+
+// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns
+// nil if not found
+func (c *Context) GlobalInt64Slice(name string) []int64 {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupInt64Slice(name, fs)
+	}
+	return nil
+}
+
+func lookupInt64Slice(name string, set *flag.FlagSet) []int64 {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil)
+		if err != nil {
+			return nil
+		}
+		return parsed
+	}
+	return nil
+}
+
+// StringFlag is a flag with type string
+type StringFlag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Hidden      bool
+	Value       string
+	Destination *string
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f StringFlag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f StringFlag) GetName() string {
+	return f.Name
+}
+
+// String looks up the value of a local StringFlag, returns
+// "" if not found
+func (c *Context) String(name string) string {
+	return lookupString(name, c.flagSet)
+}
+
+// GlobalString looks up the value of a global StringFlag, returns
+// "" if not found
+func (c *Context) GlobalString(name string) string {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupString(name, fs)
+	}
+	return ""
+}
+
+func lookupString(name string, set *flag.FlagSet) string {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := f.Value.String(), error(nil)
+		if err != nil {
+			return ""
+		}
+		return parsed
+	}
+	return ""
+}
+
+// StringSliceFlag is a flag with type *StringSlice
+type StringSliceFlag struct {
+	Name   string
+	Usage  string
+	EnvVar string
+	Hidden bool
+	Value  *StringSlice
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f StringSliceFlag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f StringSliceFlag) GetName() string {
+	return f.Name
+}
+
+// StringSlice looks up the value of a local StringSliceFlag, returns
+// nil if not found
+func (c *Context) StringSlice(name string) []string {
+	return lookupStringSlice(name, c.flagSet)
+}
+
+// GlobalStringSlice looks up the value of a global StringSliceFlag, returns
+// nil if not found
+func (c *Context) GlobalStringSlice(name string) []string {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupStringSlice(name, fs)
+	}
+	return nil
+}
+
+func lookupStringSlice(name string, set *flag.FlagSet) []string {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := (f.Value.(*StringSlice)).Value(), error(nil)
+		if err != nil {
+			return nil
+		}
+		return parsed
+	}
+	return nil
+}
+
+// Uint64Flag is a flag with type uint64
+type Uint64Flag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Hidden      bool
+	Value       uint64
+	Destination *uint64
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f Uint64Flag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f Uint64Flag) GetName() string {
+	return f.Name
+}
+
+// Uint64 looks up the value of a local Uint64Flag, returns
+// 0 if not found
+func (c *Context) Uint64(name string) uint64 {
+	return lookupUint64(name, c.flagSet)
+}
+
+// GlobalUint64 looks up the value of a global Uint64Flag, returns
+// 0 if not found
+func (c *Context) GlobalUint64(name string) uint64 {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupUint64(name, fs)
+	}
+	return 0
+}
+
+func lookupUint64(name string, set *flag.FlagSet) uint64 {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
+		if err != nil {
+			return 0
+		}
+		return parsed
+	}
+	return 0
+}
+
+// UintFlag is a flag with type uint
+type UintFlag struct {
+	Name        string
+	Usage       string
+	EnvVar      string
+	Hidden      bool
+	Value       uint
+	Destination *uint
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f UintFlag) String() string {
+	return FlagStringer(f)
+}
+
+// GetName returns the name of the flag
+func (f UintFlag) GetName() string {
+	return f.Name
+}
+
+// Uint looks up the value of a local UintFlag, returns
+// 0 if not found
+func (c *Context) Uint(name string) uint {
+	return lookupUint(name, c.flagSet)
+}
+
+// GlobalUint looks up the value of a global UintFlag, returns
+// 0 if not found
+func (c *Context) GlobalUint(name string) uint {
+	if fs := lookupGlobalFlagSet(name, c); fs != nil {
+		return lookupUint(name, fs)
+	}
+	return 0
+}
+
+func lookupUint(name string, set *flag.FlagSet) uint {
+	f := set.Lookup(name)
+	if f != nil {
+		parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
+		if err != nil {
+			return 0
+		}
+		return uint(parsed)
+	}
+	return 0
+}
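Editor's note: all of the lookup helpers above share one pattern: find the flag in the flag.FlagSet, parse its string form, and fall back to the type's zero value on any error. A minimal sketch of what that means for callers (the app and flag names here are illustrative, not part of this patch):

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.BoolFlag{Name: "debug", Usage: "enable debug output"},
		cli.DurationFlag{Name: "timeout", Value: 10 * time.Second},
	}
	app.Action = func(c *cli.Context) error {
		// Resolved via lookupBool/lookupDuration above: an unset or
		// unparsable flag yields the zero value, never an error.
		fmt.Println(c.Bool("debug"), c.Duration("timeout"))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}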
diff --git a/vendor/github.com/urfave/cli/funcs.go b/vendor/github.com/urfave/cli/funcs.go
new file mode 100644
index 000000000..b335dbf6d
--- /dev/null
+++ b/vendor/github.com/urfave/cli/funcs.go
@@ -0,0 +1,41 @@
+package cli
+
+// BashCompleteFunc is an action to execute when the bash-completion flag is set
+type BashCompleteFunc func(*Context)
+
+// BeforeFunc is an action to execute before any subcommands are run, but after
+// the context is ready. If a non-nil error is returned, no subcommands are run.
+type BeforeFunc func(*Context) error
+
+// AfterFunc is an action to execute after any subcommands are run, but after the
+// subcommand has finished. It is run even if Action() panics.
+type AfterFunc func(*Context) error
+
+// ActionFunc is the action to execute when no subcommands are specified
+type ActionFunc func(*Context) error
+
+// CommandNotFoundFunc is executed if the proper command cannot be found
+type CommandNotFoundFunc func(*Context, string)
+
+// OnUsageErrorFunc is executed if a usage error occurs. This is useful for displaying
+// customized usage error messages. This function is able to replace the
+// original error messages. If this function is not set, the "Incorrect usage"
+// message is displayed and the execution is interrupted.
+type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error
+
+// ExitErrHandlerFunc is executed if provided in order to handle ExitError values
+// returned by Actions and Before/After functions.
+type ExitErrHandlerFunc func(context *Context, err error)
+
+// FlagStringFunc is used by the help generation to display a flag, which is
+// expected to be a single line.
+type FlagStringFunc func(Flag) string
+
+// FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix
+// text for a flag's full name.
+type FlagNamePrefixFunc func(fullName, placeholder string) string
+
+// FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help
+// with the environment variable details.
+type FlagEnvHintFunc func(envVar, str string) string
+
diff --git a/vendor/github.com/urfave/cli/help.go b/vendor/github.com/urfave/cli/help.go
new file mode 100644
index 000000000..ed084fc1d
--- /dev/null
+++ b/vendor/github.com/urfave/cli/help.go
@@ -0,0 +1,339 @@
+package cli
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+)
+
+// AppHelpTemplate is the text template for the Default help topic.
+// cli.go uses text/template to render templates. You can
+// render custom help text by setting this variable.
+var AppHelpTemplate = `NAME:
+   {{.Name}}{{if .Usage}} - {{.Usage}}{{end}}
+
+USAGE:
+   {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
+
+VERSION:
+   {{.Version}}{{end}}{{end}}{{if .Description}}
+
+DESCRIPTION:
+   {{.Description}}{{end}}{{if len .Authors}}
+
+AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
+   {{range $index, $author := .Authors}}{{if $index}}
+   {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}}
+
+COMMANDS:{{range .VisibleCategories}}{{if .Name}}
+
+   {{.Name}}:{{end}}{{range .VisibleCommands}}
+     {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
+
+GLOBAL OPTIONS:
+   {{range $index, $option := .VisibleFlags}}{{if $index}}
+   {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}}
+
+COPYRIGHT:
+   {{.Copyright}}{{end}}
+`
+
+// CommandHelpTemplate is the text template for the command help topic.
+// cli.go uses text/template to render templates. You can
+// render custom help text by setting this variable.
+var CommandHelpTemplate = `NAME:
+   {{.HelpName}} - {{.Usage}}
+
+USAGE:
+   {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
+
+CATEGORY:
+   {{.Category}}{{end}}{{if .Description}}
+
+DESCRIPTION:
+   {{.Description}}{{end}}{{if .VisibleFlags}}
+
+OPTIONS:
+   {{range .VisibleFlags}}{{.}}
+   {{end}}{{end}}
+`
+
+// SubcommandHelpTemplate is the text template for the subcommand help topic.
+// cli.go uses text/template to render templates. You can
+// render custom help text by setting this variable.
+var SubcommandHelpTemplate = `NAME:
+   {{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}}
+
+USAGE:
+   {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
+
+COMMANDS:{{range .VisibleCategories}}{{if .Name}}
+  {{.Name}}:{{end}}{{range .VisibleCommands}}
+    {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}
+{{end}}{{if .VisibleFlags}}
+OPTIONS:
+   {{range .VisibleFlags}}{{.}}
+   {{end}}{{end}}
+`
+
+var helpCommand = Command{
+	Name:      "help",
+	Aliases:   []string{"h"},
+	Usage:     "Shows a list of commands or help for one command",
+	ArgsUsage: "[command]",
+	Action: func(c *Context) error {
+		args := c.Args()
+		if args.Present() {
+			return ShowCommandHelp(c, args.First())
+		}
+
+		ShowAppHelp(c)
+		return nil
+	},
+}
+
+var helpSubcommand = Command{
+	Name:      "help",
+	Aliases:   []string{"h"},
+	Usage:     "Shows a list of commands or help for one command",
+	ArgsUsage: "[command]",
+	Action: func(c *Context) error {
+		args := c.Args()
+		if args.Present() {
+			return ShowCommandHelp(c, args.First())
+		}
+
+		return ShowSubcommandHelp(c)
+	},
+}
+
+// Prints help for the App or Command
+type helpPrinter func(w io.Writer, templ string, data interface{})
+
+// Prints help for the App or Command with custom template function.
+type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{})
+
+// HelpPrinter is a function that writes the help output. If not set a default
+// is used. The function signature is:
+// func(w io.Writer, templ string, data interface{})
+var HelpPrinter helpPrinter = printHelp
+
+// HelpPrinterCustom is the same as HelpPrinter but
+// takes a custom map of template functions.
+var HelpPrinterCustom helpPrinterCustom = printHelpCustom
+
+// VersionPrinter prints the version for the App
+var VersionPrinter = printVersion
+
+// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code.
+func ShowAppHelpAndExit(c *Context, exitCode int) {
+	ShowAppHelp(c)
+	os.Exit(exitCode)
+}
+
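Editor's note: because AppHelpTemplate and HelpPrinter are exported package-level variables, an application can reshape help output without forking the library. A hedged sketch; the replacement template and header text are illustrative:

package main

import (
	"fmt"
	"io"

	"github.com/urfave/cli"
)

func init() {
	// Override the default template wholesale...
	cli.AppHelpTemplate = `NAME: {{.Name}} - {{.Usage}}
`
	// ...or wrap the default printer to prepend extra output.
	base := cli.HelpPrinter
	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
		fmt.Fprintln(w, "== example header ==")
		base(w, templ, data)
	}
}

func main() {
	_ = cli.NewApp().Run([]string{"example", "--help"})
}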
+// ShowAppHelp is an action that displays the help.
+func ShowAppHelp(c *Context) (err error) {
+	if c.App.CustomAppHelpTemplate == "" {
+		HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
+		return
+	}
+	customAppData := func() map[string]interface{} {
+		if c.App.ExtraInfo == nil {
+			return nil
+		}
+		return map[string]interface{}{
+			"ExtraInfo": c.App.ExtraInfo,
+		}
+	}
+	HelpPrinterCustom(c.App.Writer, c.App.CustomAppHelpTemplate, c.App, customAppData())
+	return nil
+}
+
+// DefaultAppComplete prints the list of subcommands as the default app completion method
+func DefaultAppComplete(c *Context) {
+	for _, command := range c.App.Commands {
+		if command.Hidden {
+			continue
+		}
+		for _, name := range command.Names() {
+			fmt.Fprintln(c.App.Writer, name)
+		}
+	}
+}
+
+// ShowCommandHelpAndExit - exits with code after showing help
+func ShowCommandHelpAndExit(c *Context, command string, code int) {
+	ShowCommandHelp(c, command)
+	os.Exit(code)
+}
+
+// ShowCommandHelp prints help for the given command
+func ShowCommandHelp(ctx *Context, command string) error {
+	// show the subcommand help for a command with subcommands
+	if command == "" {
+		HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
+		return nil
+	}
+
+	for _, c := range ctx.App.Commands {
+		if c.HasName(command) {
+			if c.CustomHelpTemplate != "" {
+				HelpPrinterCustom(ctx.App.Writer, c.CustomHelpTemplate, c, nil)
+			} else {
+				HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
+			}
+			return nil
+		}
+	}
+
+	if ctx.App.CommandNotFound == nil {
+		return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3)
+	}
+
+	ctx.App.CommandNotFound(ctx, command)
+	return nil
+}
+
+// ShowSubcommandHelp prints help for the given subcommand
+func ShowSubcommandHelp(c *Context) error {
+	return ShowCommandHelp(c, c.Command.Name)
+}
+
+// ShowVersion prints the version number of the App
+func ShowVersion(c *Context) {
+	VersionPrinter(c)
+}
+
+func printVersion(c *Context) {
+	fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
+}
+
+// ShowCompletions prints the lists of commands within a given context
+func ShowCompletions(c *Context) {
+	a := c.App
+	if a != nil && a.BashComplete != nil {
+		a.BashComplete(c)
+	}
+}
+
+// ShowCommandCompletions prints the custom completions for a given command
+func ShowCommandCompletions(ctx *Context, command string) {
+	c := ctx.App.Command(command)
+	if c != nil && c.BashComplete != nil {
+		c.BashComplete(ctx)
+	}
+}
+
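Editor's note: ShowAppHelp above only routes through HelpPrinterCustom when App.CustomAppHelpTemplate is set, and it exposes App.ExtraInfo to that template as a function named ExtraInfo. A sketch of the intended use, with illustrative values throughout:

package main

import (
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	// ExtraInfo is surfaced by customAppData (see ShowAppHelp above) and is
	// callable from the custom template as a template function.
	app.ExtraInfo = func() map[string]string {
		return map[string]string{"commit": "deadbeef"} // illustrative value
	}
	app.CustomAppHelpTemplate = cli.AppHelpTemplate + `
BUILD INFO:{{range $k, $v := ExtraInfo}}
   {{$k}}={{$v}}{{end}}
`
	_ = app.Run(os.Args)
}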
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + return + } + w.Flush() +} + +func printHelp(out io.Writer, templ string, data interface{}) { + printHelpCustom(out, templ, data, nil) +} + +func checkVersion(c *Context) bool { + found := false + if VersionFlag.GetName() != "" { + eachName(VersionFlag.GetName(), func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkHelp(c *Context) bool { + found := false + if HelpFlag.GetName() != "" { + eachName(HelpFlag.GetName(), func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkCommandHelp(c *Context, name string) bool { + if c.Bool("h") || c.Bool("help") { + ShowCommandHelp(c, name) + return true + } + + return false +} + +func checkSubcommandHelp(c *Context) bool { + if c.Bool("h") || c.Bool("help") { + ShowSubcommandHelp(c) + return true + } + + return false +} + +func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { + if !a.EnableBashCompletion { + return false, arguments + } + + pos := len(arguments) - 1 + lastArg := arguments[pos] + + if lastArg != "--"+BashCompletionFlag.GetName() { + return false, arguments + } + + return true, arguments[:pos] +} + +func checkCompletions(c *Context) bool { + if !c.shellComplete { + return false + } + + if args := c.Args(); args.Present() { + name := args.First() + if cmd := c.App.Command(name); cmd != nil { + // let the command handle the completion + return false + } + } + + ShowCompletions(c) + return true +} + +func checkCommandCompletions(c *Context, name string) bool { + if !c.shellComplete { + return false + } + + ShowCommandCompletions(c, name) + return true +} diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go new file mode 100644 index 000000000..c6212f406 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/health.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_health_v1/health.proto + +// Package health provides some utility functions to health-check a server. The implementation +// is based on protobuf. Users need to write their own implementations if other IDLs are used. +package health + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" +) + +// Server implements `service Health`. +type Server struct { + mu sync.Mutex + // statusMap stores the serving status of the services this Server monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus +} + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus), + } +} + +// Check implements `service Health`. 
+// Check implements `service Health`.
+func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if in.Service == "" {
+		// check the server overall health status.
+		return &healthpb.HealthCheckResponse{
+			Status: healthpb.HealthCheckResponse_SERVING,
+		}, nil
+	}
+	if status, ok := s.statusMap[in.Service]; ok {
+		return &healthpb.HealthCheckResponse{
+			Status: status,
+		}, nil
+	}
+	return nil, grpc.Errorf(codes.NotFound, "unknown service")
+}
+
+// SetServingStatus is called when the serving status of a service needs to be
+// reset, or when a new service entry needs to be inserted into the statusMap.
+func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
+	s.mu.Lock()
+	s.statusMap[service] = status
+	s.mu.Unlock()
+}

From cc2726054d144f6e001a150712a83b4533a687fb Mon Sep 17 00:00:00 2001
From: Lantao Liu
Date: Wed, 21 Feb 2018 02:48:41 +0000
Subject: [PATCH 2/2] Add test-containerd for plugin mode test

Signed-off-by: Lantao Liu
---
 .travis.yml                  |  8 +++--
 Makefile                     | 59 ++++++++++++++++++++----------------
 cmd/containerd/containerd.go | 56 ++++++++++++++++++++++++++++++++++
 hack/install-deps.sh         | 36 ----------------------
 4 files changed, 94 insertions(+), 65 deletions(-)
 create mode 100644 cmd/containerd/containerd.go

diff --git a/.travis.yml b/.travis.yml
index 70cadd641..4366ea465 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -32,13 +32,13 @@ jobs:
       - make install.tools
       - make .gitvalidation
      - make binaries
-      - make plugin
+      - make containerd
       go: 1.9.x
    - script:
       - make install.tools
       - make .gitvalidation
       - make binaries
-      - make plugin
+      - make containerd
       go: tip
    - stage: Test
      script:
@@ -54,7 +54,9 @@ jobs:
      - cat /tmp/test-cri/containerd.log
      go: 1.9.x
    - script:
-      - make install.deps COOK_CONTAINERD=true
+      - make install.deps
+      - make containerd
+      - sudo make install-containerd
      - make test-integration STANDALONE_CRI_CONTAINERD=false
      - make test-cri STANDALONE_CRI_CONTAINERD=false
      after_script:
diff --git a/Makefile b/Makefile
index 6f4141eab..850e0bac0 100644
--- a/Makefile
+++ b/Makefile
@@ -40,24 +40,25 @@ default: help
 help:
	@echo "Usage: make <target>"
	@echo
-	@echo " * 'install'          - Install binaries to system locations"
-	@echo " * 'binaries'         - Build cri-containerd and ctrcri"
-	@echo " * 'plugin'           - Build cri-containerd as a plugin package"
-	@echo " * 'static-binaries   - Build static cri-containerd and ctrcri"
-	@echo " * 'release'          - Build release tarball"
-	@echo " * 'push'             - Push release tarball to GCS"
-	@echo " * 'test'             - Test cri-containerd with unit test"
-	@echo " * 'test-integration' - Test cri-containerd with integration test"
-	@echo " * 'test-cri'         - Test cri-containerd with cri validation test"
-	@echo " * 'test-e2e-node'    - Test cri-containerd with Kubernetes node e2e test"
-	@echo " * 'clean'            - Clean artifacts"
-	@echo " * 'verify'           - Execute the source code verification tools"
-	@echo " * 'proto'            - Update protobuf of cri-containerd api"
-	@echo " * 'install.tools'    - Install tools used by verify"
-	@echo " * 'install.deps'     - Install dependencies of cri-containerd (containerd, runc, cni) Note: BUILDTAGS defaults to 'seccomp apparmor' for runc build"
-	@echo " * 'uninstall'        - Remove installed binaries from system locations"
-	@echo " * 'version'          - Print current cri-containerd release version"
-	@echo " * 'update-vendor'    - Syncs containerd/vendor.conf -> vendor.conf and sorts vendor.conf"
+	@echo " * 'install'             - Install binaries to system locations"
+	@echo " * 'binaries'            - Build cri-containerd and ctrcri"
+	@echo " * 'static-binaries'     - Build static cri-containerd and ctrcri"
+	@echo " * 'release'             - Build release tarball"
+	@echo " * 'push'                - Push release tarball to GCS"
+	@echo " * 'containerd'          - Build a customized containerd with CRI plugin for testing"
+	@echo " * 'install-containerd'  - Install customized containerd to system location"
+	@echo " * 'test'                - Test cri-containerd with unit test"
+	@echo " * 'test-integration'    - Test cri-containerd with integration test"
+	@echo " * 'test-cri'            - Test cri-containerd with cri validation test"
+	@echo " * 'test-e2e-node'       - Test cri-containerd with Kubernetes node e2e test"
+	@echo " * 'clean'               - Clean artifacts"
+	@echo " * 'verify'              - Execute the source code verification tools"
+	@echo " * 'proto'               - Update protobuf of cri-containerd api"
+	@echo " * 'install.tools'       - Install tools used by verify"
+	@echo " * 'install.deps'        - Install dependencies of cri-containerd (containerd, runc, cni) Note: BUILDTAGS defaults to 'seccomp apparmor' for runc build"
+	@echo " * 'uninstall'           - Remove installed binaries from system locations"
+	@echo " * 'version'             - Print current cri-containerd release version"
+	@echo " * 'update-vendor'       - Syncs containerd/vendor.conf -> vendor.conf and sorts vendor.conf"

 verify: lint gofmt boiler
@@ -102,6 +103,13 @@ $(BUILD_DIR)/ctrcri: $(SOURCES)
		-gcflags '$(GO_GCFLAGS)' \
		$(PROJECT)/cmd/ctrcri

+$(BUILD_DIR)/containerd: $(SOURCES) $(PLUGIN_SOURCES)
+	$(GO) build -o $@ \
+		-tags '$(BUILD_TAGS)' \
+		-ldflags '$(GO_LDFLAGS)' \
+		-gcflags '$(GO_GCFLAGS)' \
+		$(PROJECT)/cmd/containerd
+
 test:
	$(GO) test -timeout=10m -race ./pkg/... \
		-tags '$(BUILD_TAGS)' \
@@ -125,13 +133,6 @@ clean:

 binaries: $(BUILD_DIR)/cri-containerd $(BUILD_DIR)/ctrcri

-# TODO(random-liu): Make this only build when source files change and
-# add this to target all.
-plugin: $(PLUGIN_SOURCES) $(SOURCES)
-	$(GO) build -tags '$(BUILD_TAGS)' \
-		-ldflags '$(GO_LDFLAGS)' \
-		-gcflags '$(GO_GCFLAGS)' \
-
 static-binaries: GO_LDFLAGS += -extldflags "-fno-PIC -static"
 static-binaries: $(BUILD_DIR)/cri-containerd $(BUILD_DIR)/ctrcri
@@ -151,6 +152,11 @@ release: $(BUILD_DIR)/$(TARBALL)
 push: $(BUILD_DIR)/$(TARBALL)
	@BUILD_DIR=$(BUILD_DIR) TARBALL=$(TARBALL) VERSION=$(VERSION) ./hack/push.sh

+containerd: $(BUILD_DIR)/containerd
+
+install-containerd: containerd
+	install -D -m 755 $(BUILD_DIR)/containerd $(BINDIR)/containerd
+
 proto:
	@hack/update-proto.sh
@@ -183,7 +189,6 @@ install.tools: .install.gitvalidation .install.gometalinter
 .PHONY: \
	binaries \
	static-binaries \
-	plugin \
	release \
	push \
	boiler \
@@ -193,6 +198,8 @@ install.tools: .install.gitvalidation .install.gometalinter
	help \
	install \
	lint \
+	containerd \
+	install-containerd \
	test \
	test-integration \
	test-cri \
diff --git a/cmd/containerd/containerd.go b/cmd/containerd/containerd.go
new file mode 100644
index 000000000..bea512838
--- /dev/null
+++ b/cmd/containerd/containerd.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2018 The containerd Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"fmt"
+	"os"
+
+	_ "github.com/containerd/containerd/diff/walking/plugin"
+	_ "github.com/containerd/containerd/gc/scheduler"
+	_ "github.com/containerd/containerd/linux"
+	_ "github.com/containerd/containerd/metrics/cgroups"
+	_ "github.com/containerd/containerd/services/containers"
+	_ "github.com/containerd/containerd/services/content"
+	_ "github.com/containerd/containerd/services/diff"
+	_ "github.com/containerd/containerd/services/events"
+	_ "github.com/containerd/containerd/services/healthcheck"
+	_ "github.com/containerd/containerd/services/images"
+	_ "github.com/containerd/containerd/services/introspection"
+	_ "github.com/containerd/containerd/services/leases"
+	_ "github.com/containerd/containerd/services/namespaces"
+	_ "github.com/containerd/containerd/services/snapshots"
+	_ "github.com/containerd/containerd/services/tasks"
+	_ "github.com/containerd/containerd/services/version"
+	_ "github.com/containerd/containerd/snapshots/overlay"
+	_ "github.com/containerd/cri-containerd"
+
+	"github.com/containerd/containerd/cmd/containerd/command"
+	"github.com/sirupsen/logrus"
+
+	"github.com/containerd/cri-containerd/pkg/version"
+)
+
+func main() {
+	app := command.App()
+	app.Version = version.CRIContainerdVersion + "-TEST-with-cri-plugin"
+	logrus.Warn("This customized containerd is only for CI test, DO NOT use it for distribution.")
+	if err := app.Run(os.Args); err != nil {
+		fmt.Fprintf(os.Stderr, "containerd: %s\n", err)
+		os.Exit(1)
+	}
+}
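Editor's note: the blank imports in cmd/containerd/containerd.go work because every containerd plugin package registers itself from an init function; linking the package into the binary is all that is required. A rough sketch of that registration pattern, not the exact source of any plugin above:

package exampleplugin

import "github.com/containerd/containerd/plugin"

func init() {
	// Runs as a side effect of a blank import, which is why the test
	// binary above only needs `_ "..."` import lines to pull plugins in.
	plugin.Register(&plugin.Registration{
		Type: plugin.GRPCPlugin, // plugin category
		ID:   "example",         // illustrative ID
		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
			return nil, nil // a real plugin returns its service here
		},
	})
}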
diff --git a/hack/install-deps.sh b/hack/install-deps.sh
index 81d1d1524..7cd0d883f 100755
--- a/hack/install-deps.sh
+++ b/hack/install-deps.sh
@@ -51,11 +51,6 @@ fi
 # and configurations in cluster.
 INSTALL_CNI=${INSTALL_CNI:-true}

-# COOK_CONTAINERD indicates whether to update containerd with newest
-# cri plugin before install. This is mainly used for testing new cri
-# plugin change.
-COOK_CONTAINERD=${COOK_CONTAINERD:-false}
-
 CONTAINERD_DIR=${DESTDIR}/usr/local
 RUNC_DIR=${DESTDIR}
 CNI_DIR=${DESTDIR}/opt/cni
@@ -138,37 +133,6 @@ fi
 # Install containerd
 checkout_repo ${CONTAINERD_PKG} ${CONTAINERD_VERSION} ${CONTAINERD_REPO}
 cd ${GOPATH}/src/${CONTAINERD_PKG}
-if ${COOK_CONTAINERD}; then
-  # Verify that vendor.conf is in sync with containerd before cook containerd,
-  # this is a hard requirement for now because of below overwrite of
-  # containerd's vendor tree.
-  # TODO(random-liu): Remove this and the below import code after containerd
-  # starts to vendor cri plugin.
-  if ! ${ROOT}/hack/sync-vendor.sh -only-verify; then
-    echo "Please run hack/sync-vendor.sh before cook containerd."
-    exit 1
-  fi
-  # Import cri plugin into containerd.
-  echo "import _ \"${CRI_CONTAINERD_PKG}\"" >> cmd/containerd/builtins_linux.go
-  # 1. Copy all cri-containerd vendors into containerd vendor. This makes sure
-  # all dependencies introduced by cri-containerd will be updated. There might
-  # be unnecessary vendors introduced, but it still builds.
-  cp -rT ${ROOT}/vendor/ vendor/
-  # 2. Remove containerd repo itself from vendor.
-  rm -rf vendor/${CONTAINERD_PKG}
-  # 3. Create cri-containerd vendor in containerd vendor, and copy the newest
-  # cri-containerd there.
-  if [ -d "vendor/${CRI_CONTAINERD_PKG}" ]; then
-    rm -rf vendor/${CRI_CONTAINERD_PKG}/*
-  else
-    mkdir -p vendor/${CRI_CONTAINERD_PKG}
-  fi
-  cp -rT ${ROOT} vendor/${CRI_CONTAINERD_PKG}
-  # 4. Remove the extra vendor in cri-containerd.
-  rm -rf vendor/${CRI_CONTAINERD_PKG}/vendor
-  # After the 4 steps above done, we have a containerd with newest cri-containerd
-  # plugin.
-fi
 make BUILDTAGS="${BUILDTAGS}"
 # containerd make install requires `go` to work. Explicitly
 # set PATH to make sure it can find `go` even with `sudo`.
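Editor's note: one way to sanity-check the resulting test binary is to query the health service wired in by the services/healthcheck import, using the vendored gRPC health API from the first patch. A hedged sketch; the socket path and the dialer shim are assumptions, not part of this change:

package main

import (
	"fmt"
	"net"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	const sock = "/run/containerd/containerd.sock" // assumed default path
	conn, err := grpc.Dial(sock, grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// An empty service name asks for the server's overall status (see Check above).
	resp, err := healthpb.NewHealthClient(conn).Check(
		context.Background(), &healthpb.HealthCheckRequest{})
	fmt.Println(resp, err)
}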