Vendor containerd d184a0a343

Signed-off-by: Kohei Tokunaga <ktokunaga.mail@gmail.com>

parent 8448b92d23
commit b51177bfcc

vendor.conf | 10
@@ -8,15 +8,15 @@ github.com/willf/bitset d5bec3311243426a3c6d1b7a795f
 github.com/beorn7/perks v1.0.1
 github.com/BurntSushi/toml v0.3.1
 github.com/cespare/xxhash/v2 v2.1.1
-github.com/containerd/cgroups e9676da73eddf8ed2433f77aaf3b9cf8f0f75b8c
+github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff
 github.com/containerd/console v1.0.0
-github.com/containerd/containerd v1.4.0-beta.0
+github.com/containerd/containerd d184a0a3430dc4a17a47cce37fb36126ac0c699a
 github.com/containerd/continuity d3ef23f19fbb106bb73ffde425d07a9187e30745
 github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf
 github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076ebd565c4e8df0c
 github.com/containerd/ttrpc v1.0.1
 github.com/containerd/typeurl v1.0.1
-github.com/coreos/go-systemd/v22 v22.0.0
+github.com/coreos/go-systemd/v22 v22.1.0
 github.com/cpuguy83/go-md2man/v2 v2.0.0
 github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f
 github.com/docker/go-metrics v0.0.1
@@ -49,12 +49,12 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0
 github.com/sirupsen/logrus v1.6.0
 github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
 github.com/urfave/cli v1.22.1 # NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
-go.etcd.io/bbolt v1.3.3
+go.etcd.io/bbolt v1.3.5
 go.opencensus.io v0.22.0
 golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3
 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
 golang.org/x/sys 9dae0f8f577553e0f21298e18926efc9644c281d
-golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
+golang.org/x/text v0.3.3
 google.golang.org/genproto e50cd9704f63023d62cd06a1994b98227fc4d21a
 google.golang.org/grpc v1.27.1
vendor/github.com/containerd/cgroups/blkio.go | 19 (generated, vendored)
@@ -20,7 +20,6 @@ import (
 "bufio"
 "fmt"
 "io"
-"io/ioutil"
 "os"
 "path/filepath"
 "strconv"
@@ -72,8 +71,8 @@ func (b *blkioController) Create(path string, resources *specs.LinuxResources) e
 }
 for _, t := range createBlkioSettings(resources.BlockIO) {
 if t.value != nil {
-if err := ioutil.WriteFile(
-filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", t.name)),
+if err := retryingWriteFile(
+filepath.Join(b.Path(path), "blkio."+t.name),
 t.format(t.value),
 defaultFilePerm,
 ); err != nil {
@@ -94,7 +93,7 @@ func (b *blkioController) Stat(path string, stats *v1.Metrics) error {
 var settings []blkioStatSettings

 // Try to read CFQ stats available on all CFQ enabled kernels first
-if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil {
+if _, err := os.Lstat(filepath.Join(b.Path(path), "blkio.io_serviced_recursive")); err == nil {
 settings = []blkioStatSettings{
 {
 name: "sectors_recursive",
@@ -174,7 +173,7 @@ func (b *blkioController) Stat(path string, stats *v1.Metrics) error {
 }

 func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]*v1.BlkIOEntry) error {
-f, err := os.Open(filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", name)))
+f, err := os.Open(filepath.Join(b.Path(path), "blkio."+name))
 if err != nil {
 return err
 }
@@ -188,7 +187,7 @@ func (b *blkioController) readEntry(devices map[deviceKey]string, path, name str
 // skip total line
 continue
 } else {
-return fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
+return fmt.Errorf("invalid line found while parsing %s: %s", path, sc.Text())
 }
 }
 major, err := strconv.ParseUint(fields[0], 10, 64)
@@ -357,11 +356,3 @@ func getDevices(r io.Reader) (map[deviceKey]string, error) {
 }
 return devices, s.Err()
 }
-
-func major(devNumber uint64) uint64 {
-return (devNumber >> 8) & 0xfff
-}
-
-func minor(devNumber uint64) uint64 {
-return (devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)
-}
vendor/github.com/containerd/cgroups/cgroup.go | 15 (generated, vendored)
@@ -18,7 +18,6 @@ package cgroups

 import (
 "fmt"
-"io/ioutil"
 "os"
 "path/filepath"
 "strconv"
@@ -169,7 +168,7 @@ func (c *cgroup) add(process Process) error {
 if err != nil {
 return err
 }
-if err := ioutil.WriteFile(
+if err := retryingWriteFile(
 filepath.Join(s.Path(p), cgroupProcs),
 []byte(strconv.Itoa(process.Pid)),
 defaultFilePerm,
@@ -199,7 +198,7 @@ func (c *cgroup) addTask(process Process) error {
 if err != nil {
 return err
 }
-if err := ioutil.WriteFile(
+if err := retryingWriteFile(
 filepath.Join(s.Path(p), cgroupTasks),
 []byte(strconv.Itoa(process.Pid)),
 defaultFilePerm,
@@ -217,7 +216,7 @@ func (c *cgroup) Delete() error {
 if c.err != nil {
 return c.err
 }
-var errors []string
+var errs []string
 for _, s := range c.subsystems {
 if d, ok := s.(deleter); ok {
 sp, err := c.path(s.Name())
@@ -225,7 +224,7 @@ func (c *cgroup) Delete() error {
 return err
 }
 if err := d.Delete(sp); err != nil {
-errors = append(errors, string(s.Name()))
+errs = append(errs, string(s.Name()))
 }
 continue
 }
@@ -236,12 +235,12 @@ func (c *cgroup) Delete() error {
 }
 path := p.Path(sp)
 if err := remove(path); err != nil {
-errors = append(errors, path)
+errs = append(errs, path)
 }
 }
 }
-if len(errors) > 0 {
-return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errors, ", "))
+if len(errs) > 0 {
+return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errs, ", "))
 }
 c.err = ErrCgroupDeleted
 return nil
vendor/github.com/containerd/cgroups/cpu.go | 6 (generated, vendored)
@@ -18,8 +18,6 @@ package cgroups

 import (
 "bufio"
-"fmt"
-"io/ioutil"
 "os"
 "path/filepath"
 "strconv"
@@ -84,8 +82,8 @@ func (c *cpuController) Create(path string, resources *specs.LinuxResources) err
 value = []byte(strconv.FormatInt(*t.ivalue, 10))
 }
 if value != nil {
-if err := ioutil.WriteFile(
-filepath.Join(c.Path(path), fmt.Sprintf("cpu.%s", t.name)),
+if err := retryingWriteFile(
+filepath.Join(c.Path(path), "cpu."+t.name),
 value,
 defaultFilePerm,
 ); err != nil {
vendor/github.com/containerd/cgroups/cpuset.go | 8 (generated, vendored)
@@ -69,8 +69,8 @@ func (c *cpusetController) Create(path string, resources *specs.LinuxResources)
 },
 } {
 if t.value != "" {
-if err := ioutil.WriteFile(
-filepath.Join(c.Path(path), fmt.Sprintf("cpuset.%s", t.name)),
+if err := retryingWriteFile(
+filepath.Join(c.Path(path), "cpuset."+t.name),
 []byte(t.value),
 defaultFilePerm,
 ); err != nil {
@@ -134,7 +134,7 @@ func (c *cpusetController) copyIfNeeded(current, parent string) error {
 return err
 }
 if isEmpty(currentCpus) {
-if err := ioutil.WriteFile(
+if err := retryingWriteFile(
 filepath.Join(current, "cpuset.cpus"),
 parentCpus,
 defaultFilePerm,
@@ -143,7 +143,7 @@ func (c *cpusetController) copyIfNeeded(current, parent string) error {
 }
 }
 if isEmpty(currentMems) {
-if err := ioutil.WriteFile(
+if err := retryingWriteFile(
 filepath.Join(current, "cpuset.mems"),
 parentMems,
 defaultFilePerm,
vendor/github.com/containerd/cgroups/devices.go | 3 (generated, vendored)
@@ -18,7 +18,6 @@ package cgroups

 import (
 "fmt"
-"io/ioutil"
 "os"
 "path/filepath"

@@ -61,7 +60,7 @@ func (d *devicesController) Create(path string, resources *specs.LinuxResources)
 if device.Type == "" {
 device.Type = "a"
 }
-if err := ioutil.WriteFile(
+if err := retryingWriteFile(
 filepath.Join(d.Path(path), file),
 []byte(deviceString(device)),
 defaultFilePerm,
vendor/github.com/containerd/cgroups/freezer.go | 2 (generated, vendored)
@@ -50,7 +50,7 @@ func (f *freezerController) Thaw(path string) error {
 }

 func (f *freezerController) changeState(path string, state State) error {
-return ioutil.WriteFile(
+return retryingWriteFile(
 filepath.Join(f.root, path, "freezer.state"),
 []byte(strings.ToUpper(string(state))),
 defaultFilePerm,
vendor/github.com/containerd/cgroups/hugetlb.go | 3 (generated, vendored)
@@ -17,7 +17,6 @@
 package cgroups

 import (
-"io/ioutil"
 "os"
 "path/filepath"
 "strconv"
@@ -57,7 +56,7 @@ func (h *hugetlbController) Create(path string, resources *specs.LinuxResources)
 return err
 }
 for _, limit := range resources.HugepageLimits {
-if err := ioutil.WriteFile(
+if err := retryingWriteFile(
 filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")),
 []byte(strconv.FormatUint(limit.Limit, 10)),
 defaultFilePerm,
vendor/github.com/containerd/cgroups/memory.go | 9 (generated, vendored)
@@ -20,7 +20,6 @@ import (
 "bufio"
 "fmt"
 "io"
-"io/ioutil"
 "os"
 "path/filepath"
 "strconv"
@@ -214,7 +213,7 @@ func (m *memoryController) Create(path string, resources *specs.LinuxResources)
 // until a limit is set on the cgroup and limit cannot be set once the
 // cgroup has children, or if there are already tasks in the cgroup.
 for _, i := range []int64{1, -1} {
-if err := ioutil.WriteFile(
+if err := retryingWriteFile(
 filepath.Join(m.Path(path), "memory.kmem.limit_in_bytes"),
 []byte(strconv.FormatInt(i, 10)),
 defaultFilePerm,
@@ -378,8 +377,8 @@ func (m *memoryController) parseStats(r io.Reader, stat *v1.MemoryStat) error {
 func (m *memoryController) set(path string, settings []memorySettings) error {
 for _, t := range settings {
 if t.value != nil {
-if err := ioutil.WriteFile(
-filepath.Join(m.Path(path), fmt.Sprintf("memory.%s", t.name)),
+if err := retryingWriteFile(
+filepath.Join(m.Path(path), "memory."+t.name),
 []byte(strconv.FormatInt(*t.value, 10)),
 defaultFilePerm,
 ); err != nil {
@@ -468,7 +467,7 @@ func (m *memoryController) memoryEvent(path string, event MemoryEvent) (uintptr,
 defer evtFile.Close()
 data := fmt.Sprintf("%d %d %s", efd, evtFile.Fd(), event.Arg())
 evctlPath := filepath.Join(root, "cgroup.event_control")
-if err := ioutil.WriteFile(evctlPath, []byte(data), 0700); err != nil {
+if err := retryingWriteFile(evctlPath, []byte(data), 0700); err != nil {
 unix.Close(efd)
 return 0, err
 }
vendor/github.com/containerd/cgroups/net_cls.go | 3 (generated, vendored)
@@ -17,7 +17,6 @@
 package cgroups

 import (
-"io/ioutil"
 "os"
 "path/filepath"
 "strconv"
@@ -48,7 +47,7 @@ func (n *netclsController) Create(path string, resources *specs.LinuxResources)
 return err
 }
 if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 {
-return ioutil.WriteFile(
+return retryingWriteFile(
 filepath.Join(n.Path(path), "net_cls.classid"),
 []byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)),
 defaultFilePerm,
vendor/github.com/containerd/cgroups/net_prio.go | 3 (generated, vendored)
@@ -18,7 +18,6 @@ package cgroups

 import (
 "fmt"
-"io/ioutil"
 "os"
 "path/filepath"

@@ -49,7 +48,7 @@ func (n *netprioController) Create(path string, resources *specs.LinuxResources)
 }
 if resources.Network != nil {
 for _, prio := range resources.Network.Priorities {
-if err := ioutil.WriteFile(
+if err := retryingWriteFile(
 filepath.Join(n.Path(path), "net_prio.ifpriomap"),
 formatPrio(prio.Name, prio.Priority),
 defaultFilePerm,
vendor/github.com/containerd/cgroups/opts.go | 4 (generated, vendored)
@@ -48,12 +48,12 @@ func newInitConfig() *InitConfig {
 type InitCheck func(Subsystem, Path, error) error

 // AllowAny allows any subsystem errors to be skipped
-func AllowAny(s Subsystem, p Path, err error) error {
+func AllowAny(_ Subsystem, _ Path, _ error) error {
 return ErrIgnoreSubsystem
 }

 // RequireDevices requires the device subsystem but no others
-func RequireDevices(s Subsystem, p Path, err error) error {
+func RequireDevices(s Subsystem, _ Path, _ error) error {
 if s.Name() == Devices {
 return ErrDevicesRequired
 }
vendor/github.com/containerd/cgroups/paths.go | 6 (generated, vendored)
@@ -25,7 +25,7 @@ import (

 type Path func(subsystem Name) (string, error)

-func RootPath(subsysem Name) (string, error) {
+func RootPath(subsystem Name) (string, error) {
 return "/", nil
 }

@@ -63,7 +63,7 @@ var ErrControllerNotActive = errors.New("controller is not supported")
 func existingPath(paths map[string]string, suffix string) Path {
 // localize the paths based on the root mount dest for nested cgroups
 for n, p := range paths {
-dest, err := getCgroupDestination(string(n))
+dest, err := getCgroupDestination(n)
 if err != nil {
 return errorPath(err)
 }
@@ -79,7 +79,7 @@ func existingPath(paths map[string]string, suffix string) Path {
 return func(name Name) (string, error) {
 root, ok := paths[string(name)]
 if !ok {
-if root, ok = paths[fmt.Sprintf("name=%s", name)]; !ok {
+if root, ok = paths["name="+string(name)]; !ok {
 return "", ErrControllerNotActive
 }
 }
vendor/github.com/containerd/cgroups/pids.go | 2 (generated, vendored)
@@ -50,7 +50,7 @@ func (p *pidsController) Create(path string, resources *specs.LinuxResources) er
 return err
 }
 if resources.Pids != nil && resources.Pids.Limit > 0 {
-return ioutil.WriteFile(
+return retryingWriteFile(
 filepath.Join(p.Path(path), "pids.max"),
 []byte(strconv.FormatInt(resources.Pids.Limit, 10)),
 defaultFilePerm,
vendor/github.com/containerd/cgroups/rdma.go | 2 (generated, vendored)
@@ -67,7 +67,7 @@ func (p *rdmaController) Create(path string, resources *specs.LinuxResources) er

 for device, limit := range resources.Rdma {
 if device != "" && (limit.HcaHandles != nil || limit.HcaObjects != nil) {
-return ioutil.WriteFile(
+return retryingWriteFile(
 filepath.Join(p.Path(path), "rdma.max"),
 []byte(createCmdString(device, &limit)),
 defaultFilePerm,
vendor/github.com/containerd/cgroups/systemd.go | 9 (generated, vendored)
@@ -17,7 +17,6 @@
 package cgroups

 import (
-"fmt"
 "path/filepath"
 "strings"
 "sync"
@@ -78,7 +77,7 @@ func (s *SystemdController) Name() Name {
 return SystemdDbus
 }

-func (s *SystemdController) Create(path string, resources *specs.LinuxResources) error {
+func (s *SystemdController) Create(path string, _ *specs.LinuxResources) error {
 conn, err := systemdDbus.New()
 if err != nil {
 return err
@@ -105,7 +104,7 @@ func (s *SystemdController) Create(path string, resources *specs.LinuxResources)
 }
 once.Do(checkDelegate)
 properties := []systemdDbus.Property{
-systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)),
+systemdDbus.PropDescription("cgroup " + name),
 systemdDbus.PropWants(slice),
 newProperty("DefaultDependencies", false),
 newProperty("MemoryAccounting", true),
@@ -150,10 +149,6 @@ func newProperty(name string, units interface{}) systemdDbus.Property {
 }
 }
-
-func unitName(name string) string {
-return fmt.Sprintf("%s.slice", name)
-}

 func splitName(path string) (slice string, unit string) {
 slice, unit = filepath.Split(path)
 return strings.TrimSuffix(slice, "/"), unit
vendor/github.com/containerd/cgroups/utils.go | 17 (generated, vendored)
@@ -18,6 +18,7 @@ package cgroups

 import (
 "bufio"
+"errors"
 "fmt"
 "io"
 "io/ioutil"
@@ -26,6 +27,7 @@ import (
 "strconv"
 "strings"
 "sync"
+"syscall"
 "time"

 units "github.com/docker/go-units"
@@ -380,5 +382,18 @@ func cleanPath(path string) string {
 if !filepath.IsAbs(path) {
 path, _ = filepath.Rel(string(os.PathSeparator), filepath.Clean(string(os.PathSeparator)+path))
 }
-return filepath.Clean(path)
+return path
 }
+
+func retryingWriteFile(path string, data []byte, mode os.FileMode) error {
+// Retry writes on EINTR; see:
+// https://github.com/golang/go/issues/38033
+for {
+err := ioutil.WriteFile(path, data, mode)
+if err == nil {
+return nil
+} else if !errors.Is(err, syscall.EINTR) {
+return err
+}
+}
+}
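Editor's note (not part of the commit): the recurring `ioutil.WriteFile` → `retryingWriteFile` change above exists because Go 1.14's asynchronous preemption can surface EINTR from slow writes to cgroup control files (golang/go#38033), so the vendored library now retries those writes. A minimal standalone sketch of the same pattern, with a hypothetical cgroup path:

```go
package main

import (
	"errors"
	"io/ioutil"
	"os"
	"syscall"
)

// retryingWriteFile mirrors the helper added in the vendored cgroups package:
// repeat the write for as long as the kernel keeps returning EINTR.
func retryingWriteFile(path string, data []byte, mode os.FileMode) error {
	for {
		err := ioutil.WriteFile(path, data, mode)
		if !errors.Is(err, syscall.EINTR) {
			return err // nil on success, or a non-EINTR failure
		}
	}
}

func main() {
	// Hypothetical cgroup v1 knob; adjust the path for a real system.
	_ = retryingWriteFile("/sys/fs/cgroup/pids/demo/pids.max", []byte("100"), 0644)
}
```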
vendor/github.com/containerd/cgroups/v2/errors.go | 4 (generated, vendored)
@@ -44,7 +44,3 @@ func IgnoreNotExist(err error) error {
 }
 return err
 }
-
-func errPassthrough(err error) error {
-return err
-}
vendor/github.com/containerd/cgroups/v2/manager.go | 40 (generated, vendored)
@@ -18,26 +18,22 @@ package v2

 import (
 "bufio"
 "fmt"
 "io/ioutil"
 "math"
 "os"
 "path/filepath"
 "strconv"
 "strings"
 "sync"
+"syscall"
 "time"

-"golang.org/x/sys/unix"
-
 "github.com/containerd/cgroups/v2/stats"
-systemdDbus "github.com/coreos/go-systemd/v22/dbus"
 "github.com/godbus/dbus/v5"
 "github.com/opencontainers/runtime-spec/specs-go"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
+
+systemdDbus "github.com/coreos/go-systemd/v22/dbus"
+"golang.org/x/sys/unix"
 )

 const (
@@ -49,13 +45,8 @@ const (

 var (
 canDelegate bool
 once sync.Once
 )

-type cgValuer interface {
-Values() []Value
-}
-
 type Event struct {
 Low uint64
 High uint64
@@ -149,11 +140,21 @@ func (c *Value) write(path string, perm os.FileMode) error {
 default:
 return ErrInvalidFormat
 }
-return ioutil.WriteFile(
-filepath.Join(path, c.filename),
-data,
-perm,
-)
+
+// Retry writes on EINTR; see:
+// https://github.com/golang/go/issues/38033
+for {
+err := ioutil.WriteFile(
+filepath.Join(path, c.filename),
+data,
+perm,
+)
+if err == nil {
+return nil
+} else if !errors.Is(err, syscall.EINTR) {
+return err
+}
+}
 }

 func writeValues(path string, values []Value) error {
@@ -259,7 +260,7 @@ func (c *Manager) ToggleControllers(controllers []string, t ControllerToggle) er
 // Note that /sys/fs/cgroup/foo/bar/baz/cgroup.subtree_control does not need to be written.
 split := strings.Split(c.path, "/")
 var lastErr error
-for i, _ := range split {
+for i := range split {
 f := strings.Join(split[:i], "/")
 if !strings.HasPrefix(f, c.unifiedMountpoint) || f == c.path {
 continue
@@ -362,8 +363,7 @@ func (c *Manager) Stat() (*stats.Metrics, error) {
 for _, controller := range controllers {
 switch controller {
 case "cpu", "memory":
-filename := fmt.Sprintf("%s.stat", controller)
-if err := readKVStatsFile(c.path, filename, out); err != nil {
+if err := readKVStatsFile(c.path, controller+".stat", out); err != nil {
 if os.IsNotExist(err) {
 continue
 }
@@ -670,7 +670,7 @@ func NewSystemd(slice, group string, pid int, resources *Resources) (*Manager, e
 defer conn.Close()

 properties := []systemdDbus.Property{
-systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", group)),
+systemdDbus.PropDescription("cgroup " + group),
 newSystemdProperty("DefaultDependencies", false),
 newSystemdProperty("MemoryAccounting", true),
 newSystemdProperty("CPUAccounting", true),
vendor/github.com/containerd/cgroups/v2/paths.go | 2 (generated, vendored)
@@ -29,7 +29,7 @@ func NestedGroupPath(suffix string) (string, error) {
 if err != nil {
 return "", err
 }
-return filepath.Join(string(path), suffix), nil
+return filepath.Join(path, suffix), nil
 }

 // PidGroupPath will return the correct cgroup paths for an existing process running inside a cgroup
vendor/github.com/containerd/cgroups/v2/utils.go | 17 (generated, vendored)
@@ -28,9 +28,8 @@ import (
 "strings"
 "time"

-"github.com/godbus/dbus/v5"
-
 "github.com/containerd/cgroups/v2/stats"
+"github.com/godbus/dbus/v5"
 "github.com/opencontainers/runtime-spec/specs-go"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
@@ -106,14 +105,6 @@ func parseKV(raw string) (string, interface{}, error) {
 }
 }

-func readUint(path string) (uint64, error) {
-v, err := ioutil.ReadFile(path)
-if err != nil {
-return 0, err
-}
-return parseUint(strings.TrimSpace(string(v)), 10, 64)
-}
-
 func parseUint(s string, base, bitSize int) (uint64, error) {
 v, err := strconv.ParseUint(s, base, bitSize)
 if err != nil {
@@ -178,7 +169,7 @@ func ToResources(spec *specs.LinuxResources) *Resources {
 Mems: cpu.Mems,
 }
 if shares := cpu.Shares; shares != nil {
-convertedWeight := (1 + ((*shares-2)*9999)/262142)
+convertedWeight := 1 + ((*shares-2)*9999)/262142
 resources.CPU.Weight = &convertedWeight
 }
 if period := cpu.Period; period != nil {
@@ -301,8 +292,8 @@ func readIoStats(path string) []*stats.IOEntry {
 Major: major,
 Minor: minor,
 }
-for _, stats := range parts {
-keyPairValue := strings.Split(stats, "=")
+for _, s := range parts {
+keyPairValue := strings.Split(s, "=")
 if len(keyPairValue) != 2 {
 continue
 }
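Editor's note (not part of the commit): the `ToResources` hunk above converts cgroup v1 `cpu.shares` (range 2–262144) to a cgroup v2 `cpu.weight` (range 1–10000) with integer arithmetic. A small self-contained check of that formula, assuming the conventional default of 1024 shares:

```go
package main

import "fmt"

// sharesToWeight applies the same integer formula as the vendored ToResources helper.
func sharesToWeight(shares uint64) uint64 {
	return 1 + ((shares-2)*9999)/262142
}

func main() {
	// The extremes map to the v2 extremes, and the common default 1024 maps to 39.
	fmt.Println(sharesToWeight(2), sharesToWeight(1024), sharesToWeight(262144)) // 1 39 10000
}
```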
vendor/github.com/containerd/containerd/cio/io.go | 51 (generated, vendored)
@@ -245,19 +245,11 @@ func LogURI(uri *url.URL) Creator {
 // BinaryIO forwards container STDOUT|STDERR directly to a logging binary
 func BinaryIO(binary string, args map[string]string) Creator {
 return func(_ string) (IO, error) {
-binary = filepath.Clean(binary)
-if !strings.HasPrefix(binary, "/") {
-return nil, errors.New("absolute path needed")
+uri, err := LogURIGenerator("binary", binary, args)
+if err != nil {
+return nil, err
 }
-uri := &url.URL{
-Scheme: "binary",
-Path: binary,
-}
-q := uri.Query()
-for k, v := range args {
-q.Set(k, v)
-}
-uri.RawQuery = q.Encode()

 res := uri.String()
 return &logURI{
 config: Config{
@@ -272,14 +264,11 @@ func BinaryIO(binary string, args map[string]string) Creator {
 // If the log file already exists, the logs will be appended to the file.
 func LogFile(path string) Creator {
 return func(_ string) (IO, error) {
-path = filepath.Clean(path)
-if !strings.HasPrefix(path, "/") {
-return nil, errors.New("absolute path needed")
-}
-uri := &url.URL{
-Scheme: "file",
-Path: path,
+uri, err := LogURIGenerator("file", path, nil)
+if err != nil {
+return nil, err
 }

 res := uri.String()
 return &logURI{
 config: Config{
@@ -290,6 +279,30 @@ func LogFile(path string) Creator {
 }
 }

+// LogURIGenerator is the helper to generate log uri with specific scheme.
+func LogURIGenerator(scheme string, path string, args map[string]string) (*url.URL, error) {
+path = filepath.Clean(path)
+if !strings.HasPrefix(path, "/") {
+return nil, errors.New("absolute path needed")
+}
+
+uri := &url.URL{
+Scheme: scheme,
+Path: path,
+}
+
+if len(args) == 0 {
+return uri, nil
+}
+
+q := uri.Query()
+for k, v := range args {
+q.Set(k, v)
+}
+uri.RawQuery = q.Encode()
+return uri, nil
+}

 type logURI struct {
 config Config
 }
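Editor's note (not part of the commit): `BinaryIO` and `LogFile` above now delegate to the newly exported `LogURIGenerator` helper. A hedged sketch of calling it directly — the logger path and arguments are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/cio"
)

func main() {
	// Build a "binary://" logging URI the same way BinaryIO does internally.
	uri, err := cio.LogURIGenerator("binary", "/usr/local/bin/my-logger", map[string]string{
		"env": "prod",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(uri.String()) // e.g. binary:///usr/local/bin/my-logger?env=prod

	// The higher-level creator can be handed to task creation, e.g.:
	//   task, err := container.NewTask(ctx, cio.BinaryIO("/usr/local/bin/my-logger", nil))
}
```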
vendor/github.com/containerd/containerd/cio/io_unix.go | 2 (generated, vendored)
@@ -132,7 +132,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (pipes, error) {
 }
 }()
 }
-if fifos.Stderr != "" {
+if !fifos.Terminal && fifos.Stderr != "" {
 if f.Stderr, err = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
 return f, errors.Wrapf(err, "failed to open stderr fifo")
 }
vendor/github.com/containerd/containerd/client.go | 4 (generated, vendored)
@@ -351,6 +351,10 @@ type RemoteContext struct {

 // AllMetadata downloads all manifests and known-configuration files
 AllMetadata bool
+
+// ChildLabelMap sets the labels used to reference child objects in the content
+// store. By default, all GC reference labels will be set for all fetched content.
+ChildLabelMap func(ocispec.Descriptor) []string
 }

 func defaultRemoteContext() *RemoteContext {
vendor/github.com/containerd/containerd/client_opts.go | 13 (generated, vendored)
@@ -23,6 +23,7 @@ import (
 "github.com/containerd/containerd/platforms"
 "github.com/containerd/containerd/remotes"
 "github.com/containerd/containerd/snapshots"
+ocispec "github.com/opencontainers/image-spec/specs-go/v1"

 "google.golang.org/grpc"
 )
@@ -175,6 +176,18 @@ func WithPullLabels(labels map[string]string) RemoteOpt {
 }
 }

+// WithChildLabelMap sets the map function used to define the labels set
+// on referenced child content in the content store. This can be used
+// to overwrite the default GC labels or filter which labels get set
+// for content.
+// The default is `images.ChildGCLabels`.
+func WithChildLabelMap(fn func(ocispec.Descriptor) []string) RemoteOpt {
+return func(_ *Client, c *RemoteContext) error {
+c.ChildLabelMap = fn
+return nil
+}
+}
+
 // WithSchema1Conversion is used to convert Docker registry schema 1
 // manifests to oci manifests on pull. Without this option schema 1
 // manifests will return a not supported error.
vendor/github.com/containerd/containerd/contrib/apparmor/apparmor_unsupported.go | 43 (generated, vendored, new file)
@@ -0,0 +1,43 @@
 // +build !linux

 /*
 Copyright The containerd Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */

 package apparmor

 import (
 "context"

 "github.com/containerd/containerd/containers"
 "github.com/containerd/containerd/oci"
 specs "github.com/opencontainers/runtime-spec/specs-go"
 "github.com/pkg/errors"
 )

 // WithProfile sets the provided apparmor profile to the spec
 func WithProfile(profile string) oci.SpecOpts {
 return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
 return errors.New("apparmor is not supported")
 }
 }

 // WithDefaultProfile will generate a default apparmor profile under the provided name
 // for the container. It is only generated if a profile under that name does not exist.
 func WithDefaultProfile(name string) oci.SpecOpts {
 return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
 return errors.New("apparmor is not supported")
 }
 }
vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go | 6 (generated, vendored)
@@ -47,7 +47,7 @@ func arches() []specs.Arch {
 }
 }

-// DefaultProfile defines the whitelist for the default seccomp profile.
+// DefaultProfile defines the allowed syscalls for the default seccomp profile.
 func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 syscalls := []specs.LinuxSyscall{
 {
@@ -64,6 +64,8 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 "chmod",
 "chown",
 "chown32",
+"clock_adjtime",
+"clock_adjtime64",
 "clock_getres",
 "clock_getres_time64",
 "clock_gettime",
@@ -253,6 +255,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 "renameat2",
 "restart_syscall",
 "rmdir",
+"rseq",
 "rt_sigaction",
 "rt_sigpending",
 "rt_sigprocmask",
@@ -513,7 +516,6 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 "delete_module",
 "init_module",
 "finit_module",
-"query_module",
 },
 Action: specs.ActAllow,
 Args: []specs.LinuxSeccompArg{},
@@ -20,7 +20,7 @@ package seccomp

 import specs "github.com/opencontainers/runtime-spec/specs-go"

-// DefaultProfile defines the whitelist for the default seccomp profile.
+// DefaultProfile defines the allowed syscalls for the default seccomp profile.
 func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 return &specs.LinuxSeccomp{}
 }
vendor/github.com/containerd/containerd/diff/lcow/lcow.go | 2 (generated, vendored)
@@ -178,7 +178,7 @@ func (s windowsLcowDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mou
 // Compare creates a diff between the given mounts and uploads the result
 // to the content store.
 func (s windowsLcowDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) {
-return emptyDesc, errdefs.ErrNotImplemented
+return emptyDesc, errors.Wrap(errdefs.ErrNotImplemented, "windowsLcowDiff does not implement Compare method")
 }

 type readCounter struct {
vendor/github.com/containerd/containerd/diff/windows/windows.go | 4 (generated, vendored)
@@ -158,7 +158,7 @@ func (s windowsDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts
 // Compare creates a diff between the given mounts and uploads the result
 // to the content store.
 func (s windowsDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) {
-return emptyDesc, errdefs.ErrNotImplemented
+return emptyDesc, errors.Wrap(errdefs.ErrNotImplemented, "windowsDiff does not implement Compare method")
 }

 type readCounter struct {
@@ -181,7 +181,7 @@ func mountsToLayerAndParents(mounts []mount.Mount) (string, []string, error) {
 // This is a special case error. When this is received the diff service
 // will attempt the next differ in the chain which for Windows is the
 // lcow differ that we want.
-return "", nil, errdefs.ErrNotImplemented
+return "", nil, errors.Wrapf(errdefs.ErrNotImplemented, "windowsDiff does not support layer type %s", mnt.Type)
 }

 parentLayerPaths, err := mnt.GetParentPaths()
vendor/github.com/containerd/containerd/image.go | 36 (generated, vendored)
@@ -203,24 +203,26 @@ func (i *image) Usage(ctx context.Context, opts ...UsageOpt) (int64, error) {
 desc.Size = info.Size
 }

-for k, v := range info.Labels {
-const prefix = "containerd.io/gc.ref.snapshot."
-if !strings.HasPrefix(k, prefix) {
-continue
-}
-
-sn := i.client.SnapshotService(k[len(prefix):])
-if sn == nil {
-continue
-}
-
-u, err := sn.Usage(ctx, v)
-if err != nil {
-if !errdefs.IsNotFound(err) && !errdefs.IsInvalidArgument(err) {
-return nil, err
+if config.snapshots {
+for k, v := range info.Labels {
+const prefix = "containerd.io/gc.ref.snapshot."
+if !strings.HasPrefix(k, prefix) {
+continue
+}
+
+sn := i.client.SnapshotService(k[len(prefix):])
+if sn == nil {
+continue
+}
+
+u, err := sn.Usage(ctx, v)
+if err != nil {
+if !errdefs.IsNotFound(err) && !errdefs.IsInvalidArgument(err) {
+return nil, err
+}
+} else {
+usage += u.Size
+}
 } else {
 usage += u.Size
 }
 }
 }
vendor/github.com/containerd/containerd/images/handlers.go | 41 (generated, vendored)
@@ -170,6 +170,19 @@ func ChildrenHandler(provider content.Provider) HandlerFunc {
 // the children returned by the handler and passes through the children.
 // Must follow a handler that returns the children to be labeled.
 func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc {
+return SetChildrenMappedLabels(manager, f, nil)
+}
+
+// SetChildrenMappedLabels is a handler wrapper which sets labels for the content on
+// the children returned by the handler and passes through the children.
+// Must follow a handler that returns the children to be labeled.
+// The label map allows the caller to control the labels per child descriptor.
+// For returned labels, the index of the child will be appended to the end
+// except for the first index when the returned label does not end with '.'.
+func SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc {
+if labelMap == nil {
+labelMap = ChildGCLabels
+}
 return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 children, err := f(ctx, desc)
 if err != nil {
@@ -177,14 +190,26 @@ func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc {
 }

 if len(children) > 0 {
-info := content.Info{
-Digest: desc.Digest,
-Labels: map[string]string{},
-}
-fields := []string{}
-for i, ch := range children {
-info.Labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = ch.Digest.String()
-fields = append(fields, fmt.Sprintf("labels.containerd.io/gc.ref.content.%d", i))
+var (
+info = content.Info{
+Digest: desc.Digest,
+Labels: map[string]string{},
+}
+fields = []string{}
+keys = map[string]uint{}
+)
+for _, ch := range children {
+labelKeys := labelMap(ch)
+for _, key := range labelKeys {
+idx := keys[key]
+keys[key] = idx + 1
+if idx > 0 || key[len(key)-1] == '.' {
+key = fmt.Sprintf("%s%d", key, idx)
+}
+
+info.Labels[key] = ch.Digest.String()
+fields = append(fields, "labels."+key)
+}
 }

 _, err := manager.Update(ctx, info, fields...)
vendor/github.com/containerd/containerd/images/image.go | 2 (generated, vendored)
@@ -362,7 +362,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
 // childless data types.
 return nil, nil
 }
-log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
+log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType)
 }

 return descs, nil
vendor/github.com/containerd/containerd/images/mediatypes.go | 31 (generated, vendored)
@@ -23,6 +23,7 @@ import (

 "github.com/containerd/containerd/errdefs"
+ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 "github.com/pkg/errors"
 )

 // mediatype definitions for image components handled in containerd.
@@ -81,7 +82,7 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) {
 }
 return "", nil
 default:
-return "", errdefs.ErrNotImplemented
+return "", errors.Wrapf(errdefs.ErrNotImplemented, "unrecognised mediatype %s", mediaType)
 }
 }

@@ -124,3 +125,31 @@ func IsKnownConfig(mt string) bool {
 }
 return false
 }
+
+// ChildGCLabels returns the label for a given descriptor to reference it
+func ChildGCLabels(desc ocispec.Descriptor) []string {
+mt := desc.MediaType
+if IsKnownConfig(mt) {
+return []string{"containerd.io/gc.ref.content.config"}
+}
+
+switch mt {
+case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+return []string{"containerd.io/gc.ref.content.m."}
+}
+
+if IsLayerType(mt) {
+return []string{"containerd.io/gc.ref.content.l."}
+}
+
+return []string{"containerd.io/gc.ref.content."}
+}
+
+// ChildGCLabelsFilterLayers returns the labels for a given descriptor to
+// reference it, skipping layer media types
+func ChildGCLabelsFilterLayers(desc ocispec.Descriptor) []string {
+if IsLayerType(desc.MediaType) {
+return nil
+}
+return ChildGCLabels(desc)
+}
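Editor's note (not part of the commit): the new `WithChildLabelMap` client option (client_opts.go above) and the `ChildGCLabels`/`ChildGCLabelsFilterLayers` helpers work together to control which GC reference labels a pull writes for child content. A hedged usage sketch — the socket path and image reference are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	// Skip GC reference labels for layer blobs so that layer content is not
	// pinned by the manifest's references in the content store.
	img, err := client.Pull(ctx, "docker.io/library/alpine:latest",
		containerd.WithPullUnpack,
		containerd.WithChildLabelMap(images.ChildGCLabelsFilterLayers),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("pulled", img.Name())
}
```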
vendor/github.com/containerd/containerd/metrics/cgroups/v2/cgroups.go | 15 (generated, vendored)
@@ -21,10 +21,8 @@ package v2
 import (
 "context"

-"github.com/containerd/containerd/errdefs"
 "github.com/containerd/containerd/events"
 "github.com/containerd/containerd/runtime"
-"github.com/containerd/containerd/runtime/v1/linux"
 metrics "github.com/docker/go-metrics"
 )

@@ -48,19 +46,6 @@ func (m *cgroupsMonitor) Monitor(c runtime.Task) error {
 if err := m.collector.Add(c); err != nil {
 return err
 }
-t, ok := c.(*linux.Task)
-if !ok {
-return nil
-}
-cg, err := t.Cgroup()
-if err != nil {
-if errdefs.IsNotFound(err) {
-return nil
-}
-return err
-}
-// OOM handler is not implemented yet
-_ = cg
 return nil
 }
vendor/github.com/containerd/containerd/metrics/cgroups/v2/memory.go | 16 (generated, vendored)
@@ -586,4 +586,20 @@ var memoryMetrics = []*metric{
 }
 },
 },
+{
+name: "memory_oom",
+help: "The number of times a container has received an oom event",
+unit: metrics.Total,
+vt: prometheus.GaugeValue,
+getValues: func(stats *v2.Metrics) []value {
+if stats.MemoryEvents == nil {
+return nil
+}
+return []value{
+{
+v: float64(stats.MemoryEvents.Oom),
+},
+}
+},
+},
 }
vendor/github.com/containerd/containerd/mount/mount_linux.go | 32 (generated, vendored)
@@ -363,10 +363,34 @@ func (m *Mount) mountWithHelper(helperBinary, typePrefix, target string) error {
 args = append(args, "-o", o)
 }
 args = append(args, "-t", strings.TrimPrefix(m.Type, typePrefix))
-cmd := exec.Command(helperBinary, args...)
-out, err := cmd.CombinedOutput()
+
+infoBeforeMount, err := Lookup(target)
 if err != nil {
-return errors.Wrapf(err, "mount helper [%s %v] failed: %q", helperBinary, args, string(out))
+return err
 }
-return nil
+
+// cmd.CombinedOutput() may intermittently return ECHILD because of our signal handling in shim.
+// See #4387 and wait(2).
+const retriesOnECHILD = 10
+for i := 0; i < retriesOnECHILD; i++ {
+cmd := exec.Command(helperBinary, args...)
+out, err := cmd.CombinedOutput()
+if err == nil {
+return nil
+}
+if !errors.Is(err, unix.ECHILD) {
+return errors.Wrapf(err, "mount helper [%s %v] failed: %q", helperBinary, args, string(out))
+}
+// We got ECHILD, we are not sure whether the mount was successful.
+// If the mount ID has changed, we are sure we got some new mount, but still not sure it is fully completed.
+// So we attempt to unmount the new mount before retrying.
+infoAfterMount, err := Lookup(target)
+if err != nil {
+return err
+}
+if infoAfterMount.ID != infoBeforeMount.ID {
+_ = unmount(target, 0)
+}
+}
+return errors.Errorf("mount helper [%s %v] failed with ECHILD (retired %d times)", helperBinary, args, retriesOnECHILD)
 }
vendor/github.com/containerd/containerd/mount/mountinfo_linux.go | 4 (generated, vendored)
@@ -81,11 +81,11 @@ func parseInfoFile(r io.Reader) ([]Info, error) {
 p.Major, _ = strconv.Atoi(mm[0])
 p.Minor, _ = strconv.Atoi(mm[1])

-p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
+p.Root, err = strconv.Unquote(`"` + strings.Replace(fields[3], `"`, `\"`, -1) + `"`)
 if err != nil {
 return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote root field", fields[3])
 }
-p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
+p.Mountpoint, err = strconv.Unquote(`"` + strings.Replace(fields[4], `"`, `\"`, -1) + `"`)
 if err != nil {
 return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote mount point field", fields[4])
 }
vendor/github.com/containerd/containerd/oci/spec_opts_windows.go | 2 (generated, vendored)
@@ -52,7 +52,7 @@ func WithWindowsIgnoreFlushesDuringBoot() SpecOpts {
 }
 }

-// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.IgnoreFlushesDuringBoot`.
+// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.Network.AllowUnqualifiedDNSQuery`.
 func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts {
 return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
 if s.Windows == nil {
vendor/github.com/containerd/containerd/pkg/process/init.go | 8 (generated, vendored)
@@ -27,7 +27,6 @@ import (
 "path/filepath"
 "strings"
 "sync"
-"syscall"
 "time"

 "github.com/containerd/console"
@@ -39,6 +38,7 @@ import (
 google_protobuf "github.com/gogo/protobuf/types"
 specs "github.com/opencontainers/runtime-spec/specs-go"
 "github.com/pkg/errors"
+"golang.org/x/sys/unix"
 )

 // Init represents an initial process for a container
@@ -87,7 +87,7 @@ func NewRunc(root, path, namespace, runtime, criu string, systemd bool) *runc.Ru
 Command: runtime,
 Log: filepath.Join(path, "log.json"),
 LogFormat: runc.JSON,
-PdeathSignal: syscall.SIGKILL,
+PdeathSignal: unix.SIGKILL,
 Root: filepath.Join(root, namespace),
 Criu: criu,
 SystemdCgroup: systemd,
@@ -176,7 +176,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
 }

 func (p *Init) openStdin(path string) error {
-sc, err := fifo.OpenFifo(context.Background(), path, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
+sc, err := fifo.OpenFifo(context.Background(), path, unix.O_WRONLY|unix.O_NONBLOCK, 0)
 if err != nil {
 return errors.Wrapf(err, "failed to open stdin fifo %s", path)
 }
@@ -361,7 +361,7 @@ func (p *Init) KillAll(ctx context.Context) error {
 p.mu.Lock()
 defer p.mu.Unlock()

-err := p.runtime.Kill(ctx, p.id, int(syscall.SIGKILL), &runc.KillOpts{
+err := p.runtime.Kill(ctx, p.id, int(unix.SIGKILL), &runc.KillOpts{
 All: true,
 })
 return p.runtimeError(err, "OCI runtime killall failed")
vendor/github.com/containerd/containerd/pkg/process/io.go | 2 (generated, vendored)
@@ -381,7 +381,7 @@ func (b *binaryIO) cancel() error {
 return result.ErrorOrNil()
 }

-done := make(chan error)
+done := make(chan error, 1)
 go func() {
 done <- b.cmd.Wait()
 }()
vendor/github.com/containerd/containerd/platforms/cpuinfo.go | 4 (generated, vendored)
@@ -74,8 +74,8 @@ func getCPUInfo(pattern string) (info string, err error) {
 }

 func getCPUVariant() string {
-if runtime.GOOS == "windows" {
-// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
+if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+// Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use
 // runtime.GOARCH to determine the variants
 var variant string
 switch runtime.GOARCH {
vendor/github.com/containerd/containerd/pull.go | 2 (generated, vendored)
@@ -159,7 +159,7 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
 // Get all the children for a descriptor
 childrenHandler := images.ChildrenHandler(store)
 // Set any children labels for that content
-childrenHandler = images.SetChildrenLabels(store, childrenHandler)
+childrenHandler = images.SetChildrenMappedLabels(store, childrenHandler, rCtx.ChildLabelMap)
 if rCtx.AllMetadata {
 // Filter manifests by platforms but allow to handle manifest
 // and configuration for not-target platforms
vendor/github.com/containerd/containerd/remotes/docker/pusher.go | 2 (generated, vendored)
@@ -235,7 +235,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten

 go func() {
 defer close(respC)
-resp, err = req.do(ctx)
+resp, err := req.do(ctx)
 if err != nil {
 pr.CloseWithError(err)
 return
vendor/github.com/containerd/containerd/remotes/docker/registry.go | 1 (generated, vendored)
@@ -70,6 +70,7 @@ type RegistryHost struct {
 Scheme string
 Path string
 Capabilities HostCapabilities
+Header http.Header
 }

 // RegistryHosts fetches the registry hosts for a given namespace,
vendor/github.com/containerd/containerd/remotes/docker/resolver.go | 3 (generated, vendored)
@@ -450,6 +450,9 @@ func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *re
 for key, value := range r.header {
 header[key] = append(header[key], value...)
 }
+for key, value := range host.Header {
+header[key] = append(header[key], value...)
+}
 parts := append([]string{"/", host.Path, r.namespace}, ps...)
 p := path.Join(parts...)
 // Join strips trailing slash, re-add ending "/" if included
vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go | 20 (generated, vendored)
@@ -324,21 +324,31 @@ func (c *Client) signalShim(ctx context.Context, sig syscall.Signal) error {
 select {
 case <-ctx.Done():
 return ctx.Err()
-case <-c.waitForExit(pid):
+case <-c.waitForExit(ctx, pid):
 return nil
 }
 }

-func (c *Client) waitForExit(pid int) <-chan struct{} {
-c.exitOnce.Do(func() {
+func (c *Client) waitForExit(ctx context.Context, pid int) <-chan struct{} {
+go c.exitOnce.Do(func() {
+defer close(c.exitCh)
+
+ticker := time.NewTicker(10 * time.Millisecond)
+defer ticker.Stop()
+
 for {
 // use kill(pid, 0) here because the shim could have been reparented
 // and we are no longer able to waitpid(pid, ...) on the shim
 if err := unix.Kill(pid, 0); err == unix.ESRCH {
-close(c.exitCh)
 return
 }
-time.Sleep(10 * time.Millisecond)
+
+select {
+case <-ticker.C:
+case <-ctx.Done():
+log.G(ctx).WithField("pid", pid).Warn("timed out while waiting for shim to exit")
+return
+}
 }
 })
 return c.exitCh
vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go | 11 (generated, vendored)
@@ -130,7 +130,9 @@ func (l *RemoteEventsPublisher) Publish(ctx context.Context, topic string, event
 func (l *RemoteEventsPublisher) forwardRequest(ctx context.Context, req *v1.ForwardRequest) error {
 service, err := l.client.EventsService()
 if err == nil {
-_, err = service.Forward(ctx, req)
+fCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+_, err = service.Forward(fCtx, req)
+cancel()
 if err == nil {
 return nil
 }
@@ -149,7 +151,12 @@ func (l *RemoteEventsPublisher) forwardRequest(ctx context.Context, req *v1.Forw
 if err != nil {
 return err
 }
-if _, err = service.Forward(ctx, req); err != nil {
+
+// try again with a fresh context, otherwise we may get a context timeout unexpectedly.
+fCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+_, err = service.Forward(fCtx, req)
+cancel()
+if err != nil {
 return err
 }

vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go | 13 (generated, vendored)
@@ -70,6 +70,7 @@ type snapshotter struct {
 root string
 ms *storage.MetaStore
 asyncRemove bool
+indexOff bool
 }

 // NewSnapshotter returns a Snapshotter which uses overlayfs. The overlayfs
@@ -102,10 +103,17 @@ func NewSnapshotter(root string, opts ...Opt) (snapshots.Snapshotter, error) {
 return nil, err
 }

+// figure out whether "index=off" option is recognized by the kernel
+var indexOff bool
+if _, err = os.Stat("/sys/module/overlay/parameters/index"); err == nil {
+indexOff = true
+}
+
 return &snapshotter{
 root: root,
 ms: ms,
 asyncRemove: config.asyncRemove,
+indexOff: indexOff,
 }, nil
 }

@@ -465,6 +473,11 @@ func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount {
 }
 var options []string

+// set index=off when mount overlayfs
+if o.indexOff {
+options = append(options, "index=off")
+}
+
 if s.Kind == snapshots.KindActive {
 options = append(options,
 fmt.Sprintf("workdir=%s", o.workPath(s.ID)),
35 vendor/github.com/containerd/containerd/snapshotter_opts_unix.go generated vendored Normal file
@@ -0,0 +1,35 @@
// +build !windows

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package containerd

import (
"fmt"

"github.com/containerd/containerd/snapshots"
)

// WithRemapperLabels creates the labels used by any supporting snapshotter
// to shift the filesystem ownership (user namespace mapping) automatically; currently
// supported by the fuse-overlayfs snapshotter
func WithRemapperLabels(ctrUID, hostUID, ctrGID, hostGID, length uint32) snapshots.Opt {
return snapshots.WithLabels(map[string]string{
"containerd.io/snapshot/uidmapping": fmt.Sprintf("%d:%d:%d", ctrUID, hostUID, length),
"containerd.io/snapshot/gidmapping": fmt.Sprintf("%d:%d:%d", ctrGID, hostGID, length),
})
}
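A small hedged sketch of the label values WithRemapperLabels produces, for an illustrative mapping of container ID 0 to host ID 1000 over 65536 IDs; the numbers are made up for the example and only the standard library is used.

```go
package main

import "fmt"

func main() {
	// Illustrative mapping: container ID 0 maps to host ID 1000 for a
	// range of 65536 IDs (values chosen for the example only).
	ctrID, hostID, length := uint32(0), uint32(1000), uint32(65536)
	labels := map[string]string{
		"containerd.io/snapshot/uidmapping": fmt.Sprintf("%d:%d:%d", ctrID, hostID, length),
		"containerd.io/snapshot/gidmapping": fmt.Sprintf("%d:%d:%d", ctrID, hostID, length),
	}
	fmt.Println(labels["containerd.io/snapshot/uidmapping"]) // 0:1000:65536
}
```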
4 vendor/github.com/containerd/containerd/unpacker.go generated vendored
@@ -178,13 +178,13 @@ EachLayer:
fetchC[i] = make(chan struct{})
}

go func() {
go func(i int) {
err := u.fetch(ctx, h, layers[i:], fetchC)
if err != nil {
fetchErr <- err
}
close(fetchErr)
}()
}(i)
}

select {
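The unpacker fix above is the classic Go loop-variable capture problem: passing i as an argument gives each goroutine its own copy. A minimal self-contained illustration:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		// Passing i as an argument gives each goroutine its own copy;
		// capturing the loop variable directly would let all goroutines
		// observe whatever value i holds when they finally run.
		go func(i int) {
			defer wg.Done()
			fmt.Println(i)
		}(i)
	}
	wg.Wait()
}
```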
65
vendor/github.com/containerd/containerd/vendor.conf
generated
vendored
65
vendor/github.com/containerd/containerd/vendor.conf
generated
vendored
@ -2,22 +2,22 @@ github.com/beorn7/perks v1.0.1
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/cespare/xxhash/v2 v2.1.1
|
||||
github.com/containerd/btrfs 153935315f4ab9be5bf03650a1341454b05efa5d
|
||||
github.com/containerd/cgroups b4448137398923af7f4918b8b2ad8249172ca7a6
|
||||
github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff
|
||||
github.com/containerd/console v1.0.0
|
||||
github.com/containerd/continuity d3ef23f19fbb106bb73ffde425d07a9187e30745
|
||||
github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf
|
||||
github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076ebd565c4e8df0c
|
||||
github.com/containerd/ttrpc v1.0.1
|
||||
github.com/containerd/typeurl v1.0.1
|
||||
github.com/coreos/go-systemd/v22 v22.0.0
|
||||
github.com/cpuguy83/go-md2man v1.0.10
|
||||
github.com/coreos/go-systemd/v22 v22.1.0
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0
|
||||
github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f
|
||||
github.com/docker/go-metrics v0.0.1
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/godbus/dbus/v5 v5.0.3
|
||||
github.com/gogo/googleapis v1.3.2
|
||||
github.com/gogo/protobuf v1.3.1
|
||||
github.com/golang/protobuf v1.3.3
|
||||
github.com/golang/protobuf v1.3.5
|
||||
github.com/google/go-cmp v0.2.0
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
|
||||
@ -31,65 +31,66 @@ github.com/Microsoft/go-winio v0.4.14
|
||||
github.com/Microsoft/hcsshim v0.8.9
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/opencontainers/runc v1.0.0-rc10
|
||||
github.com/opencontainers/runtime-spec v1.0.2
|
||||
github.com/opencontainers/runc v1.0.0-rc91
|
||||
github.com/opencontainers/runtime-spec 237cc4f519e2e8f9b235bacccfa8ef5a84df2875 # v1.0.3-0.20200520003142-237cc4f519e2
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.3.0
|
||||
github.com/prometheus/client_model v0.1.0
|
||||
github.com/prometheus/common v0.7.0
|
||||
github.com/prometheus/procfs v0.0.8
|
||||
github.com/russross/blackfriday v1.5.2
|
||||
github.com/prometheus/client_golang v1.6.0
|
||||
github.com/prometheus/client_model v0.2.0
|
||||
github.com/prometheus/common v0.9.1
|
||||
github.com/prometheus/procfs v0.0.11
|
||||
github.com/russross/blackfriday/v2 v2.0.1
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0
|
||||
github.com/sirupsen/logrus v1.6.0
|
||||
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
|
||||
github.com/urfave/cli v1.22.0
|
||||
go.etcd.io/bbolt v1.3.3
|
||||
github.com/urfave/cli v1.22.1 # NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
|
||||
go.etcd.io/bbolt v1.3.5
|
||||
go.opencensus.io v0.22.0
|
||||
golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3
|
||||
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
|
||||
golang.org/x/sys 5c8b2ff67527cb88b770f693cebf3799036d8bc0
|
||||
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
|
||||
golang.org/x/sys 9dae0f8f577553e0f21298e18926efc9644c281d
|
||||
golang.org/x/text v0.3.3
|
||||
google.golang.org/genproto e50cd9704f63023d62cd06a1994b98227fc4d21a
|
||||
google.golang.org/grpc v1.27.1
|
||||
gotest.tools/v3 v3.0.2
|
||||
|
||||
# cgroups dependencies
|
||||
github.com/cilium/ebpf 4032b1d8aae306b7bb94a2a11002932caf88c644
|
||||
github.com/cilium/ebpf 1c8d4c9ef7759622653a1d319284a44652333b28
|
||||
|
||||
# cri dependencies
|
||||
github.com/containerd/cri 64aa9da76fc0ab333119f455f3b292244c1fae8c # master
|
||||
github.com/containerd/cri 8448b92d237e877bed1e4aa7a0baf0dee234dbcb # master
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/docker/docker 4634ce647cf2ce2c6031129ccd109e557244986f
|
||||
github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
|
||||
github.com/emicklei/go-restful v2.9.5
|
||||
github.com/go-logr/logr v0.2.0
|
||||
github.com/google/gofuzz v1.1.0
|
||||
github.com/json-iterator/go v1.1.8
|
||||
github.com/json-iterator/go v1.1.9
|
||||
github.com/modern-go/concurrent 1.0.3
|
||||
github.com/modern-go/reflect2 v1.0.1
|
||||
github.com/opencontainers/selinux v1.5.1
|
||||
github.com/opencontainers/selinux v1.6.0
|
||||
github.com/seccomp/libseccomp-golang v0.9.1
|
||||
github.com/stretchr/testify v1.4.0
|
||||
github.com/tchap/go-patricia v2.2.6
|
||||
github.com/willf/bitset d5bec3311243426a3c6d1b7a795f24b17c686dbb # 1.1.10+ used by selinux pkg
|
||||
golang.org/x/crypto bac4c82f69751a6dd76e702d54b3ceb88adab236
|
||||
golang.org/x/oauth2 0f29369cfe4552d0e4bcddc57cc75f4d7e672a33
|
||||
golang.org/x/time 9d24e82272b4f38b78bc8cff74fa936d31ccd8ef
|
||||
golang.org/x/oauth2 858c2ad4c8b6c5d10852cb89079f6ca1c7309787
|
||||
golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82
|
||||
gopkg.in/inf.v0 v0.9.1
|
||||
gopkg.in/yaml.v2 v2.2.8
|
||||
k8s.io/api v0.18.2
|
||||
k8s.io/apimachinery v0.18.2
|
||||
k8s.io/apiserver v0.18.2
|
||||
k8s.io/client-go v0.18.2
|
||||
k8s.io/cri-api v0.18.2
|
||||
k8s.io/klog v1.0.0
|
||||
k8s.io/kubernetes v1.18.2
|
||||
k8s.io/utils a9aa75ae1b89e1b992c33383f48e942d97e52dae
|
||||
k8s.io/api v0.19.0-beta.2
|
||||
k8s.io/apimachinery v0.19.0-beta.2
|
||||
k8s.io/apiserver v0.19.0-beta.2
|
||||
k8s.io/client-go v0.19.0-beta.2
|
||||
k8s.io/cri-api v0.19.0-beta.2
|
||||
k8s.io/klog/v2 v2.2.0
|
||||
k8s.io/utils 2df71ebbae66f39338aed4cd0bb82d2212ee33cc
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0
|
||||
sigs.k8s.io/yaml v1.2.0
|
||||
|
||||
# cni dependencies
|
||||
github.com/containerd/go-cni 0d360c50b10b350b6bb23863fd4dfb1c232b01c9
|
||||
github.com/containerd/go-cni v1.0.0
|
||||
github.com/containernetworking/cni v0.7.1
|
||||
github.com/containernetworking/plugins v0.7.6
|
||||
github.com/fsnotify/fsnotify v1.4.8
|
||||
github.com/fsnotify/fsnotify v1.4.9
|
||||
|
||||
# image decrypt depedencies
|
||||
github.com/containerd/imgcrypt v1.0.1
|
||||
|
2 vendor/github.com/containerd/containerd/version/version.go generated vendored
@@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd"

// Version holds the complete version number. Filled in at linking time.
Version = "1.4.0-beta.0+unknown"
Version = "1.4.0-beta.2+unknown"

// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.
10 vendor/github.com/coreos/go-systemd/v22/README.md generated vendored
@@ -1,7 +1,7 @@
# go-systemd

[](https://travis-ci.org/coreos/go-systemd)
[](http://godoc.org/github.com/coreos/go-systemd)
[](https://pkg.go.dev/mod/github.com/coreos/go-systemd/v22/?tab=packages)

@@ -24,13 +24,15 @@ https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver

## systemd Service Notification

The `daemon` package is an implementation of the [sd_notify protocol](https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description). It can be used to inform systemd of service start-up completion, watchdog events, and other status changes.
The `daemon` package is an implementation of the [sd_notify protocol](https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description).
It can be used to inform systemd of service start-up completion, watchdog events, and other status changes.

## D-Bus

The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here:
The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units.
[API documentation][dbus-doc] is available online.

http://godoc.org/github.com/coreos/go-systemd/dbus
[dbus-doc]: https://pkg.go.dev/github.com/coreos/go-systemd/v22/dbus?tab=doc

### Debugging

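The README text above describes the daemon package's sd_notify support. A minimal, hedged sketch of how a Type=notify service might use it, assuming the SdNotify function and SdNotifyReady constant published in go-systemd v22:

```go
package main

import (
	"log"

	"github.com/coreos/go-systemd/v22/daemon"
)

func main() {
	// Tell systemd (Type=notify units) that start-up has finished.
	sent, err := daemon.SdNotify(false, daemon.SdNotifyReady)
	if err != nil {
		log.Printf("sd_notify failed: %v", err)
	} else if !sent {
		log.Print("NOTIFY_SOCKET not set; not running under systemd")
	}
}
```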
15 vendor/go.etcd.io/bbolt/README.md generated vendored
@@ -152,11 +152,12 @@ are not thread safe. To work with data in multiple goroutines you must start
a transaction for each one or use locking to ensure only one goroutine accesses
a transaction at a time. Creating transaction from the `DB` is thread safe.

Read-only transactions and read-write transactions should not depend on one
another and generally shouldn't be opened simultaneously in the same goroutine.
This can cause a deadlock as the read-write transaction needs to periodically
re-map the data file but it cannot do so while a read-only transaction is open.

Transactions should not depend on one another and generally shouldn't be opened
simultaneously in the same goroutine. This can cause a deadlock as the read-write
transaction needs to periodically re-map the data file but it cannot do so while
any read-only transaction is open. Even a nested read-only transaction can cause
a deadlock, as the child transaction can block the parent transaction from releasing
its resources.

#### Read-write transactions

@@ -275,7 +276,7 @@ should be writable.
### Using buckets

Buckets are collections of key/value pairs within the database. All keys in a
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
function:

```go
@@ -923,6 +924,7 @@ Below is a list of public, open source projects that use Bolt:
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more)
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
@@ -935,6 +937,7 @@ Below is a list of public, open source projects that use Bolt:
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
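Since the corrected README text points readers at Tx.CreateBucket(), here is a small hedged sketch of bucket creation inside a read-write transaction using the bbolt API vendored in this commit; the file name my.db and the bucket/key names are illustrative.

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A read-write transaction; buckets are created on the Tx, not the DB.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// A separate read-only transaction; do not nest it inside db.Update,
	// per the deadlock warning in the README hunk above.
	_ = db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("widgets")).Get([]byte("answer"))
		fmt.Printf("answer=%s\n", v)
		return nil
	})
}
```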
3
vendor/go.etcd.io/bbolt/bolt_386.go
generated
vendored
3
vendor/go.etcd.io/bbolt/bolt_386.go
generated
vendored
@ -5,6 +5,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
3
vendor/go.etcd.io/bbolt/bolt_amd64.go
generated
vendored
3
vendor/go.etcd.io/bbolt/bolt_amd64.go
generated
vendored
@ -5,6 +5,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
21
vendor/go.etcd.io/bbolt/bolt_arm.go
generated
vendored
21
vendor/go.etcd.io/bbolt/bolt_arm.go
generated
vendored
@ -1,28 +1,7 @@
|
||||
package bbolt
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned bool
|
||||
|
||||
func init() {
|
||||
// Simple check to see whether this arch handles unaligned load/stores
|
||||
// correctly.
|
||||
|
||||
// ARM9 and older devices require load/stores to be from/to aligned
|
||||
// addresses. If not, the lower 2 bits are cleared and that address is
|
||||
// read in a jumbled up order.
|
||||
|
||||
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
|
||||
|
||||
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
|
||||
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
|
||||
|
||||
brokenUnaligned = val != 0x11222211
|
||||
}
|
||||
|
3
vendor/go.etcd.io/bbolt/bolt_arm64.go
generated
vendored
3
vendor/go.etcd.io/bbolt/bolt_arm64.go
generated
vendored
@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
3
vendor/go.etcd.io/bbolt/bolt_mips64x.go
generated
vendored
3
vendor/go.etcd.io/bbolt/bolt_mips64x.go
generated
vendored
@ -7,6 +7,3 @@ const maxMapSize = 0x8000000000 // 512GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
3
vendor/go.etcd.io/bbolt/bolt_mipsx.go
generated
vendored
3
vendor/go.etcd.io/bbolt/bolt_mipsx.go
generated
vendored
@ -7,6 +7,3 @@ const maxMapSize = 0x40000000 // 1GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
3
vendor/go.etcd.io/bbolt/bolt_ppc.go
generated
vendored
3
vendor/go.etcd.io/bbolt/bolt_ppc.go
generated
vendored
@ -7,6 +7,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
3
vendor/go.etcd.io/bbolt/bolt_ppc64.go
generated
vendored
3
vendor/go.etcd.io/bbolt/bolt_ppc64.go
generated
vendored
@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
3
vendor/go.etcd.io/bbolt/bolt_ppc64le.go
generated
vendored
3
vendor/go.etcd.io/bbolt/bolt_ppc64le.go
generated
vendored
@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
3
vendor/go.etcd.io/bbolt/bolt_riscv64.go
generated
vendored
3
vendor/go.etcd.io/bbolt/bolt_riscv64.go
generated
vendored
@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = true
|
||||
|
3
vendor/go.etcd.io/bbolt/bolt_s390x.go
generated
vendored
3
vendor/go.etcd.io/bbolt/bolt_s390x.go
generated
vendored
@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
|
2
vendor/go.etcd.io/bbolt/bolt_unix.go
generated
vendored
2
vendor/go.etcd.io/bbolt/bolt_unix.go
generated
vendored
@ -1,4 +1,4 @@
|
||||
// +build !windows,!plan9,!solaris
|
||||
// +build !windows,!plan9,!solaris,!aix
|
||||
|
||||
package bbolt
|
||||
|
||||
|
90
vendor/go.etcd.io/bbolt/bolt_unix_aix.go
generated
vendored
Normal file
90
vendor/go.etcd.io/bbolt/bolt_unix_aix.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
|
||||
// +build aix
|
||||
|
||||
package bbolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// flock acquires an advisory lock on a file descriptor.
|
||||
func flock(db *DB, exclusive bool, timeout time.Duration) error {
|
||||
var t time.Time
|
||||
if timeout != 0 {
|
||||
t = time.Now()
|
||||
}
|
||||
fd := db.file.Fd()
|
||||
var lockType int16
|
||||
if exclusive {
|
||||
lockType = syscall.F_WRLCK
|
||||
} else {
|
||||
lockType = syscall.F_RDLCK
|
||||
}
|
||||
for {
|
||||
// Attempt to obtain an exclusive lock.
|
||||
lock := syscall.Flock_t{Type: lockType}
|
||||
err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
|
||||
if err == nil {
|
||||
return nil
|
||||
} else if err != syscall.EAGAIN {
|
||||
return err
|
||||
}
|
||||
|
||||
// If we timed out then return an error.
|
||||
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
|
||||
return ErrTimeout
|
||||
}
|
||||
|
||||
// Wait for a bit and try again.
|
||||
time.Sleep(flockRetryTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
// funlock releases an advisory lock on a file descriptor.
|
||||
func funlock(db *DB) error {
|
||||
var lock syscall.Flock_t
|
||||
lock.Start = 0
|
||||
lock.Len = 0
|
||||
lock.Type = syscall.F_UNLCK
|
||||
lock.Whence = 0
|
||||
return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
|
||||
}
|
||||
|
||||
// mmap memory maps a DB's data file.
|
||||
func mmap(db *DB, sz int) error {
|
||||
// Map the data file to memory.
|
||||
b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Advise the kernel that the mmap is accessed randomly.
|
||||
if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
|
||||
return fmt.Errorf("madvise: %s", err)
|
||||
}
|
||||
|
||||
// Save the original byte slice and convert to a byte array pointer.
|
||||
db.dataref = b
|
||||
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
|
||||
db.datasz = sz
|
||||
return nil
|
||||
}
|
||||
|
||||
// munmap unmaps a DB's data file from memory.
|
||||
func munmap(db *DB) error {
|
||||
// Ignore the unmap if we have no mapped data.
|
||||
if db.dataref == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmap using the original byte slice.
|
||||
err := unix.Munmap(db.dataref)
|
||||
db.dataref = nil
|
||||
db.data = nil
|
||||
db.datasz = 0
|
||||
return err
|
||||
}
|
34
vendor/go.etcd.io/bbolt/bucket.go
generated
vendored
34
vendor/go.etcd.io/bbolt/bucket.go
generated
vendored
@ -123,10 +123,12 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
|
||||
func (b *Bucket) openBucket(value []byte) *Bucket {
|
||||
var child = newBucket(b.tx)
|
||||
|
||||
// If unaligned load/stores are broken on this arch and value is
|
||||
// unaligned simply clone to an aligned byte array.
|
||||
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
|
||||
|
||||
// Unaligned access requires a copy to be made.
|
||||
const unalignedMask = unsafe.Alignof(struct {
|
||||
bucket
|
||||
page
|
||||
}{}) - 1
|
||||
unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0
|
||||
if unaligned {
|
||||
value = cloneBytes(value)
|
||||
}
|
||||
@ -206,7 +208,7 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket at the given key.
|
||||
// Returns an error if the bucket does not exists, or if the key represents a non-bucket value.
|
||||
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
|
||||
func (b *Bucket) DeleteBucket(key []byte) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
@ -228,7 +230,7 @@ func (b *Bucket) DeleteBucket(key []byte) error {
|
||||
// Recursively delete all child buckets.
|
||||
child := b.Bucket(key)
|
||||
err := child.ForEach(func(k, v []byte) error {
|
||||
if v == nil {
|
||||
if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 {
|
||||
if err := child.DeleteBucket(k); err != nil {
|
||||
return fmt.Errorf("delete bucket: %s", err)
|
||||
}
|
||||
@ -409,7 +411,7 @@ func (b *Bucket) Stats() BucketStats {
|
||||
|
||||
if p.count != 0 {
|
||||
// If page has any elements, add all element headers.
|
||||
used += leafPageElementSize * int(p.count-1)
|
||||
used += leafPageElementSize * uintptr(p.count-1)
|
||||
|
||||
// Add all element key, value sizes.
|
||||
// The computation takes advantage of the fact that the position
|
||||
@ -417,16 +419,16 @@ func (b *Bucket) Stats() BucketStats {
|
||||
// of all previous elements' keys and values.
|
||||
// It also includes the last element's header.
|
||||
lastElement := p.leafPageElement(p.count - 1)
|
||||
used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
|
||||
used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize)
|
||||
}
|
||||
|
||||
if b.root == 0 {
|
||||
// For inlined bucket just update the inline stats
|
||||
s.InlineBucketInuse += used
|
||||
s.InlineBucketInuse += int(used)
|
||||
} else {
|
||||
// For non-inlined bucket update all the leaf stats
|
||||
s.LeafPageN++
|
||||
s.LeafInuse += used
|
||||
s.LeafInuse += int(used)
|
||||
s.LeafOverflowN += int(p.overflow)
|
||||
|
||||
// Collect stats from sub-buckets.
|
||||
@ -447,13 +449,13 @@ func (b *Bucket) Stats() BucketStats {
|
||||
|
||||
// used totals the used bytes for the page
|
||||
// Add header and all element headers.
|
||||
used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
|
||||
used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1))
|
||||
|
||||
// Add size of all keys and values.
|
||||
// Again, use the fact that last element's position equals to
|
||||
// the total of key, value sizes of all previous elements.
|
||||
used += int(lastElement.pos + lastElement.ksize)
|
||||
s.BranchInuse += used
|
||||
used += uintptr(lastElement.pos + lastElement.ksize)
|
||||
s.BranchInuse += int(used)
|
||||
s.BranchOverflowN += int(p.overflow)
|
||||
}
|
||||
|
||||
@ -593,7 +595,7 @@ func (b *Bucket) inlineable() bool {
|
||||
// our threshold for inline bucket size.
|
||||
var size = pageHeaderSize
|
||||
for _, inode := range n.inodes {
|
||||
size += leafPageElementSize + len(inode.key) + len(inode.value)
|
||||
size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value))
|
||||
|
||||
if inode.flags&bucketLeafFlag != 0 {
|
||||
return false
|
||||
@ -606,8 +608,8 @@ func (b *Bucket) inlineable() bool {
|
||||
}
|
||||
|
||||
// Returns the maximum total size of a bucket to make it a candidate for inlining.
|
||||
func (b *Bucket) maxInlineBucketSize() int {
|
||||
return b.tx.db.pageSize / 4
|
||||
func (b *Bucket) maxInlineBucketSize() uintptr {
|
||||
return uintptr(b.tx.db.pageSize / 4)
|
||||
}
|
||||
|
||||
// write allocates and writes a bucket to a byte slice.
|
||||
|
2 vendor/go.etcd.io/bbolt/cursor.go generated vendored
@@ -366,7 +366,7 @@ func (c *Cursor) node() *node {
}
for _, ref := range c.stack[:len(c.stack)-1] {
_assert(!n.isLeaf, "expected branch node")
n = n.childAt(int(ref.index))
n = n.childAt(ref.index)
}
_assert(n.isLeaf, "expected leaf node")
return n
4 vendor/go.etcd.io/bbolt/db.go generated vendored
@@ -206,12 +206,12 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
}

// Open data file and separate sync handler for metadata writes.
db.path = path
var err error
if db.file, err = db.openFile(db.path, flag|os.O_CREATE, mode); err != nil {
if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil {
_ = db.close()
return nil, err
}
db.path = db.file.Name()

// Lock file so that other processes using Bolt in read-write mode cannot
// use the database at the same time. This would cause corruption since
38
vendor/go.etcd.io/bbolt/freelist.go
generated
vendored
38
vendor/go.etcd.io/bbolt/freelist.go
generated
vendored
@ -71,7 +71,7 @@ func (f *freelist) size() int {
|
||||
// The first element will be used to store the count. See freelist.write.
|
||||
n++
|
||||
}
|
||||
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
|
||||
return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n)
|
||||
}
|
||||
|
||||
// count returns count of pages on the freelist
|
||||
@ -93,7 +93,7 @@ func (f *freelist) pending_count() int {
|
||||
return count
|
||||
}
|
||||
|
||||
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
|
||||
// copyall copies a list of all free ids and all pending ids in one sorted list.
|
||||
// f.count returns the minimum length required for dst.
|
||||
func (f *freelist) copyall(dst []pgid) {
|
||||
m := make(pgids, 0, f.pending_count())
|
||||
@ -267,17 +267,23 @@ func (f *freelist) read(p *page) {
|
||||
}
|
||||
// If the page.count is at the max uint16 value (64k) then it's considered
|
||||
// an overflow and the size of the freelist is stored as the first element.
|
||||
idx, count := 0, int(p.count)
|
||||
var idx, count = 0, int(p.count)
|
||||
if count == 0xFFFF {
|
||||
idx = 1
|
||||
count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
|
||||
c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
|
||||
count = int(c)
|
||||
if count < 0 {
|
||||
panic(fmt.Sprintf("leading element count %d overflows int", c))
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the list of page ids from the freelist.
|
||||
if count == 0 {
|
||||
f.ids = nil
|
||||
} else {
|
||||
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
|
||||
var ids []pgid
|
||||
data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx)
|
||||
unsafeSlice(unsafe.Pointer(&ids), data, count)
|
||||
|
||||
// copy the ids, so we don't modify on the freelist page directly
|
||||
idsCopy := make([]pgid, count)
|
||||
@ -310,16 +316,22 @@ func (f *freelist) write(p *page) error {
|
||||
|
||||
// The page.count can only hold up to 64k elements so if we overflow that
|
||||
// number then we handle it by putting the size in the first element.
|
||||
lenids := f.count()
|
||||
if lenids == 0 {
|
||||
p.count = uint16(lenids)
|
||||
} else if lenids < 0xFFFF {
|
||||
p.count = uint16(lenids)
|
||||
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
|
||||
l := f.count()
|
||||
if l == 0 {
|
||||
p.count = uint16(l)
|
||||
} else if l < 0xFFFF {
|
||||
p.count = uint16(l)
|
||||
var ids []pgid
|
||||
data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
|
||||
unsafeSlice(unsafe.Pointer(&ids), data, l)
|
||||
f.copyall(ids)
|
||||
} else {
|
||||
p.count = 0xFFFF
|
||||
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
|
||||
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
|
||||
var ids []pgid
|
||||
data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
|
||||
unsafeSlice(unsafe.Pointer(&ids), data, l+1)
|
||||
ids[0] = pgid(l)
|
||||
f.copyall(ids[1:])
|
||||
}
|
||||
|
||||
return nil
|
||||
|
2 vendor/go.etcd.io/bbolt/freelist_hmap.go generated vendored
@@ -27,7 +27,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
f.allocs[pid] = txid

for i := pgid(0); i < pgid(n); i++ {
delete(f.cache, pid+pgid(i))
delete(f.cache, pid+i)
}
return pid
}
5 vendor/go.etcd.io/bbolt/go.mod generated vendored Normal file
@@ -0,0 +1,5 @@
module go.etcd.io/bbolt

go 1.12

require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
56
vendor/go.etcd.io/bbolt/node.go
generated
vendored
56
vendor/go.etcd.io/bbolt/node.go
generated
vendored
@ -41,19 +41,19 @@ func (n *node) size() int {
|
||||
sz, elsz := pageHeaderSize, n.pageElementSize()
|
||||
for i := 0; i < len(n.inodes); i++ {
|
||||
item := &n.inodes[i]
|
||||
sz += elsz + len(item.key) + len(item.value)
|
||||
sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
|
||||
}
|
||||
return sz
|
||||
return int(sz)
|
||||
}
|
||||
|
||||
// sizeLessThan returns true if the node is less than a given size.
|
||||
// This is an optimization to avoid calculating a large node when we only need
|
||||
// to know if it fits inside a certain page size.
|
||||
func (n *node) sizeLessThan(v int) bool {
|
||||
func (n *node) sizeLessThan(v uintptr) bool {
|
||||
sz, elsz := pageHeaderSize, n.pageElementSize()
|
||||
for i := 0; i < len(n.inodes); i++ {
|
||||
item := &n.inodes[i]
|
||||
sz += elsz + len(item.key) + len(item.value)
|
||||
sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
|
||||
if sz >= v {
|
||||
return false
|
||||
}
|
||||
@ -62,7 +62,7 @@ func (n *node) sizeLessThan(v int) bool {
|
||||
}
|
||||
|
||||
// pageElementSize returns the size of each page element based on the type of node.
|
||||
func (n *node) pageElementSize() int {
|
||||
func (n *node) pageElementSize() uintptr {
|
||||
if n.isLeaf {
|
||||
return leafPageElementSize
|
||||
}
|
||||
@ -207,10 +207,17 @@ func (n *node) write(p *page) {
|
||||
}
|
||||
|
||||
// Loop over each item and write it to the page.
|
||||
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
|
||||
// off tracks the offset into p of the start of the next data.
|
||||
off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
|
||||
for i, item := range n.inodes {
|
||||
_assert(len(item.key) > 0, "write: zero-length inode key")
|
||||
|
||||
// Create a slice to write into of needed size and advance
|
||||
// byte pointer for next iteration.
|
||||
sz := len(item.key) + len(item.value)
|
||||
b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
|
||||
off += uintptr(sz)
|
||||
|
||||
// Write the page element.
|
||||
if n.isLeaf {
|
||||
elem := p.leafPageElement(uint16(i))
|
||||
@ -226,20 +233,9 @@ func (n *node) write(p *page) {
|
||||
_assert(elem.pgid != p.id, "write: circular dependency occurred")
|
||||
}
|
||||
|
||||
// If the length of key+value is larger than the max allocation size
|
||||
// then we need to reallocate the byte array pointer.
|
||||
//
|
||||
// See: https://github.com/boltdb/bolt/pull/335
|
||||
klen, vlen := len(item.key), len(item.value)
|
||||
if len(b) < klen+vlen {
|
||||
b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
|
||||
}
|
||||
|
||||
// Write data for the element to the end of the page.
|
||||
copy(b[0:], item.key)
|
||||
b = b[klen:]
|
||||
copy(b[0:], item.value)
|
||||
b = b[vlen:]
|
||||
l := copy(b, item.key)
|
||||
copy(b[l:], item.value)
|
||||
}
|
||||
|
||||
// DEBUG ONLY: n.dump()
|
||||
@ -247,7 +243,7 @@ func (n *node) write(p *page) {
|
||||
|
||||
// split breaks up a node into multiple smaller nodes, if appropriate.
|
||||
// This should only be called from the spill() function.
|
||||
func (n *node) split(pageSize int) []*node {
|
||||
func (n *node) split(pageSize uintptr) []*node {
|
||||
var nodes []*node
|
||||
|
||||
node := n
|
||||
@ -270,7 +266,7 @@ func (n *node) split(pageSize int) []*node {
|
||||
|
||||
// splitTwo breaks up a node into two smaller nodes, if appropriate.
|
||||
// This should only be called from the split() function.
|
||||
func (n *node) splitTwo(pageSize int) (*node, *node) {
|
||||
func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
|
||||
// Ignore the split if the page doesn't have at least enough nodes for
|
||||
// two pages or if the nodes can fit in a single page.
|
||||
if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
|
||||
@ -312,18 +308,18 @@ func (n *node) splitTwo(pageSize int) (*node, *node) {
|
||||
// splitIndex finds the position where a page will fill a given threshold.
|
||||
// It returns the index as well as the size of the first page.
|
||||
// This is only be called from split().
|
||||
func (n *node) splitIndex(threshold int) (index, sz int) {
|
||||
func (n *node) splitIndex(threshold int) (index, sz uintptr) {
|
||||
sz = pageHeaderSize
|
||||
|
||||
// Loop until we only have the minimum number of keys required for the second page.
|
||||
for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
|
||||
index = i
|
||||
index = uintptr(i)
|
||||
inode := n.inodes[i]
|
||||
elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
|
||||
elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value))
|
||||
|
||||
// If we have at least the minimum number of keys and adding another
|
||||
// node would put us over the threshold then exit and return.
|
||||
if i >= minKeysPerPage && sz+elsize > threshold {
|
||||
if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
|
||||
break
|
||||
}
|
||||
|
||||
@ -356,7 +352,7 @@ func (n *node) spill() error {
|
||||
n.children = nil
|
||||
|
||||
// Split nodes into appropriate sizes. The first node will always be n.
|
||||
var nodes = n.split(tx.db.pageSize)
|
||||
var nodes = n.split(uintptr(tx.db.pageSize))
|
||||
for _, node := range nodes {
|
||||
// Add node's page to the freelist if it's not new.
|
||||
if node.pgid > 0 {
|
||||
@ -587,9 +583,11 @@ func (n *node) dump() {
|
||||
|
||||
type nodes []*node
|
||||
|
||||
func (s nodes) Len() int { return len(s) }
|
||||
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
|
||||
func (s nodes) Len() int { return len(s) }
|
||||
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s nodes) Less(i, j int) bool {
|
||||
return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
|
||||
}
|
||||
|
||||
// inode represents an internal node inside of a node.
|
||||
// It can be used to point to elements in a page or point
|
||||
|
41
vendor/go.etcd.io/bbolt/page.go
generated
vendored
41
vendor/go.etcd.io/bbolt/page.go
generated
vendored
@ -7,12 +7,12 @@ import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
|
||||
const pageHeaderSize = unsafe.Sizeof(page{})
|
||||
|
||||
const minKeysPerPage = 2
|
||||
|
||||
const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
|
||||
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
|
||||
const branchPageElementSize = unsafe.Sizeof(branchPageElement{})
|
||||
const leafPageElementSize = unsafe.Sizeof(leafPageElement{})
|
||||
|
||||
const (
|
||||
branchPageFlag = 0x01
|
||||
@ -32,7 +32,6 @@ type page struct {
|
||||
flags uint16
|
||||
count uint16
|
||||
overflow uint32
|
||||
ptr uintptr
|
||||
}
|
||||
|
||||
// typ returns a human readable page type string used for debugging.
|
||||
@ -51,13 +50,13 @@ func (p *page) typ() string {
|
||||
|
||||
// meta returns a pointer to the metadata section of the page.
|
||||
func (p *page) meta() *meta {
|
||||
return (*meta)(unsafe.Pointer(&p.ptr))
|
||||
return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
|
||||
}
|
||||
|
||||
// leafPageElement retrieves the leaf node by index
|
||||
func (p *page) leafPageElement(index uint16) *leafPageElement {
|
||||
n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
|
||||
return n
|
||||
return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
|
||||
leafPageElementSize, int(index)))
|
||||
}
|
||||
|
||||
// leafPageElements retrieves a list of leaf nodes.
|
||||
@ -65,12 +64,16 @@ func (p *page) leafPageElements() []leafPageElement {
|
||||
if p.count == 0 {
|
||||
return nil
|
||||
}
|
||||
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||
var elems []leafPageElement
|
||||
data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
|
||||
unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
|
||||
return elems
|
||||
}
|
||||
|
||||
// branchPageElement retrieves the branch node by index
|
||||
func (p *page) branchPageElement(index uint16) *branchPageElement {
|
||||
return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
|
||||
return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
|
||||
unsafe.Sizeof(branchPageElement{}), int(index)))
|
||||
}
|
||||
|
||||
// branchPageElements retrieves a list of branch nodes.
|
||||
@ -78,12 +81,15 @@ func (p *page) branchPageElements() []branchPageElement {
|
||||
if p.count == 0 {
|
||||
return nil
|
||||
}
|
||||
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||
var elems []branchPageElement
|
||||
data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
|
||||
unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
|
||||
return elems
|
||||
}
|
||||
|
||||
// dump writes n bytes of the page to STDERR as hex output.
|
||||
func (p *page) hexdump(n int) {
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
|
||||
buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
|
||||
fmt.Fprintf(os.Stderr, "%x\n", buf)
|
||||
}
|
||||
|
||||
@ -102,8 +108,7 @@ type branchPageElement struct {
|
||||
|
||||
// key returns a byte slice of the node key.
|
||||
func (n *branchPageElement) key() []byte {
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
|
||||
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
|
||||
return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
|
||||
}
|
||||
|
||||
// leafPageElement represents a node on a leaf page.
|
||||
@ -116,14 +121,16 @@ type leafPageElement struct {
|
||||
|
||||
// key returns a byte slice of the node key.
|
||||
func (n *leafPageElement) key() []byte {
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
|
||||
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
|
||||
i := int(n.pos)
|
||||
j := i + int(n.ksize)
|
||||
return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
|
||||
}
|
||||
|
||||
// value returns a byte slice of the node value.
|
||||
func (n *leafPageElement) value() []byte {
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
|
||||
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
|
||||
i := int(n.pos) + int(n.ksize)
|
||||
j := i + int(n.vsize)
|
||||
return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
|
||||
}
|
||||
|
||||
// PageInfo represents human readable information about a page.
|
||||
|
18
vendor/go.etcd.io/bbolt/tx.go
generated
vendored
18
vendor/go.etcd.io/bbolt/tx.go
generated
vendored
@ -523,20 +523,18 @@ func (tx *Tx) write() error {
|
||||
|
||||
// Write pages to disk in order.
|
||||
for _, p := range pages {
|
||||
size := (int(p.overflow) + 1) * tx.db.pageSize
|
||||
rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize)
|
||||
offset := int64(p.id) * int64(tx.db.pageSize)
|
||||
var written uintptr
|
||||
|
||||
// Write out page in "max allocation" sized chunks.
|
||||
ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
|
||||
for {
|
||||
// Limit our write to our max allocation size.
|
||||
sz := size
|
||||
sz := rem
|
||||
if sz > maxAllocSize-1 {
|
||||
sz = maxAllocSize - 1
|
||||
}
|
||||
buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))
|
||||
|
||||
// Write chunk to disk.
|
||||
buf := ptr[:sz]
|
||||
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -545,14 +543,14 @@ func (tx *Tx) write() error {
|
||||
tx.stats.Write++
|
||||
|
||||
// Exit inner for loop if we've written all the chunks.
|
||||
size -= sz
|
||||
if size == 0 {
|
||||
rem -= sz
|
||||
if rem == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Otherwise move offset forward and move pointer to next chunk.
|
||||
offset += int64(sz)
|
||||
ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
|
||||
written += uintptr(sz)
|
||||
}
|
||||
}
|
||||
|
||||
@ -571,7 +569,7 @@ func (tx *Tx) write() error {
|
||||
continue
|
||||
}
|
||||
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
|
||||
buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
|
||||
|
||||
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
|
||||
for i := range buf {
|
||||
|
39 vendor/go.etcd.io/bbolt/unsafe.go generated vendored Normal file
@@ -0,0 +1,39 @@
package bbolt

import (
"reflect"
"unsafe"
)

func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
return unsafe.Pointer(uintptr(base) + offset)
}

func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
}

func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
//
// This memory is not allocated from C, but it is unmanaged by Go's
// garbage collector and should behave similarly, and the compiler
// should produce similar code. Note that this conversion allows a
// subslice to begin after the base address, with an optional offset,
// while the URL above does not cover this case and only slices from
// index 0. However, the wiki never says that the address must be to
// the beginning of a C allocation (or even that malloc was used at
// all), so this is believed to be correct.
return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
}

// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by
// the slice parameter. This helper should be used over other direct
// manipulation of reflect.SliceHeader to prevent misuse, namely, converting
// from reflect.SliceHeader to a Go slice type.
func unsafeSlice(slice, data unsafe.Pointer, len int) {
s := (*reflect.SliceHeader)(slice)
s.Data = uintptr(data)
s.Cap = len
s.Len = len
}
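The new unsafe.go helpers above replace the old (*[maxAllocSize]byte) array casts used elsewhere in this diff. A standalone, hedged illustration of the same reflect.SliceHeader technique; viewUint16s is a made-up helper, and the real bbolt helpers operate on mmap'd file data outside the Go heap.

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// viewUint16s builds a []uint16 that aliases count elements starting at data,
// mirroring what unsafeSlice does for the freelist and page element arrays.
func viewUint16s(data unsafe.Pointer, count int) []uint16 {
	var s []uint16
	h := (*reflect.SliceHeader)(unsafe.Pointer(&s))
	h.Data = uintptr(data)
	h.Len = count
	h.Cap = count
	return s
}

func main() {
	arr := [4]uint16{1, 2, 3, 4}
	view := viewUint16s(unsafe.Pointer(&arr[0]), len(arr))
	view[0] = 42          // writes through to the backing array
	fmt.Println(arr)      // [42 2 3 4]
}
```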
23
vendor/golang.org/x/text/README
generated
vendored
23
vendor/golang.org/x/text/README
generated
vendored
@ -1,23 +0,0 @@
|
||||
This repository holds supplementary Go libraries for text processing, many involving Unicode.
|
||||
|
||||
To submit changes to this repository, see http://golang.org/doc/contribute.html.
|
||||
|
||||
To generate the tables in this repository (except for the encoding tables),
|
||||
run go generate from this directory. By default tables are generated for the
|
||||
Unicode version in core and the CLDR version defined in
|
||||
golang.org/x/text/unicode/cldr.
|
||||
|
||||
Running go generate will as a side effect create a DATA subdirectory in this
|
||||
directory which holds all files that are used as a source for generating the
|
||||
tables. This directory will also serve as a cache.
|
||||
|
||||
Run
|
||||
|
||||
go test ./...
|
||||
|
||||
from this directory to run all tests. Add the "-tags icu" flag to also run
|
||||
ICU conformance tests (if available). This requires that you have the correct
|
||||
ICU version installed on your system.
|
||||
|
||||
TODO:
|
||||
- updating unversioned source files.
|
93
vendor/golang.org/x/text/README.md
generated
vendored
Normal file
93
vendor/golang.org/x/text/README.md
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
# Go Text
|
||||
|
||||
This repository holds supplementary Go libraries for text processing, many involving Unicode.
|
||||
|
||||
## Semantic Versioning
|
||||
This repo uses Semantic versioning (http://semver.org/), so
|
||||
1. MAJOR version when you make incompatible API changes,
|
||||
1. MINOR version when you add functionality in a backwards-compatible manner,
|
||||
and
|
||||
1. PATCH version when you make backwards-compatible bug fixes.
|
||||
|
||||
Until version 1.0.0 of x/text is reached, the minor version is considered a
|
||||
major version. So going from 0.1.0 to 0.2.0 is considered to be a major version
|
||||
bump.
|
||||
|
||||
A major new CLDR version is mapped to a minor version increase in x/text.
|
||||
Any other new CLDR version is mapped to a patch version increase in x/text.
|
||||
|
||||
It is important that the Unicode version used in `x/text` matches the one used
|
||||
by your Go compiler. The `x/text` repository supports multiple versions of
|
||||
Unicode and will match the version of Unicode to that of the Go compiler. At the
|
||||
moment this is supported for Go compilers from version 1.7.
|
||||
|
||||
## Download/Install
|
||||
|
||||
The easiest way to install is to run `go get -u golang.org/x/text`. You can
|
||||
also manually git clone the repository to `$GOPATH/src/golang.org/x/text`.
|
||||
|
||||
## Contribute
|
||||
To submit changes to this repository, see http://golang.org/doc/contribute.html.
|
||||
|
||||
To generate the tables in this repository (except for the encoding tables),
|
||||
run go generate from this directory. By default tables are generated for the
|
||||
Unicode version in core and the CLDR version defined in
|
||||
golang.org/x/text/unicode/cldr.
|
||||
|
||||
Running go generate will as a side effect create a DATA subdirectory in this
|
||||
directory, which holds all files that are used as a source for generating the
|
||||
tables. This directory will also serve as a cache.
|
||||
|
||||
## Testing
|
||||
Run
|
||||
|
||||
go test ./...
|
||||
|
||||
from this directory to run all tests. Add the "-tags icu" flag to also run
|
||||
ICU conformance tests (if available). This requires that you have the correct
|
||||
ICU version installed on your system.
|
||||
|
||||
TODO:
|
||||
- updating unversioned source files.
|
||||
|
||||
## Generating Tables
|
||||
|
||||
To generate the tables in this repository (except for the encoding
|
||||
tables), run `go generate` from this directory. By default tables are
|
||||
generated for the Unicode version in core and the CLDR version defined in
|
||||
golang.org/x/text/unicode/cldr.
|
||||
|
||||
Running go generate will as a side effect create a DATA subdirectory in this
|
||||
directory which holds all files that are used as a source for generating the
|
||||
tables. This directory will also serve as a cache.
|
||||
|
||||
## Versions
|
||||
To update a Unicode version run
|
||||
|
||||
UNICODE_VERSION=x.x.x go generate
|
||||
|
||||
where `x.x.x` must correspond to a directory in https://www.unicode.org/Public/.
|
||||
If this version is newer than the version in core it will also update the
|
||||
relevant packages there. The idna package in x/net will always be updated.
|
||||
|
||||
To update a CLDR version run
|
||||
|
||||
CLDR_VERSION=version go generate
|
||||
|
||||
where `version` must correspond to a directory in
|
||||
https://www.unicode.org/Public/cldr/.
|
||||
|
||||
Note that the code gets adapted over time to changes in the data and that
|
||||
backwards compatibility is not maintained.
|
||||
So updating to a different version may not work.
|
||||
|
||||
The files in DATA/{iana|icu|w3|whatwg} are currently not versioned.
|
||||
|
||||
## Report Issues / Send Patches
|
||||
|
||||
This repository uses Gerrit for code changes. To learn how to submit changes to
|
||||
this repository, see https://golang.org/doc/contribute.html.
|
||||
|
||||
The main issue tracker for the image repository is located at
|
||||
https://github.com/golang/go/issues. Prefix your issue with "x/text:" in the
|
||||
subject line, so it is easy to find.
|
5 vendor/golang.org/x/text/go.mod generated vendored Normal file
@@ -0,0 +1,5 @@
module golang.org/x/text

require golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e

go 1.11
8
vendor/golang.org/x/text/secure/bidirule/bidirule.go
generated
vendored
8
vendor/golang.org/x/text/secure/bidirule/bidirule.go
generated
vendored
@ -155,6 +155,7 @@ func DirectionString(s string) bidi.Direction {
|
||||
e, sz := bidi.LookupString(s[i:])
|
||||
if sz == 0 {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
c := e.Class()
|
||||
if c == bidi.R || c == bidi.AL || c == bidi.AN {
|
||||
@ -202,13 +203,6 @@ func (t *Transformer) isRTL() bool {
|
||||
return t.seen&isRTL != 0
|
||||
}
|
||||
|
||||
func (t *Transformer) isFinal() bool {
|
||||
if !t.isRTL() {
|
||||
return true
|
||||
}
|
||||
return t.state == ruleLTRFinal || t.state == ruleRTLFinal || t.state == ruleInitial
|
||||
}
|
||||
|
||||
// Reset implements transform.Transformer.
|
||||
func (t *Transformer) Reset() { *t = Transformer{} }
|
||||
|
||||
|
11 vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go generated vendored Normal file
@@ -0,0 +1,11 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.10

package bidirule

func (t *Transformer) isFinal() bool {
return t.state == ruleLTRFinal || t.state == ruleRTLFinal || t.state == ruleInitial
}
14 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go generated vendored Normal file
@@ -0,0 +1,14 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.10

package bidirule

func (t *Transformer) isFinal() bool {
if !t.isRTL() {
return true
}
return t.state == ruleLTRFinal || t.state == ruleRTLFinal || t.state == ruleInitial
}
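The pair of new files above selects an isFinal implementation per Go release with build tags. A hedged sketch of the same pattern with illustrative names; a sibling file carrying the "// +build !go1.10" constraint would supply the fallback.

```go
// message_go110.go: compiled only when building with Go 1.10 or newer.

// +build go1.10

package message

// Greeting is the go1.10+ variant; an untagged project would instead keep
// a single implementation.
func Greeting() string { return "hello from a go1.10+ build" }
```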
12
vendor/golang.org/x/text/transform/transform.go
generated
vendored
12
vendor/golang.org/x/text/transform/transform.go
generated
vendored
@ -78,8 +78,8 @@ type SpanningTransformer interface {
|
||||
// considering the error err.
|
||||
//
|
||||
// A nil error means that all input bytes are known to be identical to the
|
||||
// output produced by the Transformer. A nil error can be be returned
|
||||
// regardless of whether atEOF is true. If err is nil, then then n must
|
||||
// output produced by the Transformer. A nil error can be returned
|
||||
// regardless of whether atEOF is true. If err is nil, then n must
|
||||
// equal len(src); the converse is not necessarily true.
|
||||
//
|
||||
// ErrEndOfSpan means that the Transformer output may differ from the
|
||||
@ -493,7 +493,7 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro
|
||||
return dstL.n, srcL.p, err
|
||||
}
|
||||
|
||||
// Deprecated: use runes.Remove instead.
|
||||
// Deprecated: Use runes.Remove instead.
|
||||
func RemoveFunc(f func(r rune) bool) Transformer {
|
||||
return removeF(f)
|
||||
}
|
||||
@ -648,7 +648,8 @@ func String(t Transformer, s string) (result string, n int, err error) {
|
||||
// Transform the remaining input, growing dst and src buffers as necessary.
|
||||
for {
|
||||
n := copy(src, s[pSrc:])
|
||||
nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
|
||||
atEOF := pSrc+n == len(s)
|
||||
nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], atEOF)
|
||||
pDst += nDst
|
||||
pSrc += nSrc
|
||||
|
||||
@ -659,6 +660,9 @@ func String(t Transformer, s string) (result string, n int, err error) {
|
||||
dst = grow(dst, pDst)
|
||||
}
|
||||
} else if err == ErrShortSrc {
|
||||
if atEOF {
|
||||
return string(dst[:pDst]), pSrc, err
|
||||
}
|
||||
if nSrc == 0 {
|
||||
src = grow(src, 0)
|
||||
}
|
||||
|
2 vendor/golang.org/x/text/unicode/bidi/bidi.go generated vendored
@@ -6,7 +6,7 @@

// Package bidi contains functionality for bidirectional text support.
//
// See http://www.unicode.org/reports/tr9.
// See https://www.unicode.org/reports/tr9.
//
// NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways
// and without notice.
4
vendor/golang.org/x/text/unicode/bidi/bracket.go
generated
vendored
4
vendor/golang.org/x/text/unicode/bidi/bracket.go
generated
vendored
@ -12,7 +12,7 @@ import (
|
||||
|
||||
// This file contains a port of the reference implementation of the
|
||||
// Bidi Parentheses Algorithm:
|
||||
// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java
|
||||
// https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java
|
||||
//
|
||||
// The implementation in this file covers definitions BD14-BD16 and rule N0
|
||||
// of UAX#9.
|
||||
@ -246,7 +246,7 @@ func (p *bracketPairer) getStrongTypeN0(index int) Class {
|
||||
// assuming the given embedding direction.
|
||||
//
|
||||
// It returns ON if no strong type is found. If a single strong type is found,
|
||||
// it returns this this type. Otherwise it returns the embedding direction.
|
||||
// it returns this type. Otherwise it returns the embedding direction.
|
||||
//
|
||||
// TODO: use separate type for "strong" directionality.
|
||||
func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class {
|
||||
|
10  vendor/golang.org/x/text/unicode/bidi/core.go  generated  vendored
@ -7,7 +7,7 @@ package bidi
import "log"

// This implementation is a port based on the reference implementation found at:
// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/
// https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/
//
// described in Unicode Bidirectional Algorithm (UAX #9).
//
@ -480,15 +480,15 @@ func (s *isolatingRunSequence) resolveWeakTypes() {

	// Rule W1.
	// Changes all NSMs.
	preceedingCharacterType := s.sos
	precedingCharacterType := s.sos
	for i, t := range s.types {
		if t == NSM {
			s.types[i] = preceedingCharacterType
			s.types[i] = precedingCharacterType
		} else {
			if t.in(LRI, RLI, FSI, PDI) {
				preceedingCharacterType = ON
				precedingCharacterType = ON
			}
			preceedingCharacterType = t
			precedingCharacterType = t
		}
	}
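Besides the preceeding/preceding spelling fix, the hunk above is the port of UAX #9 rule W1: nonspacing marks take the class of whatever precedes them. A standalone sketch of that rule under simplified, hypothetical names, not the vendored isolatingRunSequence:

package main

import "fmt"

type class int

const (
	strongL class = iota // strong left-to-right
	strongR              // strong right-to-left
	nsm                  // nonspacing mark
	on                   // other neutral
	iso                  // stands in for the isolate classes LRI/RLI/FSI/PDI
)

// ruleW1 gives every NSM the class of the preceding character; an NSM
// directly after an isolate initiator or PDI becomes ON instead.
func ruleW1(types []class, sos class) {
	preceding := sos
	for i, t := range types {
		switch {
		case t == nsm:
			types[i] = preceding
		case t == iso:
			preceding = on
		default:
			preceding = t
		}
	}
}

func main() {
	ts := []class{strongL, nsm, iso, nsm, strongR, nsm}
	ruleW1(ts, strongL)
	fmt.Println(ts) // [0 0 4 3 1 1]: the NSM after the isolate became ON
}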
1815  vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go  generated  vendored  Normal file (diff suppressed because it is too large)
1887  vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go  generated  vendored  Normal file (diff suppressed because it is too large)
1923  vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go  generated  vendored  Normal file (diff suppressed because it is too large)
@ -1,5 +1,7 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

// +build !go1.10

package bidi

// UnicodeVersion is the Unicode version from which the tables in this package are derived.
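The +build !go1.10 constraint added above keeps this fallback table file out of builds on Go 1.10 and newer, where one of the per-Unicode-version tables listed earlier presumably compiles instead. A small, hypothetical file showing the same pre-go1.17 build-tag pattern (file, package, and version names are illustrative only):

// tables_legacy.go (hypothetical): the old-style constraint below must be
// followed by a blank line; it excludes this file from builds on Go 1.10+,
// where a different per-version table file would compile instead.

// +build !go1.10

package tables

// UnicodeVersion mirrors the kind of constant the hunk above documents.
const UnicodeVersion = "9.0.0"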
8  vendor/golang.org/x/text/unicode/norm/composition.go  generated  vendored
@ -407,7 +407,7 @@ func decomposeHangul(buf []byte, r rune) int {

// decomposeHangul algorithmically decomposes a Hangul rune into
// its Jamo components.
// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
// See https://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
func (rb *reorderBuffer) decomposeHangul(r rune) {
	r -= hangulBase
	x := r % jamoTCount
@ -420,7 +420,7 @@ func (rb *reorderBuffer) decomposeHangul(r rune) {
}

// combineHangul algorithmically combines Jamo character components into Hangul.
// See http://unicode.org/reports/tr15/#Hangul for details on combining Hangul.
// See https://unicode.org/reports/tr15/#Hangul for details on combining Hangul.
func (rb *reorderBuffer) combineHangul(s, i, k int) {
	b := rb.rune[:]
	bn := rb.nrune
@ -461,6 +461,10 @@ func (rb *reorderBuffer) combineHangul(s, i, k int) {
// It should only be used to recompose a single segment, as it will not
// handle alternations between Hangul and non-Hangul characters correctly.
func (rb *reorderBuffer) compose() {
	// Lazily load the map used by the combine func below, but do
	// it outside of the loop.
	recompMapOnce.Do(buildRecompMap)

	// UAX #15, section X5 , including Corrigendum #5
	// "In any character sequence beginning with starter S, a character C is
	// blocked from S if and only if there is some character B between S
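decomposeHangul and combineHangul rely on the fact that Hangul syllables decompose purely by arithmetic. A standalone sketch of that arithmetic per UAX #15 (not the vendored reorderBuffer method, which writes into its own rune buffer):

package main

import "fmt"

// Constants from the Unicode standard for algorithmic Hangul decomposition.
const (
	hangulBase = 0xAC00 // first precomposed syllable
	jamoLBase  = 0x1100 // leading consonants
	jamoVBase  = 0x1161 // vowels
	jamoTBase  = 0x11A7 // trailing consonants (index 0 means "none")
	jamoVCount = 21
	jamoTCount = 28
)

// decomposeHangul splits a precomposed syllable into its L, V and
// optional T Jamo.
func decomposeHangul(r rune) []rune {
	idx := r - hangulBase
	l := jamoLBase + idx/(jamoVCount*jamoTCount)
	v := jamoVBase + (idx%(jamoVCount*jamoTCount))/jamoTCount
	t := idx % jamoTCount
	if t == 0 {
		return []rune{l, v}
	}
	return []rune{l, v, jamoTBase + t}
}

func main() {
	fmt.Printf("%U\n", decomposeHangul('한')) // [U+1112 U+1161 U+11AB]
}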
19  vendor/golang.org/x/text/unicode/norm/forminfo.go  generated  vendored
@ -4,6 +4,8 @@

package norm

import "encoding/binary"

// This file contains Form-specific logic and wrappers for data in tables.go.

// Rune info is stored in a separate trie per composing form. A composing form
@ -178,6 +180,17 @@ func (p Properties) TrailCCC() uint8 {
	return ccc[p.tccc]
}

func buildRecompMap() {
	recompMap = make(map[uint32]rune, len(recompMapPacked)/8)
	var buf [8]byte
	for i := 0; i < len(recompMapPacked); i += 8 {
		copy(buf[:], recompMapPacked[i:i+8])
		key := binary.BigEndian.Uint32(buf[:4])
		val := binary.BigEndian.Uint32(buf[4:])
		recompMap[key] = rune(val)
	}
}

// Recomposition
// We use 32-bit keys instead of 64-bit for the two codepoint keys.
// This clips off the bits of three entries, but we know this will not
@ -186,8 +199,14 @@ func (p Properties) TrailCCC() uint8 {
// Note that the recomposition map for NFC and NFKC are identical.

// combine returns the combined rune or 0 if it doesn't exist.
//
// The caller is responsible for calling
// recompMapOnce.Do(buildRecompMap) sometime before this is called.
func combine(a, b rune) rune {
	key := uint32(uint16(a))<<16 + uint32(uint16(b))
	if recompMap == nil {
		panic("caller error") // see func comment
	}
	return recompMap[key]
}
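buildRecompMap above unpacks a string of fixed 8-byte records into recompMap, and combine rebuilds the same 32-bit key from two 16-bit code points. A self-contained sketch of that packing scheme with one hypothetical record (the real data lives in the generated recompMapPacked string):

package main

import (
	"encoding/binary"
	"fmt"
)

// packPair builds the same 32-bit key combine uses: each code point is
// truncated to 16 bits and the two halves are concatenated.
func packPair(a, b rune) uint32 {
	return uint32(uint16(a))<<16 + uint32(uint16(b))
}

func main() {
	// One hypothetical 8-byte record: key (4 bytes) followed by the
	// composed rune (4 bytes), mirroring the layout buildRecompMap reads.
	var record [8]byte
	binary.BigEndian.PutUint32(record[:4], packPair('e', 0x0301))
	binary.BigEndian.PutUint32(record[4:], uint32('é'))

	// Unpack it the same way buildRecompMap does.
	m := map[uint32]rune{}
	key := binary.BigEndian.Uint32(record[:4])
	m[key] = rune(binary.BigEndian.Uint32(record[4:]))

	fmt.Printf("%q\n", m[packPair('e', 0x0301)]) // 'é'
}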
3  vendor/golang.org/x/text/unicode/norm/iter.go  generated  vendored
@ -128,8 +128,9 @@ func (i *Iter) Next() []byte {
func nextASCIIBytes(i *Iter) []byte {
	p := i.p + 1
	if p >= i.rb.nsrc {
		p0 := i.p
		i.setDone()
		return i.rb.src.bytes[i.p:p]
		return i.rb.src.bytes[p0:p]
	}
	if i.rb.src.bytes[p] < utf8.RuneSelf {
		p0 := i.p
4  vendor/golang.org/x/text/unicode/norm/normalize.go  generated  vendored
@ -29,8 +29,8 @@ import (
// proceed independently on both sides:
//   f(x) == append(f(x[0:n]), f(x[n:])...)
//
// References: http://unicode.org/reports/tr15/ and
// http://unicode.org/notes/tn5/.
// References: https://unicode.org/reports/tr15/ and
// https://unicode.org/notes/tn5/.
type Form int

const (
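Form, declared in the hunk above, is the package's user-facing entry point. A minimal usage sketch, assuming the standard golang.org/x/text/unicode/norm import path:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// NFC composes, NFD decomposes; both are exported values of the Form
	// type declared above.
	s := "e\u0301" // 'e' + combining acute accent
	fmt.Println(norm.NFC.String(s) == "\u00e9") // true
	fmt.Println(norm.NFD.String("\u00e9") == s) // true
	fmt.Println(norm.NFC.IsNormalString(s))     // false
}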
4  vendor/golang.org/x/text/unicode/norm/readwriter.go  generated  vendored
@ -60,8 +60,8 @@ func (w *normWriter) Close() error {
}

// Writer returns a new writer that implements Write(b)
// by writing f(b) to w. The returned writer may use an
// an internal buffer to maintain state across Write calls.
// by writing f(b) to w. The returned writer may use an
// internal buffer to maintain state across Write calls.
// Calling its Close method writes any buffered data to w.
func (f Form) Writer(w io.Writer) io.WriteCloser {
	wr := &normWriter{rb: reorderBuffer{}, w: w}
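A short usage sketch for the Writer method documented above; the combining mark arrives in a separate Write call, so the internal buffer mentioned in the comment is what lets the composed form come out on Close (illustrative only, error returns ignored for brevity):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	var buf bytes.Buffer
	w := norm.NFC.Writer(&buf)       // the method shown above
	w.Write([]byte("e"))             // base letter in one call
	w.Write([]byte("\u0301"))        // combining acute accent in the next
	w.Close()                        // flushes the buffered, composed segment
	fmt.Printf("%q\n", buf.String()) // "é"
}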
7657  vendor/golang.org/x/text/unicode/norm/tables10.0.0.go  generated  vendored  Normal file (diff suppressed because it is too large)
7693  vendor/golang.org/x/text/unicode/norm/tables11.0.0.go  generated  vendored  Normal file (diff suppressed because it is too large)
7710  vendor/golang.org/x/text/unicode/norm/tables12.0.0.go  generated  vendored  Normal file (diff suppressed because it is too large)
Some files were not shown because too many files have changed in this diff.