Merge pull request #1521 from AkihiroSuda/revendor-libcontainer
vendor runc v1.0.0-rc91
Commit aa0f4fd37b
@@ -324,7 +324,7 @@ func WithDevices(osi osinterface.OS, config *runtime.ContainerConfig) oci.SpecOp
 				Type:   string(dev.Type),
 				Major:  &dev.Major,
 				Minor:  &dev.Minor,
-				Access: dev.Permissions,
+				Access: string(dev.Permissions),
 			})
 		}
 		return nil
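The only functional change in this hunk is the explicit conversion of dev.Permissions to string. This suggests the re-vendored runc now declares the device-permissions field as a named string type rather than a plain string, so assigning it to the OCI spec's plain-string Access field needs a conversion. A minimal sketch with hypothetical stand-in types (not the real runc/OCI definitions):

// devicePermissions stands in for the named string type assumed to be used by
// the vendored runc; linuxDeviceCgroup stands in for the OCI spec struct.
package main

import "fmt"

type devicePermissions string

type linuxDeviceCgroup struct {
	Access string // plain string, as in the OCI runtime spec
}

func main() {
	perms := devicePermissions("rwm")
	// Explicit conversion, mirroring Access: string(dev.Permissions) above.
	rule := linuxDeviceCgroup{Access: string(perms)}
	fmt.Println(rule.Access)
}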
@@ -33,6 +33,7 @@ import (
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/oci"
 	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
+	libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
 	"github.com/opencontainers/runc/libcontainer/devices"
 	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/selinux/go-selinux"
@@ -1193,8 +1194,15 @@ func TestPrivilegedDevices(t *testing.T) {
 			spec, err := c.containerSpec(t.Name(), testSandboxID, testPid, "", testContainerName, containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
 			assert.NoError(t, err)

-			hostDevices, err := devices.HostDevices()
+			hostDevicesRaw, err := devices.HostDevices()
 			assert.NoError(t, err)
+			var hostDevices []*libcontainerconfigs.Device
+			for _, dev := range hostDevicesRaw {
+				// https://github.com/containerd/cri/pull/1521#issuecomment-652807951
+				if dev.DeviceRule.Major != 0 {
+					hostDevices = append(hostDevices, dev)
+				}
+			}

 			if test.expectHostDevices {
 				assert.Len(t, spec.Linux.Devices, len(hostDevices))
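The test now filters the result of devices.HostDevices(): entries whose DeviceRule.Major is 0 are skipped before comparing against spec.Linux.Devices (see the linked PR comment for the rationale). A self-contained sketch of that filter, using stand-in types so it runs without the vendored runc packages:

// device and deviceRule stand in for the runc libcontainer/configs types; the
// filter mirrors the loop added to TestPrivilegedDevices above.
package main

import "fmt"

type deviceRule struct {
	Major int64
}

type device struct {
	Path       string
	DeviceRule deviceRule
}

func filterHostDevices(raw []*device) []*device {
	var out []*device
	for _, dev := range raw {
		if dev.DeviceRule.Major != 0 {
			out = append(out, dev)
		}
	}
	return out
}

func main() {
	raw := []*device{
		{Path: "/dev/null", DeviceRule: deviceRule{Major: 1}},
		{Path: "/dev/odd", DeviceRule: deviceRule{Major: 0}}, // dropped by the filter
	}
	fmt.Println(len(filterHostDevices(raw))) // 1
}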
vendor.conf (21 changed lines)

@@ -1,13 +1,13 @@
 # cri dependencies
 github.com/docker/docker 4634ce647cf2ce2c6031129ccd109e557244986f
-github.com/opencontainers/selinux bb88c45a3863dc4c38320d71b890bb30ef9feba4
+github.com/opencontainers/selinux v1.5.1
 github.com/tchap/go-patricia v2.2.6

 # containerd dependencies
 github.com/beorn7/perks v1.0.1
 github.com/BurntSushi/toml v0.3.1
 github.com/cespare/xxhash/v2 v2.1.1
-github.com/containerd/cgroups b4448137398923af7f4918b8b2ad8249172ca7a6
+github.com/containerd/cgroups e9676da73eddf8ed2433f77aaf3b9cf8f0f75b8c
 github.com/containerd/console v1.0.0
 github.com/containerd/containerd v1.4.0-beta.0
 github.com/containerd/continuity d3ef23f19fbb106bb73ffde425d07a9187e30745
@@ -16,14 +16,14 @@ github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076eb
 github.com/containerd/ttrpc v1.0.1
 github.com/containerd/typeurl v1.0.1
 github.com/coreos/go-systemd/v22 v22.0.0
-github.com/cpuguy83/go-md2man v1.0.10
+github.com/cpuguy83/go-md2man/v2 v2.0.0
 github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f
 github.com/docker/go-metrics v0.0.1
 github.com/docker/go-units v0.4.0
 github.com/godbus/dbus/v5 v5.0.3
 github.com/gogo/googleapis v1.3.2
 github.com/gogo/protobuf v1.3.1
-github.com/golang/protobuf v1.3.3
+github.com/golang/protobuf v1.3.5
 github.com/google/uuid v1.1.1
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 github.com/hashicorp/errwrap v1.0.0
@@ -36,28 +36,29 @@ github.com/Microsoft/go-winio v0.4.14
 github.com/Microsoft/hcsshim v0.8.9
 github.com/opencontainers/go-digest v1.0.0
 github.com/opencontainers/image-spec v1.0.1
-github.com/opencontainers/runc v1.0.0-rc10
+github.com/opencontainers/runc v1.0.0-rc91
-github.com/opencontainers/runtime-spec v1.0.2
+github.com/opencontainers/runtime-spec 237cc4f519e2e8f9b235bacccfa8ef5a84df2875 # v1.0.2-14-g8e2f17c
 github.com/pkg/errors v0.9.1
 github.com/prometheus/client_golang v1.6.0
 github.com/prometheus/client_model v0.2.0
 github.com/prometheus/common v0.9.1
 github.com/prometheus/procfs v0.0.11
-github.com/russross/blackfriday v1.5.2
+github.com/russross/blackfriday/v2 v2.0.1
+github.com/shurcooL/sanitized_anchor_name v1.0.0
 github.com/sirupsen/logrus v1.6.0
 github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
-github.com/urfave/cli v1.22.0
+github.com/urfave/cli v1.22.1 # NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
 go.etcd.io/bbolt v1.3.3
 go.opencensus.io v0.22.0
 golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3
 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
-golang.org/x/sys 5c8b2ff67527cb88b770f693cebf3799036d8bc0
+golang.org/x/sys 9dae0f8f577553e0f21298e18926efc9644c281d
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
 google.golang.org/genproto e50cd9704f63023d62cd06a1994b98227fc4d21a
 google.golang.org/grpc v1.27.1

 # cgroups dependencies
-github.com/cilium/ebpf 4032b1d8aae306b7bb94a2a11002932caf88c644
+github.com/cilium/ebpf 1c8d4c9ef7759622653a1d319284a44652333b28

 # kubernetes dependencies
 github.com/davecgh/go-spew v1.1.1
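Each vendor.conf entry pins a Go import path to either a release tag or a commit hash, and anything after # is an informational comment. The runc and runtime-spec lines above show both forms:

github.com/opencontainers/runc v1.0.0-rc91
github.com/opencontainers/runtime-spec 237cc4f519e2e8f9b235bacccfa8ef5a84df2875 # v1.0.2-14-g8e2f17c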
vendor/github.com/cilium/ebpf/abi.go (generated, vendored; 16 changed lines)

@@ -3,14 +3,13 @@ package ebpf
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"os"
 	"syscall"

 	"github.com/cilium/ebpf/internal"
-
-	"github.com/pkg/errors"
 )

 // MapABI are the attributes of a Map which are available across all supported kernels.
@@ -35,7 +34,7 @@ func newMapABIFromSpec(spec *MapSpec) *MapABI {
 func newMapABIFromFd(fd *internal.FD) (string, *MapABI, error) {
 	info, err := bpfGetMapInfoByFD(fd)
 	if err != nil {
-		if errors.Cause(err) == syscall.EINVAL {
+		if errors.Is(err, syscall.EINVAL) {
 			abi, err := newMapABIFromProc(fd)
 			return "", abi, err
 		}
@@ -98,7 +97,7 @@ func newProgramABIFromSpec(spec *ProgramSpec) *ProgramABI {
 func newProgramABIFromFd(fd *internal.FD) (string, *ProgramABI, error) {
 	info, err := bpfGetProgInfoByFD(fd)
 	if err != nil {
-		if errors.Cause(err) == syscall.EINVAL {
+		if errors.Is(err, syscall.EINVAL) {
 			return newProgramABIFromProc(fd)
 		}

@@ -127,7 +126,7 @@ func newProgramABIFromProc(fd *internal.FD) (string, *ProgramABI, error) {
 		"prog_type": &abi.Type,
 		"prog_tag":  &name,
 	})
-	if errors.Cause(err) == errMissingFields {
+	if errors.Is(err, errMissingFields) {
 		return "", nil, &internal.UnsupportedFeatureError{
 			Name:           "reading ABI from /proc/self/fdinfo",
 			MinimumVersion: internal.Version{4, 11, 0},
@@ -152,7 +151,10 @@ func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error {
 	}
 	defer fh.Close()

-	return errors.Wrap(scanFdInfoReader(fh, fields), fh.Name())
+	if err := scanFdInfoReader(fh, fields); err != nil {
+		return fmt.Errorf("%s: %w", fh.Name(), err)
+	}
+	return nil
 }

 var errMissingFields = errors.New("missing fields")
@@ -176,7 +178,7 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
 		}

 		if n, err := fmt.Fscanln(bytes.NewReader(parts[1]), field); err != nil || n != 1 {
-			return errors.Wrapf(err, "can't parse field %s", name)
+			return fmt.Errorf("can't parse field %s: %v", name, err)
 		}

 		scanned++
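The changes in this file all follow one pattern: the vendored ebpf code drops github.com/pkg/errors in favour of the standard library, wrapping with fmt.Errorf and %w instead of errors.Wrap, and matching with errors.Is instead of comparing errors.Cause(err) directly. A minimal sketch of the before/after pattern (hypothetical helper, not code from this repository):

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// doBPFCall is a hypothetical helper that fails with EINVAL.
func doBPFCall() error {
	// was: return errors.Wrap(syscall.EINVAL, "bpf call failed")
	return fmt.Errorf("bpf call failed: %w", syscall.EINVAL)
}

func main() {
	err := doBPFCall()
	// was: if errors.Cause(err) == syscall.EINVAL
	if errors.Is(err, syscall.EINVAL) {
		fmt.Println("EINVAL is still detectable through the wrapped error")
	}
}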
vendor/github.com/cilium/ebpf/asm/instruction.go (generated, vendored; 114 changed lines)

@@ -2,12 +2,11 @@ package asm

 import (
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io"
 	"math"
 	"strings"
-
-	"github.com/pkg/errors"
 )

 // InstructionSize is the size of a BPF instruction in bytes
@@ -39,10 +38,12 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err
 	}

 	ins.OpCode = bi.OpCode
-	ins.Dst = bi.Registers.Dst()
-	ins.Src = bi.Registers.Src()
 	ins.Offset = bi.Offset
 	ins.Constant = int64(bi.Constant)
+	ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo)
+	if err != nil {
+		return 0, fmt.Errorf("can't unmarshal registers: %s", err)
+	}

 	if !bi.OpCode.isDWordLoad() {
 		return InstructionSize, nil
@@ -75,9 +76,14 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
 		cons = int32(uint32(ins.Constant))
 	}

+	regs, err := newBPFRegisters(ins.Dst, ins.Src, bo)
+	if err != nil {
+		return 0, fmt.Errorf("can't marshal registers: %s", err)
+	}
+
 	bpfi := bpfInstruction{
 		ins.OpCode,
-		newBPFRegisters(ins.Dst, ins.Src),
+		regs,
 		ins.Offset,
 		cons,
 	}
@@ -103,22 +109,52 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)

 // RewriteMapPtr changes an instruction to use a new map fd.
 //
-// Returns an error if the fd is invalid, or the instruction
-// is incorrect.
+// Returns an error if the instruction doesn't load a map.
 func (ins *Instruction) RewriteMapPtr(fd int) error {
 	if !ins.OpCode.isDWordLoad() {
-		return errors.Errorf("%s is not a 64 bit load", ins.OpCode)
+		return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
 	}

-	if fd < 0 {
-		return errors.New("invalid fd")
+	if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue {
+		return errors.New("not a load from a map")
 	}

-	ins.Src = R1
-	ins.Constant = int64(fd)
+	// Preserve the offset value for direct map loads.
+	offset := uint64(ins.Constant) & (math.MaxUint32 << 32)
+	rawFd := uint64(uint32(fd))
+	ins.Constant = int64(offset | rawFd)
 	return nil
 }

+func (ins *Instruction) mapPtr() uint32 {
+	return uint32(uint64(ins.Constant) & math.MaxUint32)
+}
+
+// RewriteMapOffset changes the offset of a direct load from a map.
+//
+// Returns an error if the instruction is not a direct load.
+func (ins *Instruction) RewriteMapOffset(offset uint32) error {
+	if !ins.OpCode.isDWordLoad() {
+		return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
+	}
+
+	if ins.Src != PseudoMapValue {
+		return errors.New("not a direct load from a map")
+	}
+
+	fd := uint64(ins.Constant) & math.MaxUint32
+	ins.Constant = int64(uint64(offset)<<32 | fd)
+	return nil
+}
+
+func (ins *Instruction) mapOffset() uint32 {
+	return uint32(uint64(ins.Constant) >> 32)
+}
+
+func (ins *Instruction) isLoadFromMap() bool {
+	return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
+}
+
 // Format implements fmt.Formatter.
 func (ins Instruction) Format(f fmt.State, c rune) {
 	if c != 'v' {
@@ -139,6 +175,19 @@ func (ins Instruction) Format(f fmt.State, c rune) {
 		return
 	}

+	if ins.isLoadFromMap() {
+		fd := int32(ins.mapPtr())
+		switch ins.Src {
+		case PseudoMapFD:
+			fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
+
+		case PseudoMapValue:
+			fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset())
+		}
+
+		goto ref
+	}
+
 	fmt.Fprintf(f, "%v ", op)
 	switch cls := op.Class(); cls {
 	case LdClass, LdXClass, StClass, StXClass:
@@ -166,7 +215,7 @@ func (ins Instruction) Format(f fmt.State, c rune) {
 	case JumpClass:
 		switch jop := op.JumpOp(); jop {
 		case Call:
-			if ins.Src == R1 {
+			if ins.Src == PseudoCall {
 				// bpf-to-bpf call
 				fmt.Fprint(f, ins.Constant)
 			} else {
@@ -183,6 +232,7 @@ func (ins Instruction) Format(f fmt.State, c rune) {
 		}
 	}

+ref:
 	if ins.Reference != "" {
 		fmt.Fprintf(f, " <%s>", ins.Reference)
 	}
@@ -235,7 +285,7 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) {
 		}

 		if _, ok := offsets[ins.Symbol]; ok {
-			return nil, errors.Errorf("duplicate symbol %s", ins.Symbol)
+			return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
 		}

 		offsets[ins.Symbol] = i
@@ -273,7 +323,7 @@ func (insns Instructions) marshalledOffsets() (map[string]int, error) {
 		}

 		if _, ok := symbols[ins.Symbol]; ok {
-			return nil, errors.Errorf("duplicate symbol %s", ins.Symbol)
+			return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
 		}

 		symbols[ins.Symbol] = currentPos
@@ -350,11 +400,11 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
 	num := 0
 	for i, ins := range insns {
 		switch {
-		case ins.OpCode.JumpOp() == Call && ins.Constant == -1:
+		case ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall && ins.Constant == -1:
 			// Rewrite bpf to bpf call
 			offset, ok := absoluteOffsets[ins.Reference]
 			if !ok {
-				return errors.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
+				return fmt.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
 			}

 			ins.Constant = int64(offset - num - 1)
@@ -363,7 +413,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
 			// Rewrite jump to label
 			offset, ok := absoluteOffsets[ins.Reference]
 			if !ok {
-				return errors.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
+				return fmt.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
 			}

 			ins.Offset = int16(offset - num - 1)
@@ -371,7 +421,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {

 		n, err := ins.Marshal(w, bo)
 		if err != nil {
-			return errors.Wrapf(err, "instruction %d", i)
+			return fmt.Errorf("instruction %d: %w", i, err)
 		}

 		num += int(n / InstructionSize)
@@ -388,16 +438,26 @@ type bpfInstruction struct {

 type bpfRegisters uint8

-func newBPFRegisters(dst, src Register) bpfRegisters {
-	return bpfRegisters((src << 4) | (dst & 0xF))
+func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) {
+	switch bo {
+	case binary.LittleEndian:
+		return bpfRegisters((src << 4) | (dst & 0xF)), nil
+	case binary.BigEndian:
+		return bpfRegisters((dst << 4) | (src & 0xF)), nil
+	default:
+		return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
+	}
 }

-func (r bpfRegisters) Dst() Register {
-	return Register(r & 0xF)
-}
-
-func (r bpfRegisters) Src() Register {
-	return Register(r >> 4)
+func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) {
+	switch bo {
+	case binary.LittleEndian:
+		return Register(r & 0xF), Register(r >> 4), nil
+	case binary.BigEndian:
+		return Register(r >> 4), Register(r & 0xf), nil
+	default:
+		return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
+	}
 }

 type unreferencedSymbolError struct {
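The new RewriteMapPtr, RewriteMapOffset, mapPtr and mapOffset methods above all rely on the same encoding: the 64-bit immediate of a DWord load carries the map fd in its low 32 bits and, for direct map-value loads, the offset in its high 32 bits. A small self-contained sketch of that packing arithmetic (plain Go, no ebpf imports):

package main

import (
	"fmt"
	"math"
)

// pack combines a map fd and a value offset the way the rewritten constant does.
func pack(fd int, offset uint32) int64 {
	return int64(uint64(offset)<<32 | uint64(uint32(fd)))
}

// unpackFd mirrors mapPtr(): the low 32 bits hold the fd.
func unpackFd(constant int64) uint32 {
	return uint32(uint64(constant) & math.MaxUint32)
}

// unpackOffset mirrors mapOffset(): the high 32 bits hold the offset.
func unpackOffset(constant int64) uint32 {
	return uint32(uint64(constant) >> 32)
}

func main() {
	c := pack(7, 4096)
	fmt.Println(unpackFd(c), unpackOffset(c)) // 7 4096
}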
vendor/github.com/cilium/ebpf/asm/jump.go (generated, vendored; 2 changed lines)

@@ -95,7 +95,7 @@ func (op JumpOp) Label(label string) Instruction {
 	if op == Call {
 		return Instruction{
 			OpCode:    OpCode(JumpClass).SetJumpOp(Call),
-			Src:       R1,
+			Src:       PseudoCall,
 			Constant:  -1,
 			Reference: label,
 		}
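Together with the Marshal change above (which now also checks ins.Src == PseudoCall), a bpf-to-bpf call placeholder is identified by its pseudo source register rather than by R1 alone. A hedged usage sketch with the vendored asm package (assumes the package is importable at this revision):

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// Call.Label builds a placeholder call that Marshal later resolves
	// against the referenced symbol.
	ins := asm.Call.Label("helper_fn")
	fmt.Println(ins.Src == asm.PseudoCall, ins.Constant, ins.Reference) // true -1 helper_fn
}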
vendor/github.com/cilium/ebpf/asm/load_store.go (generated, vendored; 17 changed lines)

@@ -110,11 +110,26 @@ func LoadMapPtr(dst Register, fd int) Instruction {
 	return Instruction{
 		OpCode:   LoadImmOp(DWord),
 		Dst:      dst,
-		Src:      R1,
+		Src:      PseudoMapFD,
 		Constant: int64(fd),
 	}
 }

+// LoadMapValue stores a pointer to the value at a certain offset of a map.
+func LoadMapValue(dst Register, fd int, offset uint32) Instruction {
+	if fd < 0 {
+		return Instruction{OpCode: InvalidOpCode}
+	}
+
+	fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd))
+	return Instruction{
+		OpCode:   LoadImmOp(DWord),
+		Dst:      dst,
+		Src:      PseudoMapValue,
+		Constant: int64(fdAndOffset),
+	}
+}
+
 // LoadIndOp returns the OpCode for loading a value of given size from an sk_buff.
 func LoadIndOp(size Size) OpCode {
 	return OpCode(LdClass).SetMode(IndMode).SetSize(size)
vendor/github.com/cilium/ebpf/asm/opcode.go (generated, vendored; 2 changed lines)

@@ -225,7 +225,7 @@ func (op OpCode) String() string {
 	}

 	default:
-		fmt.Fprintf(&f, "%#x", op)
+		fmt.Fprintf(&f, "OpCode(%#x)", uint8(op))
 	}

 	return f.String()
vendor/github.com/cilium/ebpf/asm/register.go (generated, vendored; 7 changed lines)

@@ -33,6 +33,13 @@ const (
 	RFP = R10
 )

+// Pseudo registers used by 64bit loads and jumps
+const (
+	PseudoMapFD    = R1 // BPF_PSEUDO_MAP_FD
+	PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE
+	PseudoCall     = R1 // BPF_PSEUDO_CALL
+)
+
 func (r Register) String() string {
 	v := uint8(r)
 	if v == 10 {
vendor/github.com/cilium/ebpf/collection.go (generated, vendored; 117 changed lines)

@@ -1,9 +1,13 @@
 package ebpf

 import (
+	"errors"
+	"fmt"
+	"math"
+
 	"github.com/cilium/ebpf/asm"
+	"github.com/cilium/ebpf/internal"
 	"github.com/cilium/ebpf/internal/btf"
-	"github.com/pkg/errors"
 )

 // CollectionOptions control loading a collection into the kernel.
@@ -39,6 +43,89 @@ func (cs *CollectionSpec) Copy() *CollectionSpec {
 	return &cpy
 }

+// RewriteMaps replaces all references to specific maps.
+//
+// Use this function to use pre-existing maps instead of creating new ones
+// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps.
+//
+// Returns an error if a named map isn't used in at least one program.
+func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
+	for symbol, m := range maps {
+		// have we seen a program that uses this symbol / map
+		seen := false
+		fd := m.FD()
+		for progName, progSpec := range cs.Programs {
+			err := progSpec.Instructions.RewriteMapPtr(symbol, fd)
+
+			switch {
+			case err == nil:
+				seen = true
+
+			case asm.IsUnreferencedSymbol(err):
+				// Not all programs need to use the map
+
+			default:
+				return fmt.Errorf("program %s: %w", progName, err)
+			}
+		}
+
+		if !seen {
+			return fmt.Errorf("map %s not referenced by any programs", symbol)
+		}
+
+		// Prevent NewCollection from creating rewritten maps
+		delete(cs.Maps, symbol)
+	}
+
+	return nil
+}
+
+// RewriteConstants replaces the value of multiple constants.
+//
+// The constant must be defined like so in the C program:
+//
+//    static volatile const type foobar;
+//    static volatile const type foobar = default;
+//
+// Replacement values must be of the same length as the C sizeof(type).
+// If necessary, they are marshalled according to the same rules as
+// map values.
+//
+// From Linux 5.5 the verifier will use constants to eliminate dead code.
+//
+// Returns an error if a constant doesn't exist.
+func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
+	rodata := cs.Maps[".rodata"]
+	if rodata == nil {
+		return errors.New("missing .rodata section")
+	}
+
+	if rodata.BTF == nil {
+		return errors.New(".rodata section has no BTF")
+	}
+
+	if n := len(rodata.Contents); n != 1 {
+		return fmt.Errorf("expected one key in .rodata, found %d", n)
+	}
+
+	kv := rodata.Contents[0]
+	value, ok := kv.Value.([]byte)
+	if !ok {
+		return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value)
+	}
+
+	buf := make([]byte, len(value))
+	copy(buf, value)
+
+	err := patchValue(buf, btf.MapValue(rodata.BTF), consts)
+	if err != nil {
+		return err
+	}
+
+	rodata.Contents[0] = MapKV{kv.Key, buf}
+	return nil
+}
+
 // Collection is a collection of Programs and Maps associated
 // with their symbols
 type Collection struct {
@@ -99,14 +186,14 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (col
 		var handle *btf.Handle
 		if mapSpec.BTF != nil {
 			handle, err = loadBTF(btf.MapSpec(mapSpec.BTF))
-			if err != nil && !btf.IsNotSupported(err) {
+			if err != nil && !errors.Is(err, btf.ErrNotSupported) {
 				return nil, err
 			}
 		}

 		m, err := newMapWithBTF(mapSpec, handle)
 		if err != nil {
-			return nil, errors.Wrapf(err, "map %s", mapName)
+			return nil, fmt.Errorf("map %s: %w", mapName, err)
 		}
 		maps[mapName] = m
 	}
@@ -116,37 +203,43 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (col

 		// Rewrite any reference to a valid map.
 		for i := range progSpec.Instructions {
-			var (
-				ins = &progSpec.Instructions[i]
-				m   = maps[ins.Reference]
-			)
+			ins := &progSpec.Instructions[i]

-			if ins.Reference == "" || m == nil {
+			if ins.OpCode != asm.LoadImmOp(asm.DWord) || ins.Reference == "" {
 				continue
 			}

-			if ins.Src == asm.R1 {
+			if uint32(ins.Constant) != math.MaxUint32 {
 				// Don't overwrite maps already rewritten, users can
 				// rewrite programs in the spec themselves
 				continue
 			}

+			m := maps[ins.Reference]
+			if m == nil {
+				return nil, fmt.Errorf("program %s: missing map %s", progName, ins.Reference)
+			}
+
+			fd := m.FD()
+			if fd < 0 {
+				return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd)
+			}
 			if err := ins.RewriteMapPtr(m.FD()); err != nil {
-				return nil, errors.Wrapf(err, "progam %s: map %s", progName, ins.Reference)
+				return nil, fmt.Errorf("progam %s: map %s: %w", progName, ins.Reference, err)
 			}
 		}

 		var handle *btf.Handle
 		if progSpec.BTF != nil {
 			handle, err = loadBTF(btf.ProgramSpec(progSpec.BTF))
-			if err != nil && !btf.IsNotSupported(err) {
+			if err != nil && !errors.Is(err, btf.ErrNotSupported) {
 				return nil, err
 			}
 		}

 		prog, err := newProgramWithBTF(progSpec, handle, opts.Programs)
 		if err != nil {
-			return nil, errors.Wrapf(err, "program %s", progName)
+			return nil, fmt.Errorf("program %s: %w", progName, err)
 		}
 		progs[progName] = prog
 	}
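The RewriteConstants doc comment above spells out the C-side contract: a static volatile const definition whose size matches the replacement value. A hedged usage sketch, assuming a hypothetical object file prog.o whose C source declares static volatile const uint32_t debug_level = 0;

package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	spec, err := ebpf.LoadCollectionSpec("prog.o") // hypothetical path
	if err != nil {
		panic(err)
	}

	// Patch the constant before programs are loaded; the replacement must have
	// the same size as the C type (uint32 here).
	if err := spec.RewriteConstants(map[string]interface{}{
		"debug_level": uint32(1),
	}); err != nil {
		panic(err)
	}

	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		panic(err)
	}
	defer coll.Close()

	fmt.Println("collection loaded with patched constant")
}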
607
vendor/github.com/cilium/ebpf/elf_reader.go
generated
vendored
607
vendor/github.com/cilium/ebpf/elf_reader.go
generated
vendored
@ -4,21 +4,23 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"debug/elf"
|
"debug/elf"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/cilium/ebpf/asm"
|
"github.com/cilium/ebpf/asm"
|
||||||
"github.com/cilium/ebpf/internal"
|
"github.com/cilium/ebpf/internal"
|
||||||
"github.com/cilium/ebpf/internal/btf"
|
"github.com/cilium/ebpf/internal/btf"
|
||||||
|
"github.com/cilium/ebpf/internal/unix"
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type elfCode struct {
|
type elfCode struct {
|
||||||
*elf.File
|
*elf.File
|
||||||
symbols []elf.Symbol
|
symbols []elf.Symbol
|
||||||
symbolsPerSection map[elf.SectionIndex]map[uint64]string
|
symbolsPerSection map[elf.SectionIndex]map[uint64]elf.Symbol
|
||||||
license string
|
license string
|
||||||
version uint32
|
version uint32
|
||||||
}
|
}
|
||||||
@ -32,7 +34,10 @@ func LoadCollectionSpec(file string) (*CollectionSpec, error) {
|
|||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
spec, err := LoadCollectionSpecFromReader(f)
|
spec, err := LoadCollectionSpecFromReader(f)
|
||||||
return spec, errors.Wrapf(err, "file %s", file)
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("file %s: %w", file, err)
|
||||||
|
}
|
||||||
|
return spec, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec.
|
// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec.
|
||||||
@ -45,7 +50,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
|
|||||||
|
|
||||||
symbols, err := f.Symbols()
|
symbols, err := f.Symbols()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "load symbols")
|
return nil, fmt.Errorf("load symbols: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ec := &elfCode{f, symbols, symbolsPerSection(symbols), "", 0}
|
ec := &elfCode{f, symbols, symbolsPerSection(symbols), "", 0}
|
||||||
@ -57,6 +62,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
|
|||||||
progSections = make(map[elf.SectionIndex]*elf.Section)
|
progSections = make(map[elf.SectionIndex]*elf.Section)
|
||||||
relSections = make(map[elf.SectionIndex]*elf.Section)
|
relSections = make(map[elf.SectionIndex]*elf.Section)
|
||||||
mapSections = make(map[elf.SectionIndex]*elf.Section)
|
mapSections = make(map[elf.SectionIndex]*elf.Section)
|
||||||
|
dataSections = make(map[elf.SectionIndex]*elf.Section)
|
||||||
)
|
)
|
||||||
|
|
||||||
for i, sec := range ec.Sections {
|
for i, sec := range ec.Sections {
|
||||||
@ -69,15 +75,17 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
|
|||||||
mapSections[elf.SectionIndex(i)] = sec
|
mapSections[elf.SectionIndex(i)] = sec
|
||||||
case sec.Name == ".maps":
|
case sec.Name == ".maps":
|
||||||
btfMaps[elf.SectionIndex(i)] = sec
|
btfMaps[elf.SectionIndex(i)] = sec
|
||||||
|
case sec.Name == ".bss" || sec.Name == ".rodata" || sec.Name == ".data":
|
||||||
|
dataSections[elf.SectionIndex(i)] = sec
|
||||||
case sec.Type == elf.SHT_REL:
|
case sec.Type == elf.SHT_REL:
|
||||||
if int(sec.Info) >= len(ec.Sections) {
|
if int(sec.Info) >= len(ec.Sections) {
|
||||||
return nil, errors.Errorf("found relocation section %v for missing section %v", i, sec.Info)
|
return nil, fmt.Errorf("found relocation section %v for missing section %v", i, sec.Info)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Store relocations under the section index of the target
|
// Store relocations under the section index of the target
|
||||||
idx := elf.SectionIndex(sec.Info)
|
idx := elf.SectionIndex(sec.Info)
|
||||||
if relSections[idx] != nil {
|
if relSections[idx] != nil {
|
||||||
return nil, errors.Errorf("section %d has multiple relocation sections", sec.Info)
|
return nil, fmt.Errorf("section %d has multiple relocation sections", sec.Info)
|
||||||
}
|
}
|
||||||
relSections[idx] = sec
|
relSections[idx] = sec
|
||||||
case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
|
case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
|
||||||
@ -87,34 +95,52 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
|
|||||||
|
|
||||||
ec.license, err = loadLicense(licenseSection)
|
ec.license, err = loadLicense(licenseSection)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "load license")
|
return nil, fmt.Errorf("load license: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ec.version, err = loadVersion(versionSection, ec.ByteOrder)
|
ec.version, err = loadVersion(versionSection, ec.ByteOrder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "load version")
|
return nil, fmt.Errorf("load version: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
btf, err := btf.LoadSpecFromReader(rd)
|
btfSpec, err := btf.LoadSpecFromReader(rd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "load BTF")
|
return nil, fmt.Errorf("load BTF: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
relocations, referencedSections, err := ec.loadRelocations(relSections)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("load relocations: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
maps := make(map[string]*MapSpec)
|
maps := make(map[string]*MapSpec)
|
||||||
|
|
||||||
if err := ec.loadMaps(maps, mapSections); err != nil {
|
if err := ec.loadMaps(maps, mapSections); err != nil {
|
||||||
return nil, errors.Wrap(err, "load maps")
|
return nil, fmt.Errorf("load maps: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(btfMaps) > 0 {
|
if len(btfMaps) > 0 {
|
||||||
if err := ec.loadBTFMaps(maps, btfMaps, btf); err != nil {
|
if err := ec.loadBTFMaps(maps, btfMaps, btfSpec); err != nil {
|
||||||
return nil, errors.Wrap(err, "load BTF maps")
|
return nil, fmt.Errorf("load BTF maps: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
progs, err := ec.loadPrograms(progSections, relSections, btf)
|
if len(dataSections) > 0 {
|
||||||
|
for idx := range dataSections {
|
||||||
|
if !referencedSections[idx] {
|
||||||
|
// Prune data sections which are not referenced by any
|
||||||
|
// instructions.
|
||||||
|
delete(dataSections, idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ec.loadDataSections(maps, dataSections, btfSpec); err != nil {
|
||||||
|
return nil, fmt.Errorf("load data sections: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
progs, err := ec.loadPrograms(progSections, relocations, btfSpec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "load programs")
|
return nil, fmt.Errorf("load programs: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &CollectionSpec{maps, progs}, nil
|
return &CollectionSpec{maps, progs}, nil
|
||||||
@ -122,11 +148,12 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
|
|||||||
|
|
||||||
func loadLicense(sec *elf.Section) (string, error) {
|
func loadLicense(sec *elf.Section) (string, error) {
|
||||||
if sec == nil {
|
if sec == nil {
|
||||||
return "", errors.Errorf("missing license section")
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := sec.Data()
|
data, err := sec.Data()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.Wrapf(err, "section %s", sec.Name)
|
return "", fmt.Errorf("section %s: %v", sec.Name, err)
|
||||||
}
|
}
|
||||||
return string(bytes.TrimRight(data, "\000")), nil
|
return string(bytes.TrimRight(data, "\000")), nil
|
||||||
}
|
}
|
||||||
@ -137,52 +164,51 @@ func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var version uint32
|
var version uint32
|
||||||
err := binary.Read(sec.Open(), bo, &version)
|
if err := binary.Read(sec.Open(), bo, &version); err != nil {
|
||||||
return version, errors.Wrapf(err, "section %s", sec.Name)
|
return 0, fmt.Errorf("section %s: %v", sec.Name, err)
|
||||||
|
}
|
||||||
|
return version, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ec *elfCode) loadPrograms(progSections, relSections map[elf.SectionIndex]*elf.Section, btf *btf.Spec) (map[string]*ProgramSpec, error) {
|
func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section, relocations map[elf.SectionIndex]map[uint64]elf.Symbol, btfSpec *btf.Spec) (map[string]*ProgramSpec, error) {
|
||||||
var (
|
var (
|
||||||
progs []*ProgramSpec
|
progs []*ProgramSpec
|
||||||
libs []*ProgramSpec
|
libs []*ProgramSpec
|
||||||
)
|
)
|
||||||
|
|
||||||
for idx, prog := range progSections {
|
for idx, sec := range progSections {
|
||||||
syms := ec.symbolsPerSection[idx]
|
syms := ec.symbolsPerSection[idx]
|
||||||
if len(syms) == 0 {
|
if len(syms) == 0 {
|
||||||
return nil, errors.Errorf("section %v: missing symbols", prog.Name)
|
return nil, fmt.Errorf("section %v: missing symbols", sec.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
funcSym := syms[0]
|
funcSym, ok := syms[0]
|
||||||
if funcSym == "" {
|
if !ok {
|
||||||
return nil, errors.Errorf("section %v: no label at start", prog.Name)
|
return nil, fmt.Errorf("section %v: no label at start", sec.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
rels, err := ec.loadRelocations(relSections[idx])
|
insns, length, err := ec.loadInstructions(sec, syms, relocations[idx])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "program %s: can't load relocations", funcSym)
|
return nil, fmt.Errorf("program %s: can't unmarshal instructions: %w", funcSym.Name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
insns, length, err := ec.loadInstructions(prog, syms, rels)
|
progType, attachType, attachTo := getProgType(sec.Name)
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "program %s: can't unmarshal instructions", funcSym)
|
|
||||||
}
|
|
||||||
|
|
||||||
progType, attachType := getProgType(prog.Name)
|
|
||||||
|
|
||||||
spec := &ProgramSpec{
|
spec := &ProgramSpec{
|
||||||
Name: funcSym,
|
Name: funcSym.Name,
|
||||||
Type: progType,
|
Type: progType,
|
||||||
AttachType: attachType,
|
AttachType: attachType,
|
||||||
|
AttachTo: attachTo,
|
||||||
License: ec.license,
|
License: ec.license,
|
||||||
KernelVersion: ec.version,
|
KernelVersion: ec.version,
|
||||||
Instructions: insns,
|
Instructions: insns,
|
||||||
|
ByteOrder: ec.ByteOrder,
|
||||||
}
|
}
|
||||||
|
|
||||||
if btf != nil {
|
if btfSpec != nil {
|
||||||
spec.BTF, err = btf.Program(prog.Name, length)
|
spec.BTF, err = btfSpec.Program(sec.Name, length)
|
||||||
if err != nil {
|
if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) {
|
||||||
return nil, errors.Wrapf(err, "BTF for section %s (program %s)", prog.Name, funcSym)
|
return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -200,7 +226,7 @@ func (ec *elfCode) loadPrograms(progSections, relSections map[elf.SectionIndex]*
|
|||||||
for _, prog := range progs {
|
for _, prog := range progs {
|
||||||
err := link(prog, libs)
|
err := link(prog, libs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "program %s", prog.Name)
|
return nil, fmt.Errorf("program %s: %w", prog.Name, err)
|
||||||
}
|
}
|
||||||
res[prog.Name] = prog
|
res[prog.Name] = prog
|
||||||
}
|
}
|
||||||
@ -208,39 +234,158 @@ func (ec *elfCode) loadPrograms(progSections, relSections map[elf.SectionIndex]*
|
|||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations map[uint64]string) (asm.Instructions, uint64, error) {
|
func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations map[uint64]elf.Symbol) (asm.Instructions, uint64, error) {
|
||||||
var (
|
var (
|
||||||
r = section.Open()
|
r = section.Open()
|
||||||
insns asm.Instructions
|
insns asm.Instructions
|
||||||
ins asm.Instruction
|
|
||||||
offset uint64
|
offset uint64
|
||||||
)
|
)
|
||||||
for {
|
for {
|
||||||
|
var ins asm.Instruction
|
||||||
n, err := ins.Unmarshal(r, ec.ByteOrder)
|
n, err := ins.Unmarshal(r, ec.ByteOrder)
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
return insns, offset, nil
|
return insns, offset, nil
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, errors.Wrapf(err, "offset %d", offset)
|
return nil, 0, fmt.Errorf("offset %d: %w", offset, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ins.Symbol = symbols[offset]
|
ins.Symbol = symbols[offset].Name
|
||||||
ins.Reference = relocations[offset]
|
|
||||||
|
if rel, ok := relocations[offset]; ok {
|
||||||
|
if err = ec.relocateInstruction(&ins, rel); err != nil {
|
||||||
|
return nil, 0, fmt.Errorf("offset %d: can't relocate instruction: %w", offset, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
insns = append(insns, ins)
|
insns = append(insns, ins)
|
||||||
offset += n
|
offset += n
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error {
|
||||||
|
var (
|
||||||
|
typ = elf.ST_TYPE(rel.Info)
|
||||||
|
bind = elf.ST_BIND(rel.Info)
|
||||||
|
name = rel.Name
|
||||||
|
)
|
||||||
|
|
||||||
|
if typ == elf.STT_SECTION {
|
||||||
|
// Symbols with section type do not have a name set. Get it
|
||||||
|
// from the section itself.
|
||||||
|
idx := int(rel.Section)
|
||||||
|
if idx > len(ec.Sections) {
|
||||||
|
return errors.New("out-of-bounds section index")
|
||||||
|
}
|
||||||
|
|
||||||
|
name = ec.Sections[idx].Name
|
||||||
|
}
|
||||||
|
|
||||||
|
outer:
|
||||||
|
switch {
|
||||||
|
case ins.OpCode == asm.LoadImmOp(asm.DWord):
|
||||||
|
// There are two distinct types of a load from a map:
|
||||||
|
// a direct one, where the value is extracted without
|
||||||
|
// a call to map_lookup_elem in eBPF, and an indirect one
|
||||||
|
// that goes via the helper. They are distinguished by
|
||||||
|
// different relocations.
|
||||||
|
switch typ {
|
||||||
|
case elf.STT_SECTION:
|
||||||
|
// This is a direct load since the referenced symbol is a
|
||||||
|
// section. Weirdly, the offset of the real symbol in the
|
||||||
|
// section is encoded in the instruction stream.
|
||||||
|
if bind != elf.STB_LOCAL {
|
||||||
|
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
|
||||||
|
}
|
||||||
|
|
||||||
|
// For some reason, clang encodes the offset of the symbol its
|
||||||
|
// section in the first basic BPF instruction, while the kernel
|
||||||
|
// expects it in the second one.
|
||||||
|
ins.Constant <<= 32
|
||||||
|
ins.Src = asm.PseudoMapValue
|
||||||
|
|
||||||
|
case elf.STT_NOTYPE:
|
||||||
|
if bind == elf.STB_GLOBAL && rel.Section == elf.SHN_UNDEF {
|
||||||
|
// This is a relocation generated by inline assembly.
|
||||||
|
// We can't do more than assigning ins.Reference.
|
||||||
|
break outer
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is an ELF generated on clang < 8, which doesn't tag
|
||||||
|
// relocations appropriately.
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
case elf.STT_OBJECT:
|
||||||
|
if bind != elf.STB_GLOBAL {
|
||||||
|
return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
|
||||||
|
}
|
||||||
|
|
||||||
|
ins.Src = asm.PseudoMapFD
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("load: %s: unsupported relocation: %s", name, typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark the instruction as needing an update when creating the
|
||||||
|
// collection.
|
||||||
|
if err := ins.RewriteMapPtr(-1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
case ins.OpCode.JumpOp() == asm.Call:
|
||||||
|
if ins.Src != asm.PseudoCall {
|
||||||
|
return fmt.Errorf("call: %s: incorrect source register", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch typ {
|
||||||
|
case elf.STT_NOTYPE, elf.STT_FUNC:
|
||||||
|
if bind != elf.STB_GLOBAL {
|
||||||
|
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
|
||||||
|
}
|
||||||
|
|
||||||
|
case elf.STT_SECTION:
|
||||||
|
if bind != elf.STB_LOCAL {
|
||||||
|
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The function we want to call is in the indicated section,
|
||||||
|
// at the offset encoded in the instruction itself. Reverse
|
||||||
|
// the calculation to find the real function we're looking for.
|
||||||
|
// A value of -1 references the first instruction in the section.
|
||||||
|
offset := int64(int32(ins.Constant)+1) * asm.InstructionSize
|
||||||
|
if offset < 0 {
|
||||||
|
return fmt.Errorf("call: %s: invalid offset %d", name, offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
sym, ok := ec.symbolsPerSection[rel.Section][uint64(offset)]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("call: %s: no symbol at offset %d", name, offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
ins.Constant = -1
|
||||||
|
name = sym.Name
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("relocation for unsupported instruction: %s", ins.OpCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
ins.Reference = name
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.SectionIndex]*elf.Section) error {
|
func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.SectionIndex]*elf.Section) error {
|
||||||
for idx, sec := range mapSections {
|
for idx, sec := range mapSections {
|
||||||
syms := ec.symbolsPerSection[idx]
|
syms := ec.symbolsPerSection[idx]
|
||||||
if len(syms) == 0 {
|
if len(syms) == 0 {
|
||||||
return errors.Errorf("section %v: no symbols", sec.Name)
|
return fmt.Errorf("section %v: no symbols", sec.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
if sec.Size%uint64(len(syms)) != 0 {
|
if sec.Size%uint64(len(syms)) != 0 {
|
||||||
return errors.Errorf("section %v: map descriptors are not of equal size", sec.Name)
|
return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -248,36 +393,38 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio
|
|||||||
size = sec.Size / uint64(len(syms))
|
size = sec.Size / uint64(len(syms))
|
||||||
)
|
)
|
||||||
for i, offset := 0, uint64(0); i < len(syms); i, offset = i+1, offset+size {
|
for i, offset := 0, uint64(0); i < len(syms); i, offset = i+1, offset+size {
|
||||||
mapSym := syms[offset]
|
mapSym, ok := syms[offset]
|
||||||
if mapSym == "" {
|
if !ok {
|
||||||
return errors.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
|
return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
|
||||||
}
|
}
|
||||||
|
|
||||||
if maps[mapSym] != nil {
|
if maps[mapSym.Name] != nil {
|
||||||
return errors.Errorf("section %v: map %v already exists", sec.Name, mapSym)
|
return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
|
||||||
}
|
}
|
||||||
|
|
||||||
lr := io.LimitReader(r, int64(size))
|
lr := io.LimitReader(r, int64(size))
|
||||||
|
|
||||||
var spec MapSpec
|
spec := MapSpec{
|
||||||
|
Name: SanitizeName(mapSym.Name, -1),
|
||||||
|
}
|
||||||
switch {
|
switch {
|
||||||
case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
|
case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
|
||||||
return errors.Errorf("map %v: missing type", mapSym)
|
return fmt.Errorf("map %v: missing type", mapSym)
|
||||||
case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
|
case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
|
||||||
return errors.Errorf("map %v: missing key size", mapSym)
|
return fmt.Errorf("map %v: missing key size", mapSym)
|
||||||
case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
|
case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
|
||||||
return errors.Errorf("map %v: missing value size", mapSym)
|
return fmt.Errorf("map %v: missing value size", mapSym)
|
||||||
case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
|
case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
|
||||||
return errors.Errorf("map %v: missing max entries", mapSym)
|
return fmt.Errorf("map %v: missing max entries", mapSym)
|
||||||
case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
|
case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
|
||||||
return errors.Errorf("map %v: missing flags", mapSym)
|
return fmt.Errorf("map %v: missing flags", mapSym)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil {
|
if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil {
|
||||||
return errors.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
|
return fmt.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
|
||||||
}
|
}
|
||||||
|
|
||||||
maps[mapSym] = &spec
|
maps[mapSym.Name] = &spec
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -285,85 +432,117 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio
 	}

 func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec, mapSections map[elf.SectionIndex]*elf.Section, spec *btf.Spec) error {
 	if spec == nil {
-		return errors.Errorf("missing BTF")
+		return fmt.Errorf("missing BTF")
 	}

 	for idx, sec := range mapSections {
 		syms := ec.symbolsPerSection[idx]
 		if len(syms) == 0 {
-			return errors.Errorf("section %v: no symbols", sec.Name)
+			return fmt.Errorf("section %v: no symbols", sec.Name)
 		}

 		for _, sym := range syms {
-			if maps[sym] != nil {
-				return errors.Errorf("section %v: map %v already exists", sec.Name, sym)
+			name := sym.Name
+			if maps[name] != nil {
+				return fmt.Errorf("section %v: map %v already exists", sec.Name, sym)
 			}

-			btfMap, err := spec.Map(sym)
+			mapSpec, err := mapSpecFromBTF(spec, name)
 			if err != nil {
-				return errors.Wrapf(err, "map %v: can't get BTF", sym)
+				return fmt.Errorf("map %v: %w", name, err)
 			}

-			spec, err := mapSpecFromBTF(btfMap)
-			if err != nil {
-				return errors.Wrapf(err, "map %v", sym)
-			}
-
-			maps[sym] = spec
+			maps[name] = mapSpec
 		}
 	}

 	return nil
 }

-func mapSpecFromBTF(btfMap *btf.Map) (*MapSpec, error) {
+func mapSpecFromBTF(spec *btf.Spec, name string) (*MapSpec, error) {
+	btfMap, btfMapMembers, err := spec.Map(name)
+	if err != nil {
+		return nil, fmt.Errorf("can't get BTF: %w", err)
+	}
+
+	keyType := btf.MapKey(btfMap)
+	size, err := btf.Sizeof(keyType)
+	if err != nil {
+		return nil, fmt.Errorf("can't get size of BTF key: %w", err)
+	}
+	keySize := uint32(size)
+
+	valueType := btf.MapValue(btfMap)
+	size, err = btf.Sizeof(valueType)
+	if err != nil {
+		return nil, fmt.Errorf("can't get size of BTF value: %w", err)
+	}
+	valueSize := uint32(size)
+
 	var (
 		mapType, flags, maxEntries uint32
-		err                        error
 	)
-	for _, member := range btf.MapType(btfMap).Members {
+	for _, member := range btfMapMembers {
 		switch member.Name {
 		case "type":
 			mapType, err = uintFromBTF(member.Type)
 			if err != nil {
-				return nil, errors.Wrap(err, "can't get type")
+				return nil, fmt.Errorf("can't get type: %w", err)
 			}

 		case "map_flags":
 			flags, err = uintFromBTF(member.Type)
 			if err != nil {
-				return nil, errors.Wrap(err, "can't get BTF map flags")
+				return nil, fmt.Errorf("can't get BTF map flags: %w", err)
 			}

 		case "max_entries":
 			maxEntries, err = uintFromBTF(member.Type)
 			if err != nil {
-				return nil, errors.Wrap(err, "can't get BTF map max entries")
+				return nil, fmt.Errorf("can't get BTF map max entries: %w", err)
 			}

-		case "key":
-		case "value":
+		case "key_size":
+			if _, isVoid := keyType.(*btf.Void); !isVoid {
+				return nil, errors.New("both key and key_size given")
+			}
+
+			keySize, err = uintFromBTF(member.Type)
+			if err != nil {
+				return nil, fmt.Errorf("can't get BTF key size: %w", err)
+			}
+
+		case "value_size":
+			if _, isVoid := valueType.(*btf.Void); !isVoid {
+				return nil, errors.New("both value and value_size given")
+			}
+
+			valueSize, err = uintFromBTF(member.Type)
+			if err != nil {
+				return nil, fmt.Errorf("can't get BTF value size: %w", err)
+			}
+
+		case "pinning":
+			pinning, err := uintFromBTF(member.Type)
+			if err != nil {
+				return nil, fmt.Errorf("can't get pinning: %w", err)
+			}
+
+			if pinning != 0 {
+				return nil, fmt.Errorf("'pinning' attribute not supported: %w", ErrNotSupported)
+			}
+
+		case "key", "value":
 		default:
-			return nil, errors.Errorf("unrecognized field %s in BTF map definition", member.Name)
+			return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
 		}
 	}

-	keySize, err := btf.Sizeof(btf.MapKey(btfMap))
-	if err != nil {
-		return nil, errors.Wrap(err, "can't get size of BTF key")
-	}
-
-	valueSize, err := btf.Sizeof(btf.MapValue(btfMap))
-	if err != nil {
-		return nil, errors.Wrap(err, "can't get size of BTF value")
-	}
-
 	return &MapSpec{
 		Type:       MapType(mapType),
-		KeySize:    uint32(keySize),
-		ValueSize:  uint32(valueSize),
+		KeySize:    keySize,
+		ValueSize:  valueSize,
 		MaxEntries: maxEntries,
 		Flags:      flags,
 		BTF:        btfMap,
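The hunk above is representative of the change running through this whole vendor bump: error handling moves from github.com/pkg/errors (errors.Wrap, Wrapf, Errorf) to Go 1.13 wrapping with fmt.Errorf and %w. A minimal standalone sketch of that pattern follows; the sentinel errNotFound and the loadMap helper are illustrative names only, not part of the vendored packages.

package main

import (
	"errors"
	"fmt"
)

// errNotFound and loadMap are illustrative only, not part of the vendored code.
var errNotFound = errors.New("not found")

func loadMap(name string) error {
	// %w keeps the sentinel reachable for callers via errors.Is / errors.As.
	return fmt.Errorf("map %s: %w", name, errNotFound)
}

func main() {
	err := loadMap("events")
	fmt.Println(err)                         // map events: not found
	fmt.Println(errors.Is(err, errNotFound)) // true
}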
@@ -375,127 +554,163 @@ func mapSpecFromBTF(btfMap *btf.Map) (*MapSpec, error) {
 func uintFromBTF(typ btf.Type) (uint32, error) {
 	ptr, ok := typ.(*btf.Pointer)
 	if !ok {
-		return 0, errors.Errorf("not a pointer: %v", typ)
+		return 0, fmt.Errorf("not a pointer: %v", typ)
 	}

 	arr, ok := ptr.Target.(*btf.Array)
 	if !ok {
-		return 0, errors.Errorf("not a pointer to array: %v", typ)
+		return 0, fmt.Errorf("not a pointer to array: %v", typ)
 	}

 	return arr.Nelems, nil
 }

-func getProgType(v string) (ProgramType, AttachType) {
-	types := map[string]ProgramType{
-		// From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c#n3568
-		"socket":            SocketFilter,
-		"seccomp":           SocketFilter,
-		"kprobe/":           Kprobe,
-		"uprobe/":           Kprobe,
-		"kretprobe/":        Kprobe,
-		"uretprobe/":        Kprobe,
-		"tracepoint/":       TracePoint,
-		"raw_tracepoint/":   RawTracepoint,
-		"xdp":               XDP,
-		"perf_event":        PerfEvent,
-		"lwt_in":            LWTIn,
-		"lwt_out":           LWTOut,
-		"lwt_xmit":          LWTXmit,
-		"lwt_seg6local":     LWTSeg6Local,
-		"sockops":           SockOps,
-		"sk_skb":            SkSKB,
-		"sk_msg":            SkMsg,
-		"lirc_mode2":        LircMode2,
-		"flow_dissector":    FlowDissector,
-
-		"cgroup_skb/":       CGroupSKB,
-		"cgroup/dev":        CGroupDevice,
-		"cgroup/skb":        CGroupSKB,
-		"cgroup/sock":       CGroupSock,
-		"cgroup/post_bind":  CGroupSock,
-		"cgroup/bind":       CGroupSockAddr,
-		"cgroup/connect":    CGroupSockAddr,
-		"cgroup/sendmsg":    CGroupSockAddr,
-		"cgroup/recvmsg":    CGroupSockAddr,
-		"cgroup/sysctl":     CGroupSysctl,
-		"cgroup/getsockopt": CGroupSockopt,
-		"cgroup/setsockopt": CGroupSockopt,
-		"classifier":        SchedCLS,
-		"action":            SchedACT,
-	}
-	attachTypes := map[string]AttachType{
-		"cgroup_skb/ingress":    AttachCGroupInetIngress,
-		"cgroup_skb/egress":     AttachCGroupInetEgress,
-		"cgroup/sock":           AttachCGroupInetSockCreate,
-		"cgroup/post_bind4":     AttachCGroupInet4PostBind,
-		"cgroup/post_bind6":     AttachCGroupInet6PostBind,
-		"cgroup/dev":            AttachCGroupDevice,
-		"sockops":               AttachCGroupSockOps,
-		"sk_skb/stream_parser":  AttachSkSKBStreamParser,
-		"sk_skb/stream_verdict": AttachSkSKBStreamVerdict,
-		"sk_msg":                AttachSkSKBStreamVerdict,
-		"lirc_mode2":            AttachLircMode2,
-		"flow_dissector":        AttachFlowDissector,
-		"cgroup/bind4":          AttachCGroupInet4Bind,
-		"cgroup/bind6":          AttachCGroupInet6Bind,
-		"cgroup/connect4":       AttachCGroupInet4Connect,
-		"cgroup/connect6":       AttachCGroupInet6Connect,
-		"cgroup/sendmsg4":       AttachCGroupUDP4Sendmsg,
-		"cgroup/sendmsg6":       AttachCGroupUDP6Sendmsg,
-		"cgroup/recvmsg4":       AttachCGroupUDP4Recvmsg,
-		"cgroup/recvmsg6":       AttachCGroupUDP6Recvmsg,
-		"cgroup/sysctl":         AttachCGroupSysctl,
-		"cgroup/getsockopt":     AttachCGroupGetsockopt,
-		"cgroup/setsockopt":     AttachCGroupSetsockopt,
-	}
-	attachType := AttachNone
-	for k, t := range attachTypes {
-		if strings.HasPrefix(v, k) {
-			attachType = t
-		}
-	}
-
-	for k, t := range types {
-		if strings.HasPrefix(v, k) {
-			return t, attachType
-		}
-	}
-	return UnspecifiedProgram, AttachNone
-}
+func (ec *elfCode) loadDataSections(maps map[string]*MapSpec, dataSections map[elf.SectionIndex]*elf.Section, spec *btf.Spec) error {
+	if spec == nil {
+		return errors.New("data sections require BTF, make sure all consts are marked as static")
+	}
+
+	for _, sec := range dataSections {
+		btfMap, err := spec.Datasec(sec.Name)
+		if err != nil {
+			return err
+		}
+
+		data, err := sec.Data()
+		if err != nil {
+			return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
+		}
+
+		if uint64(len(data)) > math.MaxUint32 {
+			return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name)
+		}
+
+		mapSpec := &MapSpec{
+			Name:       SanitizeName(sec.Name, -1),
+			Type:       Array,
+			KeySize:    4,
+			ValueSize:  uint32(len(data)),
+			MaxEntries: 1,
+			Contents:   []MapKV{{uint32(0), data}},
+			BTF:        btfMap,
+		}
+
+		switch sec.Name {
+		case ".rodata":
+			mapSpec.Flags = unix.BPF_F_RDONLY_PROG
+			mapSpec.Freeze = true
+		case ".bss":
+			// The kernel already zero-initializes the map
+			mapSpec.Contents = nil
+		}
+
+		maps[sec.Name] = mapSpec
+	}
+	return nil
+}

-func (ec *elfCode) loadRelocations(sec *elf.Section) (map[uint64]string, error) {
-	rels := make(map[uint64]string)
-	if sec == nil {
-		return rels, nil
-	}
-
-	if sec.Entsize < 16 {
-		return nil, errors.New("rels are less than 16 bytes")
-	}
-
-	r := sec.Open()
-	for off := uint64(0); off < sec.Size; off += sec.Entsize {
-		ent := io.LimitReader(r, int64(sec.Entsize))
-
-		var rel elf.Rel64
-		if binary.Read(ent, ec.ByteOrder, &rel) != nil {
-			return nil, errors.Errorf("can't parse relocation at offset %v", off)
-		}
-
-		symNo := int(elf.R_SYM64(rel.Info) - 1)
-		if symNo >= len(ec.symbols) {
-			return nil, errors.Errorf("relocation at offset %d: symbol %v doesnt exist", off, symNo)
-		}
-
-		rels[rel.Off] = ec.symbols[symNo].Name
-	}
-	return rels, nil
-}
+func getProgType(sectionName string) (ProgramType, AttachType, string) {
+	types := map[string]struct {
+		progType   ProgramType
+		attachType AttachType
+	}{
+		// From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c
+		"socket":                {SocketFilter, AttachNone},
+		"seccomp":               {SocketFilter, AttachNone},
+		"kprobe/":               {Kprobe, AttachNone},
+		"uprobe/":               {Kprobe, AttachNone},
+		"kretprobe/":            {Kprobe, AttachNone},
+		"uretprobe/":            {Kprobe, AttachNone},
+		"tracepoint/":           {TracePoint, AttachNone},
+		"raw_tracepoint/":       {RawTracepoint, AttachNone},
+		"xdp":                   {XDP, AttachNone},
+		"perf_event":            {PerfEvent, AttachNone},
+		"lwt_in":                {LWTIn, AttachNone},
+		"lwt_out":               {LWTOut, AttachNone},
+		"lwt_xmit":              {LWTXmit, AttachNone},
+		"lwt_seg6local":         {LWTSeg6Local, AttachNone},
+		"sockops":               {SockOps, AttachCGroupSockOps},
+		"sk_skb/stream_parser":  {SkSKB, AttachSkSKBStreamParser},
+		"sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamParser},
+		"sk_msg":                {SkMsg, AttachSkSKBStreamVerdict},
+		"lirc_mode2":            {LircMode2, AttachLircMode2},
+		"flow_dissector":        {FlowDissector, AttachFlowDissector},
+		"iter/":                 {Tracing, AttachTraceIter},
+
+		"cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress},
+		"cgroup_skb/egress":  {CGroupSKB, AttachCGroupInetEgress},
+		"cgroup/dev":         {CGroupDevice, AttachCGroupDevice},
+		"cgroup/skb":         {CGroupSKB, AttachNone},
+		"cgroup/sock":        {CGroupSock, AttachCGroupInetSockCreate},
+		"cgroup/post_bind4":  {CGroupSock, AttachCGroupInet4PostBind},
+		"cgroup/post_bind6":  {CGroupSock, AttachCGroupInet6PostBind},
+		"cgroup/bind4":       {CGroupSockAddr, AttachCGroupInet4Bind},
+		"cgroup/bind6":       {CGroupSockAddr, AttachCGroupInet6Bind},
+		"cgroup/connect4":    {CGroupSockAddr, AttachCGroupInet4Connect},
+		"cgroup/connect6":    {CGroupSockAddr, AttachCGroupInet6Connect},
+		"cgroup/sendmsg4":    {CGroupSockAddr, AttachCGroupUDP4Sendmsg},
+		"cgroup/sendmsg6":    {CGroupSockAddr, AttachCGroupUDP6Sendmsg},
+		"cgroup/recvmsg4":    {CGroupSockAddr, AttachCGroupUDP4Recvmsg},
+		"cgroup/recvmsg6":    {CGroupSockAddr, AttachCGroupUDP6Recvmsg},
+		"cgroup/sysctl":      {CGroupSysctl, AttachCGroupSysctl},
+		"cgroup/getsockopt":  {CGroupSockopt, AttachCGroupGetsockopt},
+		"cgroup/setsockopt":  {CGroupSockopt, AttachCGroupSetsockopt},
+		"classifier":         {SchedCLS, AttachNone},
+		"action":             {SchedACT, AttachNone},
+	}
+
+	for prefix, t := range types {
+		if !strings.HasPrefix(sectionName, prefix) {
+			continue
+		}
+
+		if !strings.HasSuffix(prefix, "/") {
+			return t.progType, t.attachType, ""
+		}
+
+		return t.progType, t.attachType, sectionName[len(prefix):]
+	}
+
+	return UnspecifiedProgram, AttachNone, ""
+}

-func symbolsPerSection(symbols []elf.Symbol) map[elf.SectionIndex]map[uint64]string {
-	result := make(map[elf.SectionIndex]map[uint64]string)
-	for i, sym := range symbols {
+func (ec *elfCode) loadRelocations(sections map[elf.SectionIndex]*elf.Section) (map[elf.SectionIndex]map[uint64]elf.Symbol, map[elf.SectionIndex]bool, error) {
+	result := make(map[elf.SectionIndex]map[uint64]elf.Symbol)
+	targets := make(map[elf.SectionIndex]bool)
+	for idx, sec := range sections {
+		rels := make(map[uint64]elf.Symbol)
+
+		if sec.Entsize < 16 {
+			return nil, nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
+		}
+
+		r := sec.Open()
+		for off := uint64(0); off < sec.Size; off += sec.Entsize {
+			ent := io.LimitReader(r, int64(sec.Entsize))
+
+			var rel elf.Rel64
+			if binary.Read(ent, ec.ByteOrder, &rel) != nil {
+				return nil, nil, fmt.Errorf("can't parse relocation at offset %v", off)
+			}
+
+			symNo := int(elf.R_SYM64(rel.Info) - 1)
+			if symNo >= len(ec.symbols) {
+				return nil, nil, fmt.Errorf("relocation at offset %d: symbol %v doesnt exist", off, symNo)
+			}
+
+			symbol := ec.symbols[symNo]
+			targets[symbol.Section] = true
+			rels[rel.Off] = ec.symbols[symNo]
+		}
+
+		result[idx] = rels
+	}
+	return result, targets, nil
+}
+
+func symbolsPerSection(symbols []elf.Symbol) map[elf.SectionIndex]map[uint64]elf.Symbol {
+	result := make(map[elf.SectionIndex]map[uint64]elf.Symbol)
+	for _, sym := range symbols {
 		switch elf.ST_TYPE(sym.Info) {
 		case elf.STT_NOTYPE:
 			// Older versions of LLVM doesn't tag
@@ -509,15 +724,19 @@ func symbolsPerSection(symbols []elf.Symbol) map[elf.SectionIndex]map[uint64]str
 			continue
 		}

+		if sym.Section == elf.SHN_UNDEF || sym.Section >= elf.SHN_LORESERVE {
+			continue
+		}
+
 		if sym.Name == "" {
 			continue
 		}

 		idx := sym.Section
 		if _, ok := result[idx]; !ok {
-			result[idx] = make(map[uint64]string)
+			result[idx] = make(map[uint64]elf.Symbol)
 		}
-		result[idx][sym.Value] = symbols[i].Name
+		result[idx][sym.Value] = sym
 	}
 	return result
 }
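The new getProgType above matches an ELF section name against a prefix table and, when the prefix ends in "/", also returns the remainder of the section name (for example the kprobe target). A simplified sketch of that matching, with stand-in types and table entries rather than the vendored ones:

package main

import (
	"fmt"
	"strings"
)

// progType and sectionTypes are simplified stand-ins for the vendored types.
type progType string

var sectionTypes = map[string]progType{
	"kprobe/":     "Kprobe",
	"tracepoint/": "TracePoint",
	"xdp":         "XDP",
}

func classify(section string) (progType, string) {
	for prefix, t := range sectionTypes {
		if !strings.HasPrefix(section, prefix) {
			continue
		}
		if !strings.HasSuffix(prefix, "/") {
			return t, ""
		}
		// Prefixes ending in "/" carry an extra payload after the slash.
		return t, section[len(prefix):]
	}
	return "Unspecified", ""
}

func main() {
	t, extra := classify("kprobe/sys_execve")
	fmt.Println(t, extra) // Kprobe sys_execve
}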
vendor/github.com/cilium/ebpf/go.mod (generated, vendored; 7 lines changed)
@@ -1,8 +1,5 @@
 module github.com/cilium/ebpf

-go 1.12
+go 1.13

-require (
-	github.com/pkg/errors v0.8.1
-	golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7
-)
+require golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
vendor/github.com/cilium/ebpf/internal/btf/btf.go (generated, vendored; 432 lines changed)
@@ -4,20 +4,29 @@ import (
 	"bytes"
 	"debug/elf"
 	"encoding/binary"
+	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"math"
+	"os"
 	"reflect"
+	"sync"
 	"unsafe"

 	"github.com/cilium/ebpf/internal"
 	"github.com/cilium/ebpf/internal/unix"
-
-	"github.com/pkg/errors"
 )

 const btfMagic = 0xeB9F

+// Errors returned by BTF functions.
+var (
+	ErrNotSupported   = internal.ErrNotSupported
+	ErrNotFound       = errors.New("not found")
+	ErrNoExtendedInfo = errors.New("no extended info")
+)
+
 // Spec represents decoded BTF.
 type Spec struct {
 	rawTypes []rawType
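Among the imports added above is sync; further down this file it backs a mutex-guarded cache so the kernel's BTF is parsed only once (the kernelBTF variable used by LoadKernelSpec). A small sketch of that lazy-cache pattern, with made-up names standing in for the vendored ones:

package main

import (
	"fmt"
	"sync"
)

// cachedSpec and loadExpensive are illustrative stand-ins for the vendored
// kernelBTF cache and the BTF parsing it protects.
var cachedSpec struct {
	sync.Mutex
	value string
}

func loadExpensive() string { return "parsed kernel BTF" }

func getSpec() string {
	cachedSpec.Lock()
	defer cachedSpec.Unlock()

	if cachedSpec.value == "" {
		// First caller pays the parsing cost; later callers reuse the result.
		cachedSpec.value = loadExpensive()
	}
	return cachedSpec.value
}

func main() {
	fmt.Println(getSpec())
	fmt.Println(getSpec()) // served from the cache
}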
@@ -25,6 +34,7 @@ type Spec struct {
 	types     map[string][]Type
 	funcInfos map[string]extInfo
 	lineInfos map[string]extInfo
+	byteOrder binary.ByteOrder
 }

 type btfHeader struct {
@@ -52,6 +62,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
 	var (
 		btfSection    *elf.Section
 		btfExtSection *elf.Section
+		sectionSizes  = make(map[string]uint32)
 	)

 	for _, sec := range file.Sections {
@@ -60,6 +71,16 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
 			btfSection = sec
 		case ".BTF.ext":
 			btfExtSection = sec
+		default:
+			if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
+				break
+			}
+
+			if sec.Size > math.MaxUint32 {
+				return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
+			}
+
+			sectionSizes[sec.Name] = uint32(sec.Size)
 		}
 	}

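The added default branch records the sizes of SHT_PROGBITS and SHT_NOBITS sections so that data-section variables can be fixed up later. A standalone sketch of collecting section sizes with the standard debug/elf package, which is the stdlib API the vendored reader builds on; the object path is a placeholder:

package main

import (
	"debug/elf"
	"fmt"
	"log"
)

func main() {
	f, err := elf.Open("program.o") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	sizes := make(map[string]uint64)
	for _, sec := range f.Sections {
		// Only sections that occupy space in the file or reserve it (like .bss).
		if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
			continue
		}
		sizes[sec.Name] = sec.Size
	}
	fmt.Println(sizes)
}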
@ -67,74 +88,59 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
|
|||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
spec, err := parseBTF(btfSection.Open(), file.ByteOrder)
|
symbols, err := file.Symbols()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't read symbols: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
variableOffsets := make(map[variable]uint32)
|
||||||
|
for _, symbol := range symbols {
|
||||||
|
if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
|
||||||
|
// Ignore things like SHN_ABS
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
secName := file.Sections[symbol.Section].Name
|
||||||
|
if _, ok := sectionSizes[secName]; !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if symbol.Value > math.MaxUint32 {
|
||||||
|
return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if btfExtSection != nil {
|
if btfExtSection == nil {
|
||||||
spec.funcInfos, spec.lineInfos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
|
return spec, nil
|
||||||
if err != nil {
|
}
|
||||||
return nil, errors.Wrap(err, "can't read ext info")
|
|
||||||
}
|
spec.funcInfos, spec.lineInfos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't read ext info: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return spec, nil
|
return spec, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) (*Spec, error) {
|
func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
|
||||||
rawBTF, err := ioutil.ReadAll(btf)
|
rawTypes, rawStrings, err := parseBTF(btf, bo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "can't read BTF")
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
rd := bytes.NewReader(rawBTF)
|
err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets)
|
||||||
|
|
||||||
var header btfHeader
|
|
||||||
if err := binary.Read(rd, bo, &header); err != nil {
|
|
||||||
return nil, errors.Wrap(err, "can't read header")
|
|
||||||
}
|
|
||||||
|
|
||||||
if header.Magic != btfMagic {
|
|
||||||
return nil, errors.Errorf("incorrect magic value %v", header.Magic)
|
|
||||||
}
|
|
||||||
|
|
||||||
if header.Version != 1 {
|
|
||||||
return nil, errors.Errorf("unexpected version %v", header.Version)
|
|
||||||
}
|
|
||||||
|
|
||||||
if header.Flags != 0 {
|
|
||||||
return nil, errors.Errorf("unsupported flags %v", header.Flags)
|
|
||||||
}
|
|
||||||
|
|
||||||
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
|
|
||||||
if remainder < 0 {
|
|
||||||
return nil, errors.New("header is too short")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
|
|
||||||
return nil, errors.Wrap(err, "header padding")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
|
|
||||||
return nil, errors.Wrap(err, "can't seek to start of string section")
|
|
||||||
}
|
|
||||||
|
|
||||||
strings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "can't read type names")
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
|
types, err := inflateRawTypes(rawTypes, rawStrings)
|
||||||
return nil, errors.Wrap(err, "can't seek to start of type section")
|
|
||||||
}
|
|
||||||
|
|
||||||
rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "can't read types")
|
|
||||||
}
|
|
||||||
|
|
||||||
types, err := inflateRawTypes(rawTypes, strings)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -142,13 +148,158 @@ func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) (*Spec, error) {
|
|||||||
return &Spec{
|
return &Spec{
|
||||||
rawTypes: rawTypes,
|
rawTypes: rawTypes,
|
||||||
types: types,
|
types: types,
|
||||||
strings: strings,
|
strings: rawStrings,
|
||||||
funcInfos: make(map[string]extInfo),
|
byteOrder: bo,
|
||||||
lineInfos: make(map[string]extInfo),
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Spec) marshal(bo binary.ByteOrder) ([]byte, error) {
|
var kernelBTF struct {
|
||||||
|
sync.Mutex
|
||||||
|
*Spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadKernelSpec returns the current kernel's BTF information.
|
||||||
|
//
|
||||||
|
// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns
|
||||||
|
// ErrNotSupported if BTF is not enabled.
|
||||||
|
func LoadKernelSpec() (*Spec, error) {
|
||||||
|
kernelBTF.Lock()
|
||||||
|
defer kernelBTF.Unlock()
|
||||||
|
|
||||||
|
if kernelBTF.Spec != nil {
|
||||||
|
return kernelBTF.Spec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
kernelBTF.Spec, err = loadKernelSpec()
|
||||||
|
return kernelBTF.Spec, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadKernelSpec() (*Spec, error) {
|
||||||
|
fh, err := os.Open("/sys/kernel/btf/vmlinux")
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil, fmt.Errorf("can't open kernel BTF at /sys/kernel/btf/vmlinux: %w", ErrNotFound)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't read kernel BTF: %s", err)
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
|
||||||
|
return loadNakedSpec(fh, internal.NativeEndian, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) {
|
||||||
|
rawBTF, err := ioutil.ReadAll(btf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't read BTF: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rd := bytes.NewReader(rawBTF)
|
||||||
|
|
||||||
|
var header btfHeader
|
||||||
|
if err := binary.Read(rd, bo, &header); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't read header: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if header.Magic != btfMagic {
|
||||||
|
return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
|
||||||
|
}
|
||||||
|
|
||||||
|
if header.Version != 1 {
|
||||||
|
return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
|
||||||
|
}
|
||||||
|
|
||||||
|
if header.Flags != 0 {
|
||||||
|
return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
|
||||||
|
if remainder < 0 {
|
||||||
|
return nil, nil, errors.New("header is too short")
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("header padding: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't read type names: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't read types: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return rawTypes, rawStrings, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type variable struct {
|
||||||
|
section string
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
|
||||||
|
for i, rawType := range rawTypes {
|
||||||
|
if rawType.Kind() != kindDatasec {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
name, err := rawStrings.Lookup(rawType.NameOff)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if name == ".kconfig" || name == ".ksym" {
|
||||||
|
return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
|
||||||
|
}
|
||||||
|
|
||||||
|
size, ok := sectionSizes[name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("data section %s: missing size", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
rawTypes[i].SizeType = size
|
||||||
|
|
||||||
|
secinfos := rawType.data.([]btfVarSecinfo)
|
||||||
|
for j, secInfo := range secinfos {
|
||||||
|
id := int(secInfo.Type - 1)
|
||||||
|
if id >= len(rawTypes) {
|
||||||
|
return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
|
||||||
|
}
|
||||||
|
|
||||||
|
varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
offset, ok := variableOffsets[variable{name, varName}]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
|
||||||
|
}
|
||||||
|
|
||||||
|
secinfos[j].Offset = offset
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type marshalOpts struct {
|
||||||
|
ByteOrder binary.ByteOrder
|
||||||
|
StripFuncLinkage bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
|
||||||
var (
|
var (
|
||||||
buf bytes.Buffer
|
buf bytes.Buffer
|
||||||
header = new(btfHeader)
|
header = new(btfHeader)
|
||||||
@@ -160,17 +311,14 @@ func (s *Spec) marshal(bo binary.ByteOrder) ([]byte, error) {
 	_, _ = buf.Write(make([]byte, headerLen))

 	// Write type section, just after the header.
-	for _, typ := range s.rawTypes {
-		if typ.Kind() == kindDatasec {
-			// Datasec requires patching with information from the ELF
-			// file. We don't support this at the moment, so patch
-			// out any Datasec by turning it into a void*.
-			typ = rawType{}
-			typ.SetKind(kindPointer)
+	for _, raw := range s.rawTypes {
+		switch {
+		case opts.StripFuncLinkage && raw.Kind() == kindFunc:
+			raw.SetLinkage(linkageStatic)
 		}

-		if err := typ.Marshal(&buf, bo); err != nil {
-			return nil, errors.Wrap(err, "can't marshal BTF")
+		if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
+			return nil, fmt.Errorf("can't marshal BTF: %w", err)
 		}
 	}

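marshal now takes a marshalOpts value, and when StripFuncLinkage is set it rewrites BTF function linkage to static; later in this file that flag is derived from a runtime feature probe (haveFuncLinkage). A sketch of gating an option on a probe in that style, with illustrative names only:

package main

import (
	"errors"
	"fmt"
)

// marshalOptions and probeFuncLinkage are illustrative, not the vendored API.
type marshalOptions struct {
	StripFuncLinkage bool
}

// A real probe would try to load a tiny BTF blob and report a
// "not supported" error on kernels that reject function linkage.
func probeFuncLinkage() error {
	return errors.New("BTF func linkage: not supported")
}

func main() {
	opts := marshalOptions{
		// Strip linkage whenever the kernel can't validate it.
		StripFuncLinkage: probeFuncLinkage() != nil,
	}
	fmt.Printf("%+v\n", opts)
}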
@ -192,9 +340,9 @@ func (s *Spec) marshal(bo binary.ByteOrder) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
raw := buf.Bytes()
|
raw := buf.Bytes()
|
||||||
err := binary.Write(sliceWriter(raw[:headerLen]), bo, header)
|
err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "can't write header")
|
return nil, fmt.Errorf("can't write header: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return raw, nil
|
return raw, nil
|
||||||
@ -214,17 +362,22 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
|
|||||||
//
|
//
|
||||||
// Length is the number of bytes in the raw BPF instruction stream.
|
// Length is the number of bytes in the raw BPF instruction stream.
|
||||||
//
|
//
|
||||||
// Returns an error if there is no BTF.
|
// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
|
||||||
|
// contain extended BTF info.
|
||||||
func (s *Spec) Program(name string, length uint64) (*Program, error) {
|
func (s *Spec) Program(name string, length uint64) (*Program, error) {
|
||||||
if length == 0 {
|
if length == 0 {
|
||||||
return nil, errors.New("length musn't be zero")
|
return nil, errors.New("length musn't be zero")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if s.funcInfos == nil && s.lineInfos == nil {
|
||||||
|
return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
|
||||||
|
}
|
||||||
|
|
||||||
funcInfos, funcOK := s.funcInfos[name]
|
funcInfos, funcOK := s.funcInfos[name]
|
||||||
lineInfos, lineOK := s.lineInfos[name]
|
lineInfos, lineOK := s.lineInfos[name]
|
||||||
|
|
||||||
if !funcOK && !lineOK {
|
if !funcOK && !lineOK {
|
||||||
return nil, errors.Errorf("no BTF for program %s", name)
|
return nil, fmt.Errorf("no extended BTF info for section %s", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &Program{s, length, funcInfos, lineInfos}, nil
|
return &Program{s, length, funcInfos, lineInfos}, nil
|
||||||
@@ -233,15 +386,15 @@ func (s *Spec) Program(name string, length uint64) (*Program, error) {
 // Map finds the BTF for a map.
 //
 // Returns an error if there is no BTF for the given name.
-func (s *Spec) Map(name string) (*Map, error) {
+func (s *Spec) Map(name string) (*Map, []Member, error) {
 	var mapVar Var
 	if err := s.FindType(name, &mapVar); err != nil {
-		return nil, err
+		return nil, nil, err
 	}

 	mapStruct, ok := mapVar.Type.(*Struct)
 	if !ok {
-		return nil, errors.Errorf("expected struct, have %s", mapVar.Type)
+		return nil, nil, fmt.Errorf("expected struct, have %s", mapVar.Type)
 	}

 	var key, value Type
@ -256,23 +409,32 @@ func (s *Spec) Map(name string) (*Map, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if key == nil {
|
if key == nil {
|
||||||
return nil, errors.Errorf("map %s: missing 'key' in type", name)
|
key = (*Void)(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
if value == nil {
|
if value == nil {
|
||||||
return nil, errors.Errorf("map %s: missing 'value' in type", name)
|
value = (*Void)(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &Map{mapStruct, s, key, value}, nil
|
return &Map{s, key, value}, mapStruct.Members, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var errNotFound = errors.New("not found")
|
// Datasec returns the BTF required to create maps which represent data sections.
|
||||||
|
func (s *Spec) Datasec(name string) (*Map, error) {
|
||||||
|
var datasec Datasec
|
||||||
|
if err := s.FindType(name, &datasec); err != nil {
|
||||||
|
return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Map{s, &Void{}, &datasec}, nil
|
||||||
|
}
|
||||||
|
|
||||||
// FindType searches for a type with a specific name.
|
// FindType searches for a type with a specific name.
|
||||||
//
|
//
|
||||||
// hint determines the type of the returned Type.
|
// hint determines the type of the returned Type.
|
||||||
//
|
//
|
||||||
// Returns an error if there is no or multiple matches.
|
// Returns an error wrapping ErrNotFound if no matching
|
||||||
|
// type exists in spec.
|
||||||
func (s *Spec) FindType(name string, typ Type) error {
|
func (s *Spec) FindType(name string, typ Type) error {
|
||||||
var (
|
var (
|
||||||
wanted = reflect.TypeOf(typ)
|
wanted = reflect.TypeOf(typ)
|
||||||
@ -285,14 +447,14 @@ func (s *Spec) FindType(name string, typ Type) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if candidate != nil {
|
if candidate != nil {
|
||||||
return errors.Errorf("type %s: multiple candidates for %T", name, typ)
|
return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
|
||||||
}
|
}
|
||||||
|
|
||||||
candidate = typ
|
candidate = typ
|
||||||
}
|
}
|
||||||
|
|
||||||
if candidate == nil {
|
if candidate == nil {
|
||||||
return errors.WithMessagef(errNotFound, "type %s", name)
|
return fmt.Errorf("type %s: %w", name, ErrNotFound)
|
||||||
}
|
}
|
||||||
|
|
||||||
value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
|
value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
|
||||||
@ -307,16 +469,22 @@ type Handle struct {
|
|||||||
|
|
||||||
// NewHandle loads BTF into the kernel.
|
// NewHandle loads BTF into the kernel.
|
||||||
//
|
//
|
||||||
// Returns an error if BTF is not supported, which can
|
// Returns ErrNotSupported if BTF is not supported.
|
||||||
// be checked by IsNotSupported.
|
|
||||||
func NewHandle(spec *Spec) (*Handle, error) {
|
func NewHandle(spec *Spec) (*Handle, error) {
|
||||||
if err := haveBTF(); err != nil {
|
if err := haveBTF(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
btf, err := spec.marshal(internal.NativeEndian)
|
if spec.byteOrder != internal.NativeEndian {
|
||||||
|
return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
|
||||||
|
}
|
||||||
|
|
||||||
|
btf, err := spec.marshal(marshalOpts{
|
||||||
|
ByteOrder: internal.NativeEndian,
|
||||||
|
StripFuncLinkage: haveFuncLinkage() != nil,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "can't marshal BTF")
|
return nil, fmt.Errorf("can't marshal BTF: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if uint64(len(btf)) > math.MaxUint32 {
|
if uint64(len(btf)) > math.MaxUint32 {
|
||||||
@ -360,7 +528,6 @@ func (h *Handle) FD() int {
|
|||||||
|
|
||||||
// Map is the BTF for a map.
|
// Map is the BTF for a map.
|
||||||
type Map struct {
|
type Map struct {
|
||||||
definition *Struct
|
|
||||||
spec *Spec
|
spec *Spec
|
||||||
key, value Type
|
key, value Type
|
||||||
}
|
}
|
||||||
@ -371,12 +538,6 @@ func MapSpec(m *Map) *Spec {
|
|||||||
return m.spec
|
return m.spec
|
||||||
}
|
}
|
||||||
|
|
||||||
// MapType should be a method on Map, but is a free function
|
|
||||||
// to hide it from users of the ebpf package.
|
|
||||||
func MapType(m *Map) *Struct {
|
|
||||||
return m.definition
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapKey should be a method on Map, but is a free function
|
// MapKey should be a method on Map, but is a free function
|
||||||
// to hide it from users of the ebpf package.
|
// to hide it from users of the ebpf package.
|
||||||
func MapKey(m *Map) Type {
|
func MapKey(m *Map) Type {
|
||||||
@ -411,12 +572,12 @@ func ProgramSpec(s *Program) *Spec {
|
|||||||
func ProgramAppend(s, other *Program) error {
|
func ProgramAppend(s, other *Program) error {
|
||||||
funcInfos, err := s.funcInfos.append(other.funcInfos, s.length)
|
funcInfos, err := s.funcInfos.append(other.funcInfos, s.length)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "func infos")
|
return fmt.Errorf("func infos: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
lineInfos, err := s.lineInfos.append(other.lineInfos, s.length)
|
lineInfos, err := s.lineInfos.append(other.lineInfos, s.length)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "line infos")
|
return fmt.Errorf("line infos: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.length += other.length
|
s.length += other.length
|
||||||
@ -451,13 +612,6 @@ func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
|
|||||||
return s.lineInfos.recordSize, bytes, nil
|
return s.lineInfos.recordSize, bytes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsNotSupported returns true if the error indicates that the kernel
|
|
||||||
// doesn't support BTF.
|
|
||||||
func IsNotSupported(err error) bool {
|
|
||||||
ufe, ok := errors.Cause(err).(*internal.UnsupportedFeatureError)
|
|
||||||
return ok && ufe.Name == "BTF"
|
|
||||||
}
|
|
||||||
|
|
||||||
type bpfLoadBTFAttr struct {
|
type bpfLoadBTFAttr struct {
|
||||||
btf internal.Pointer
|
btf internal.Pointer
|
||||||
logBuf internal.Pointer
|
logBuf internal.Pointer
|
||||||
@ -477,26 +631,36 @@ func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
|
|||||||
return internal.NewFD(uint32(fd)), nil
|
return internal.NewFD(uint32(fd)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func minimalBTF(bo binary.ByteOrder) []byte {
|
func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
|
||||||
const minHeaderLength = 24
|
const minHeaderLength = 24
|
||||||
|
|
||||||
|
typesLen := uint32(binary.Size(types))
|
||||||
|
header := btfHeader{
|
||||||
|
Magic: btfMagic,
|
||||||
|
Version: 1,
|
||||||
|
HdrLen: minHeaderLength,
|
||||||
|
TypeOff: 0,
|
||||||
|
TypeLen: typesLen,
|
||||||
|
StringOff: typesLen,
|
||||||
|
StringLen: uint32(len(strings)),
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
_ = binary.Write(buf, bo, &header)
|
||||||
|
_ = binary.Write(buf, bo, types)
|
||||||
|
buf.Write(strings)
|
||||||
|
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
var haveBTF = internal.FeatureTest("BTF", "5.1", func() (bool, error) {
|
||||||
var (
|
var (
|
||||||
types struct {
|
types struct {
|
||||||
Integer btfType
|
Integer btfType
|
||||||
Var btfType
|
Var btfType
|
||||||
btfVar struct{ Linkage uint32 }
|
btfVar struct{ Linkage uint32 }
|
||||||
}
|
}
|
||||||
typLen = uint32(binary.Size(&types))
|
|
||||||
strings = []byte{0, 'a', 0}
|
strings = []byte{0, 'a', 0}
|
||||||
header = btfHeader{
|
|
||||||
Magic: btfMagic,
|
|
||||||
Version: 1,
|
|
||||||
HdrLen: minHeaderLength,
|
|
||||||
TypeOff: 0,
|
|
||||||
TypeLen: typLen,
|
|
||||||
StringOff: typLen,
|
|
||||||
StringLen: uint32(len(strings)),
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// We use a BTF_KIND_VAR here, to make sure that
|
// We use a BTF_KIND_VAR here, to make sure that
|
||||||
@ -507,16 +671,8 @@ func minimalBTF(bo binary.ByteOrder) []byte {
|
|||||||
types.Var.SetKind(kindVar)
|
types.Var.SetKind(kindVar)
|
||||||
types.Var.SizeType = 1
|
types.Var.SizeType = 1
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
btf := marshalBTF(&types, strings, internal.NativeEndian)
|
||||||
_ = binary.Write(buf, bo, &header)
|
|
||||||
_ = binary.Write(buf, bo, &types)
|
|
||||||
buf.Write(strings)
|
|
||||||
|
|
||||||
return buf.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
var haveBTF = internal.FeatureTest("BTF", "5.1", func() bool {
|
|
||||||
btf := minimalBTF(internal.NativeEndian)
|
|
||||||
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
|
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
|
||||||
btf: internal.NewSlicePointer(btf),
|
btf: internal.NewSlicePointer(btf),
|
||||||
btfSize: uint32(len(btf)),
|
btfSize: uint32(len(btf)),
|
||||||
@ -526,5 +682,35 @@ var haveBTF = internal.FeatureTest("BTF", "5.1", func() bool {
|
|||||||
}
|
}
|
||||||
// Check for EINVAL specifically, rather than err != nil since we
|
// Check for EINVAL specifically, rather than err != nil since we
|
||||||
// otherwise misdetect due to insufficient permissions.
|
// otherwise misdetect due to insufficient permissions.
|
||||||
return errors.Cause(err) != unix.EINVAL
|
return !errors.Is(err, unix.EINVAL), nil
|
||||||
|
})
|
||||||
|
|
||||||
|
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() (bool, error) {
|
||||||
|
var (
|
||||||
|
types struct {
|
||||||
|
FuncProto btfType
|
||||||
|
Func btfType
|
||||||
|
}
|
||||||
|
strings = []byte{0, 'a', 0}
|
||||||
|
)
|
||||||
|
|
||||||
|
types.FuncProto.SetKind(kindFuncProto)
|
||||||
|
types.Func.SetKind(kindFunc)
|
||||||
|
types.Func.SizeType = 1 // aka FuncProto
|
||||||
|
types.Func.NameOff = 1
|
||||||
|
types.Func.SetLinkage(linkageGlobal)
|
||||||
|
|
||||||
|
btf := marshalBTF(&types, strings, internal.NativeEndian)
|
||||||
|
|
||||||
|
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
|
||||||
|
btf: internal.NewSlicePointer(btf),
|
||||||
|
btfSize: uint32(len(btf)),
|
||||||
|
})
|
||||||
|
if err == nil {
|
||||||
|
fd.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for EINVAL specifically, rather than err != nil since we
|
||||||
|
// otherwise misdetect due to insufficient permissions.
|
||||||
|
return !errors.Is(err, unix.EINVAL), nil
|
||||||
})
|
})
|
||||||
|
vendor/github.com/cilium/ebpf/internal/btf/btf_types.go (generated, vendored; 101 lines changed)
@@ -2,9 +2,8 @@ package btf

 import (
 	"encoding/binary"
+	"fmt"
 	"io"
-
-	"github.com/pkg/errors"
 )

 // btfKind describes a Type.
@@ -32,6 +31,14 @@ const (
 	kindDatasec
 )

+type btfFuncLinkage uint8
+
+const (
+	linkageStatic btfFuncLinkage = iota
+	linkageGlobal
+	linkageExtern
+)
+
 const (
 	btfTypeKindShift = 24
 	btfTypeKindLen   = 4
@@ -43,7 +50,7 @@ const (
 type btfType struct {
 	NameOff uint32
 	/* "info" bits arrangement
-	 * bits 0-15: vlen (e.g. # of struct's members)
+	 * bits 0-15: vlen (e.g. # of struct's members), linkage
 	 * bits 16-23: unused
 	 * bits 24-27: kind (e.g. int, ptr, array...etc)
 	 * bits 28-30: unused
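The comment above describes how the info word packs vlen (or, for functions, linkage) into bits 0-15 and the kind into bits 24-27; btfTypeKindShift and btfTypeKindLen implement that split. A small worked example of the masking and shifting, using an arbitrary sample value:

package main

import "fmt"

// Constants mirror the layout described above; the sample info value is arbitrary.
const (
	kindShift = 24
	kindLen   = 4
	vlenLen   = 16
)

func mask(n uint32) uint32 { return (1 << n) - 1 }

func main() {
	info := uint32(0x0D<<kindShift | 3) // kind 13 in bits 24-27, vlen/linkage 3 in bits 0-15

	kind := (info >> kindShift) & mask(kindLen)
	vlen := info & mask(vlenLen)
	fmt.Println(kind, vlen) // 13 3
}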
@ -61,6 +68,45 @@ type btfType struct {
|
|||||||
SizeType uint32
|
SizeType uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (k btfKind) String() string {
|
||||||
|
switch k {
|
||||||
|
case kindUnknown:
|
||||||
|
return "Unknown"
|
||||||
|
case kindInt:
|
||||||
|
return "Integer"
|
||||||
|
case kindPointer:
|
||||||
|
return "Pointer"
|
||||||
|
case kindArray:
|
||||||
|
return "Array"
|
||||||
|
case kindStruct:
|
||||||
|
return "Struct"
|
||||||
|
case kindUnion:
|
||||||
|
return "Union"
|
||||||
|
case kindEnum:
|
||||||
|
return "Enumeration"
|
||||||
|
case kindForward:
|
||||||
|
return "Forward"
|
||||||
|
case kindTypedef:
|
||||||
|
return "Typedef"
|
||||||
|
case kindVolatile:
|
||||||
|
return "Volatile"
|
||||||
|
case kindConst:
|
||||||
|
return "Const"
|
||||||
|
case kindRestrict:
|
||||||
|
return "Restrict"
|
||||||
|
case kindFunc:
|
||||||
|
return "Function"
|
||||||
|
case kindFuncProto:
|
||||||
|
return "Function Proto"
|
||||||
|
case kindVar:
|
||||||
|
return "Variable"
|
||||||
|
case kindDatasec:
|
||||||
|
return "Section"
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("Unknown (%d)", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func mask(len uint32) uint32 {
|
func mask(len uint32) uint32 {
|
||||||
return (1 << len) - 1
|
return (1 << len) - 1
|
||||||
}
|
}
|
||||||
@ -90,6 +136,14 @@ func (bt *btfType) SetVlen(vlen int) {
|
|||||||
bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
|
bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) Linkage() btfFuncLinkage {
|
||||||
|
return btfFuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bt *btfType) SetLinkage(linkage btfFuncLinkage) {
|
||||||
|
bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
|
||||||
|
}
|
||||||
|
|
||||||
func (bt *btfType) Type() TypeID {
|
func (bt *btfType) Type() TypeID {
|
||||||
// TODO: Panic here if wrong kind?
|
// TODO: Panic here if wrong kind?
|
||||||
return TypeID(bt.SizeType)
|
return TypeID(bt.SizeType)
|
||||||
@ -129,6 +183,26 @@ type btfMember struct {
|
|||||||
Offset uint32
|
Offset uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type btfVarSecinfo struct {
|
||||||
|
Type TypeID
|
||||||
|
Offset uint32
|
||||||
|
Size uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfVariable struct {
|
||||||
|
Linkage uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfEnum struct {
|
||||||
|
NameOff uint32
|
||||||
|
Val int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type btfParam struct {
|
||||||
|
NameOff uint32
|
||||||
|
Type TypeID
|
||||||
|
}
|
||||||
|
|
||||||
func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
||||||
var (
|
var (
|
||||||
header btfType
|
header btfType
|
||||||
@ -139,14 +213,13 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
|||||||
if err := binary.Read(r, bo, &header); err == io.EOF {
|
if err := binary.Read(r, bo, &header); err == io.EOF {
|
||||||
return types, nil
|
return types, nil
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return nil, errors.Wrapf(err, "can't read type info for id %v", id)
|
return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var data interface{}
|
var data interface{}
|
||||||
switch header.Kind() {
|
switch header.Kind() {
|
||||||
case kindInt:
|
case kindInt:
|
||||||
// sizeof(uint32)
|
data = new(uint32)
|
||||||
data = make([]byte, 4)
|
|
||||||
case kindPointer:
|
case kindPointer:
|
||||||
case kindArray:
|
case kindArray:
|
||||||
data = new(btfArray)
|
data = new(btfArray)
|
||||||
@ -155,8 +228,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
|||||||
case kindUnion:
|
case kindUnion:
|
||||||
data = make([]btfMember, header.Vlen())
|
data = make([]btfMember, header.Vlen())
|
||||||
case kindEnum:
|
case kindEnum:
|
||||||
// sizeof(struct btf_enum)
|
data = make([]btfEnum, header.Vlen())
|
||||||
data = make([]byte, header.Vlen()*4*2)
|
|
||||||
case kindForward:
|
case kindForward:
|
||||||
case kindTypedef:
|
case kindTypedef:
|
||||||
case kindVolatile:
|
case kindVolatile:
|
||||||
@ -164,16 +236,13 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
|||||||
case kindRestrict:
|
case kindRestrict:
|
||||||
case kindFunc:
|
case kindFunc:
|
||||||
case kindFuncProto:
|
case kindFuncProto:
|
||||||
// sizeof(struct btf_param)
|
data = make([]btfParam, header.Vlen())
|
||||||
data = make([]byte, header.Vlen()*4*2)
|
|
||||||
case kindVar:
|
case kindVar:
|
||||||
// sizeof(struct btf_variable)
|
data = new(btfVariable)
|
||||||
data = make([]byte, 4)
|
|
||||||
case kindDatasec:
|
case kindDatasec:
|
||||||
// sizeof(struct btf_var_secinfo)
|
data = make([]btfVarSecinfo, header.Vlen())
|
||||||
data = make([]byte, header.Vlen()*4*3)
|
|
||||||
default:
|
default:
|
||||||
return nil, errors.Errorf("type id %v: unknown kind: %v", id, header.Kind())
|
return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
|
||||||
}
|
}
|
||||||
|
|
||||||
if data == nil {
|
if data == nil {
|
||||||
@ -182,7 +251,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := binary.Read(r, bo, data); err != nil {
|
if err := binary.Read(r, bo, data); err != nil {
|
||||||
return nil, errors.Wrapf(err, "type id %d: kind %v: can't read %T", id, header.Kind(), data)
|
return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
types = append(types, rawType{header, data})
|
types = append(types, rawType{header, data})
|
||||||
|
vendor/github.com/cilium/ebpf/internal/btf/ext_info.go (generated, vendored; 44 lines changed)
@@ -3,13 +3,13 @@ package btf
 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"

 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/internal"
-
-	"github.com/pkg/errors"
 )

 type btfExtHeader struct {
@ -25,23 +25,21 @@ type btfExtHeader struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, err error) {
|
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, err error) {
|
||||||
const expectedMagic = 0xeB9F
|
|
||||||
|
|
||||||
var header btfExtHeader
|
var header btfExtHeader
|
||||||
if err := binary.Read(r, bo, &header); err != nil {
|
if err := binary.Read(r, bo, &header); err != nil {
|
||||||
return nil, nil, errors.Wrap(err, "can't read header")
|
return nil, nil, fmt.Errorf("can't read header: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if header.Magic != expectedMagic {
|
if header.Magic != btfMagic {
|
||||||
return nil, nil, errors.Errorf("incorrect magic value %v", header.Magic)
|
return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
|
||||||
}
|
}
|
||||||
|
|
||||||
if header.Version != 1 {
|
if header.Version != 1 {
|
||||||
return nil, nil, errors.Errorf("unexpected version %v", header.Version)
|
return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
|
||||||
}
|
}
|
||||||
|
|
||||||
if header.Flags != 0 {
|
if header.Flags != 0 {
|
||||||
return nil, nil, errors.Errorf("unsupported flags %v", header.Flags)
|
return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
|
||||||
}
|
}
|
||||||
|
|
||||||
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
|
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
|
||||||
@ -53,25 +51,25 @@ func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (f
|
|||||||
// .BTF ext header. We need to ignore non-null values.
|
// .BTF ext header. We need to ignore non-null values.
|
||||||
_, err = io.CopyN(ioutil.Discard, r, remainder)
|
_, err = io.CopyN(ioutil.Discard, r, remainder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, errors.Wrap(err, "header padding")
|
return nil, nil, fmt.Errorf("header padding: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
|
if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
|
||||||
return nil, nil, errors.Wrap(err, "can't seek to function info section")
|
return nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
funcInfo, err = parseExtInfo(io.LimitReader(r, int64(header.FuncInfoLen)), bo, strings)
|
funcInfo, err = parseExtInfo(io.LimitReader(r, int64(header.FuncInfoLen)), bo, strings)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, errors.Wrap(err, "function info")
|
return nil, nil, fmt.Errorf("function info: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
|
if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
|
||||||
return nil, nil, errors.Wrap(err, "can't seek to line info section")
|
+		return nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
 	}

 	lineInfo, err = parseExtInfo(io.LimitReader(r, int64(header.LineInfoLen)), bo, strings)
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "line info")
+		return nil, nil, fmt.Errorf("line info: %w", err)
 	}

 	return funcInfo, lineInfo, nil
@@ -94,7 +92,7 @@ type extInfo struct {

 func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) {
 	if other.recordSize != ei.recordSize {
-		return extInfo{}, errors.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
+		return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
 	}

 	records := make([]extInfoRecord, 0, len(ei.records)+len(other.records))
@@ -119,7 +117,7 @@ func (ei extInfo) MarshalBinary() ([]byte, error) {
 		// while the ELF tracks it in bytes.
 		insnOff := uint32(info.InsnOff / asm.InstructionSize)
 		if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
-			return nil, errors.Wrap(err, "can't write instruction offset")
+			return nil, fmt.Errorf("can't write instruction offset: %v", err)
 		}

 		buf.Write(info.Opaque)
@@ -131,7 +129,7 @@ func (ei extInfo) MarshalBinary() ([]byte, error) {
 func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
 	var recordSize uint32
 	if err := binary.Read(r, bo, &recordSize); err != nil {
-		return nil, errors.Wrap(err, "can't read record size")
+		return nil, fmt.Errorf("can't read record size: %v", err)
 	}

 	if recordSize < 4 {
@@ -145,32 +143,32 @@ func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[st
 		if err := binary.Read(r, bo, &infoHeader); err == io.EOF {
 			return result, nil
 		} else if err != nil {
-			return nil, errors.Wrap(err, "can't read ext info header")
+			return nil, fmt.Errorf("can't read ext info header: %v", err)
 		}

 		secName, err := strings.Lookup(infoHeader.SecNameOff)
 		if err != nil {
-			return nil, errors.Wrap(err, "can't get section name")
+			return nil, fmt.Errorf("can't get section name: %w", err)
 		}

 		if infoHeader.NumInfo == 0 {
-			return nil, errors.Errorf("section %s has invalid number of records", secName)
+			return nil, fmt.Errorf("section %s has invalid number of records", secName)
 		}

 		var records []extInfoRecord
 		for i := uint32(0); i < infoHeader.NumInfo; i++ {
 			var byteOff uint32
 			if err := binary.Read(r, bo, &byteOff); err != nil {
-				return nil, errors.Wrapf(err, "section %v: can't read extended info offset", secName)
+				return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err)
 			}

 			buf := make([]byte, int(recordSize-4))
 			if _, err := io.ReadFull(r, buf); err != nil {
-				return nil, errors.Wrapf(err, "section %v: can't read record", secName)
+				return nil, fmt.Errorf("section %v: can't read record: %v", secName, err)
 			}

 			if byteOff%asm.InstructionSize != 0 {
-				return nil, errors.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
+				return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
 			}

 			records = append(records, extInfoRecord{uint64(byteOff), buf})

vendor/github.com/cilium/ebpf/internal/btf/strings.go (generated, vendored, 14 changes)
@@ -2,10 +2,10 @@ package btf

 import (
 	"bytes"
+	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"
-
-	"github.com/pkg/errors"
 )

 type stringTable []byte
@@ -13,7 +13,7 @@ type stringTable []byte
 func readStringTable(r io.Reader) (stringTable, error) {
 	contents, err := ioutil.ReadAll(r)
 	if err != nil {
-		return nil, errors.Wrap(err, "can't read string table")
+		return nil, fmt.Errorf("can't read string table: %v", err)
 	}

 	if len(contents) < 1 {
@@ -33,22 +33,22 @@ func readStringTable(r io.Reader) (stringTable, error) {

 func (st stringTable) Lookup(offset uint32) (string, error) {
 	if int64(offset) > int64(^uint(0)>>1) {
-		return "", errors.Errorf("offset %d overflows int", offset)
+		return "", fmt.Errorf("offset %d overflows int", offset)
 	}

 	pos := int(offset)
 	if pos >= len(st) {
-		return "", errors.Errorf("offset %d is out of bounds", offset)
+		return "", fmt.Errorf("offset %d is out of bounds", offset)
 	}

 	if pos > 0 && st[pos-1] != '\x00' {
-		return "", errors.Errorf("offset %d isn't start of a string", offset)
+		return "", fmt.Errorf("offset %d isn't start of a string", offset)
 	}

 	str := st[pos:]
 	end := bytes.IndexByte(str, '\x00')
 	if end == -1 {
-		return "", errors.Errorf("offset %d isn't null terminated", offset)
+		return "", fmt.Errorf("offset %d isn't null terminated", offset)
 	}

 	return string(str[:end]), nil
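
The Lookup hunks above only swap the error constructors; the underlying string-table walk is unchanged. For readers unfamiliar with BTF string tables, here is a minimal standalone sketch of the same lookup logic (a hypothetical helper, not the vendored API):

package main

import (
	"bytes"
	"fmt"
)

// lookupString mirrors the walk shown in the diff: offset must point at the
// start of a NUL-terminated entry inside the table.
func lookupString(table []byte, offset int) (string, error) {
	if offset < 0 || offset >= len(table) {
		return "", fmt.Errorf("offset %d is out of bounds", offset)
	}
	if offset > 0 && table[offset-1] != 0 {
		return "", fmt.Errorf("offset %d isn't start of a string", offset)
	}
	end := bytes.IndexByte(table[offset:], 0)
	if end == -1 {
		return "", fmt.Errorf("offset %d isn't null terminated", offset)
	}
	return string(table[offset : offset+end]), nil
}

func main() {
	table := []byte("\x00foo\x00bar\x00")
	s, err := lookupString(table, 1)
	fmt.Println(s, err) // foo <nil>
}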

vendor/github.com/cilium/ebpf/internal/btf/types.go (generated, vendored, 117 changes)
@@ -1,9 +1,9 @@
 package btf

 import (
+	"errors"
+	"fmt"
 	"math"
-
-	"github.com/pkg/errors"
 )

 const maxTypeDepth = 32
@@ -38,9 +38,10 @@ func (n Name) name() string {
 // Void is the unit type of BTF.
 type Void struct{}

-func (v Void) ID() TypeID      { return 0 }
-func (v Void) copy() Type      { return Void{} }
-func (v Void) walk(*copyStack) {}
+func (v *Void) ID() TypeID      { return 0 }
+func (v *Void) size() uint32    { return 0 }
+func (v *Void) copy() Type      { return (*Void)(nil) }
+func (v *Void) walk(*copyStack) {}

 // Int is an integer of a given length.
 type Int struct {
@@ -103,7 +104,8 @@ func (s *Struct) walk(cs *copyStack) {

 func (s *Struct) copy() Type {
 	cpy := *s
-	cpy.Members = copyMembers(cpy.Members)
+	cpy.Members = make([]Member, len(s.Members))
+	copy(cpy.Members, s.Members)
 	return &cpy
 }

@@ -126,7 +128,8 @@ func (u *Union) walk(cs *copyStack) {

 func (u *Union) copy() Type {
 	cpy := *u
-	cpy.Members = copyMembers(cpy.Members)
+	cpy.Members = make([]Member, len(u.Members))
+	copy(cpy.Members, u.Members)
 	return &cpy
 }

@@ -139,14 +142,6 @@ type Member struct {
 	Offset uint32
 }

-func copyMembers(in []Member) []Member {
-	cpy := make([]Member, 0, len(in))
-	for _, member := range in {
-		cpy = append(cpy, member)
-	}
-	return cpy
-}
-
 // Enum lists possible values.
 type Enum struct {
 	TypeID
@@ -265,15 +260,31 @@ type Datasec struct {
 	TypeID
 	Name
 	Size uint32
+	Vars []VarSecinfo
+}
+
+func (ds *Datasec) size() uint32 { return ds.Size }
+
+func (ds *Datasec) walk(cs *copyStack) {
+	for i := range ds.Vars {
+		cs.push(&ds.Vars[i].Type)
+	}
 }

-func (ds *Datasec) size() uint32    { return ds.Size }
-func (ds *Datasec) walk(*copyStack) {}
 func (ds *Datasec) copy() Type {
 	cpy := *ds
+	cpy.Vars = make([]VarSecinfo, len(ds.Vars))
+	copy(cpy.Vars, ds.Vars)
 	return &cpy
 }

+// VarSecinfo describes variable in a Datasec
+type VarSecinfo struct {
+	Type   Type
+	Offset uint32
+	Size   uint32
+}
+
 type sizer interface {
 	size() uint32
 }
@@ -326,7 +337,7 @@ func Sizeof(typ Type) (int, error) {
 			continue

 		default:
-			return 0, errors.Errorf("unrecognized type %T", typ)
+			return 0, fmt.Errorf("unrecognized type %T", typ)
 		}

 		if n > 0 && elem > math.MaxInt64/n {
@@ -405,12 +416,17 @@ var _ namer = Name("")
 // compilation units, multiple types may share the same name. A Type may form a
 // cyclic graph by pointing at itself.
 func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map[string][]Type, err error) {
-	type fixup struct {
+	type fixupDef struct {
 		id TypeID
-		typ *Type
+		expectedKind btfKind
+		typ *Type
+	}
+
+	var fixups []fixupDef
+	fixup := func(id TypeID, expectedKind btfKind, typ *Type) {
+		fixups = append(fixups, fixupDef{id, expectedKind, typ})
 	}

-	var fixups []fixup
 	convertMembers := func(raw []btfMember) ([]Member, error) {
 		// NB: The fixup below relies on pre-allocating this array to
 		// work, since otherwise append might re-allocate members.
@@ -418,7 +434,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
 		for i, btfMember := range raw {
 			name, err := rawStrings.LookupName(btfMember.NameOff)
 			if err != nil {
-				return nil, errors.Wrapf(err, "can't get name for member %d", i)
+				return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
 			}
 			members = append(members, Member{
 				Name: name,
@@ -426,13 +442,13 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
 			})
 		}
 		for i := range members {
-			fixups = append(fixups, fixup{raw[i].Type, &members[i].Type})
+			fixup(raw[i].Type, kindUnknown, &members[i].Type)
 		}
 		return members, nil
 	}

 	types := make([]Type, 0, len(rawTypes))
-	types = append(types, Void{})
+	types = append(types, (*Void)(nil))
 	namedTypes = make(map[string][]Type)

 	for i, raw := range rawTypes {
@@ -445,7 +461,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map

 		name, err := rawStrings.LookupName(raw.NameOff)
 		if err != nil {
-			return nil, errors.Wrapf(err, "can't get name for type id %d", id)
+			return nil, fmt.Errorf("can't get name for type id %d: %w", id, err)
 		}

 		switch raw.Kind() {
@@ -454,7 +470,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map

 		case kindPointer:
 			ptr := &Pointer{id, nil}
-			fixups = append(fixups, fixup{raw.Type(), &ptr.Target})
+			fixup(raw.Type(), kindUnknown, &ptr.Target)
 			typ = ptr

 		case kindArray:
@@ -463,20 +479,20 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
 			// IndexType is unused according to btf.rst.
 			// Don't make it available right now.
 			arr := &Array{id, nil, btfArr.Nelems}
-			fixups = append(fixups, fixup{btfArr.Type, &arr.Type})
+			fixup(btfArr.Type, kindUnknown, &arr.Type)
 			typ = arr

 		case kindStruct:
 			members, err := convertMembers(raw.data.([]btfMember))
 			if err != nil {
-				return nil, errors.Wrapf(err, "struct %s (id %d)", name, id)
+				return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
 			}
 			typ = &Struct{id, name, raw.Size(), members}

 		case kindUnion:
 			members, err := convertMembers(raw.data.([]btfMember))
 			if err != nil {
-				return nil, errors.Wrapf(err, "union %s (id %d)", name, id)
+				return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
 			}
 			typ = &Union{id, name, raw.Size(), members}

@@ -488,44 +504,55 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map

 		case kindTypedef:
 			typedef := &Typedef{id, name, nil}
-			fixups = append(fixups, fixup{raw.Type(), &typedef.Type})
+			fixup(raw.Type(), kindUnknown, &typedef.Type)
 			typ = typedef

 		case kindVolatile:
 			volatile := &Volatile{id, nil}
-			fixups = append(fixups, fixup{raw.Type(), &volatile.Type})
+			fixup(raw.Type(), kindUnknown, &volatile.Type)
 			typ = volatile

 		case kindConst:
 			cnst := &Const{id, nil}
-			fixups = append(fixups, fixup{raw.Type(), &cnst.Type})
+			fixup(raw.Type(), kindUnknown, &cnst.Type)
 			typ = cnst

 		case kindRestrict:
 			restrict := &Restrict{id, nil}
-			fixups = append(fixups, fixup{raw.Type(), &restrict.Type})
+			fixup(raw.Type(), kindUnknown, &restrict.Type)
 			typ = restrict

 		case kindFunc:
 			fn := &Func{id, name, nil}
-			fixups = append(fixups, fixup{raw.Type(), &fn.Type})
+			fixup(raw.Type(), kindFuncProto, &fn.Type)
 			typ = fn

 		case kindFuncProto:
 			fp := &FuncProto{id, nil}
-			fixups = append(fixups, fixup{raw.Type(), &fp.Return})
+			fixup(raw.Type(), kindUnknown, &fp.Return)
 			typ = fp

 		case kindVar:
 			v := &Var{id, name, nil}
-			fixups = append(fixups, fixup{raw.Type(), &v.Type})
+			fixup(raw.Type(), kindUnknown, &v.Type)
 			typ = v

 		case kindDatasec:
-			typ = &Datasec{id, name, raw.SizeType}
+			btfVars := raw.data.([]btfVarSecinfo)
+			vars := make([]VarSecinfo, 0, len(btfVars))
+			for _, btfVar := range btfVars {
+				vars = append(vars, VarSecinfo{
+					Offset: btfVar.Offset,
+					Size:   btfVar.Size,
+				})
+			}
+			for i := range vars {
+				fixup(btfVars[i].Type, kindVar, &vars[i].Type)
+			}
+			typ = &Datasec{id, name, raw.SizeType, vars}

 		default:
-			return nil, errors.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
+			return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
 		}

 		types = append(types, typ)
@@ -540,7 +567,17 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
 	for _, fixup := range fixups {
 		i := int(fixup.id)
 		if i >= len(types) {
-			return nil, errors.Errorf("reference to invalid type id: %d", fixup.id)
+			return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
+		}
+
+		// Default void (id 0) to unknown
+		rawKind := kindUnknown
+		if i > 0 {
+			rawKind = rawTypes[i-1].Kind()
+		}
+
+		if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
+			return nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
 		}

 		*fixup.typ = types[i]
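
The inflateRawTypes change keeps the same two-pass scheme: the first pass builds placeholder types and records fixups, the second pass resolves type IDs, now also checking the expected BTF kind. A simplified sketch of that pattern with made-up types (not the vendored API):

package main

import "fmt"

type typeID int

type node struct {
	id   typeID
	next *node // resolved in the fixup pass
}

type fixup struct {
	id  typeID // referenced type
	dst **node // pointer to patch once all types exist
}

func main() {
	types := []*node{{id: 0}, {id: 1}, {id: 2}}
	var fixups []fixup

	// First pass: record references instead of resolving them immediately.
	fixups = append(fixups, fixup{id: 2, dst: &types[1].next})

	// Second pass: every ID is known, so references can be patched safely.
	for _, f := range fixups {
		if int(f.id) >= len(types) {
			panic(fmt.Sprintf("reference to invalid type id: %d", f.id))
		}
		*f.dst = types[f.id]
	}
	fmt.Println(types[1].next.id) // 2
}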

vendor/github.com/cilium/ebpf/internal/cpu.go (generated, vendored, 60 changes)
@@ -2,10 +2,9 @@ package internal

 import (
 	"fmt"
-	"os"
+	"io/ioutil"
+	"strings"
 	"sync"
-
-	"github.com/pkg/errors"
 )

 var sysCPU struct {
@@ -18,45 +17,44 @@ var sysCPU struct {
 // Logical CPU numbers must be of the form 0-n
 func PossibleCPUs() (int, error) {
 	sysCPU.once.Do(func() {
-		sysCPU.num, sysCPU.err = parseCPUs("/sys/devices/system/cpu/possible")
+		sysCPU.num, sysCPU.err = parseCPUsFromFile("/sys/devices/system/cpu/possible")
 	})

 	return sysCPU.num, sysCPU.err
 }

-var onlineCPU struct {
-	once sync.Once
-	err  error
-	num  int
-}
-
-// OnlineCPUs returns the number of currently online CPUs
-// Logical CPU numbers must be of the form 0-n
-func OnlineCPUs() (int, error) {
-	onlineCPU.once.Do(func() {
-		onlineCPU.num, onlineCPU.err = parseCPUs("/sys/devices/system/cpu/online")
-	})
-
-	return onlineCPU.num, onlineCPU.err
-}
-
-// parseCPUs parses the number of cpus from sysfs,
-// in the format of "/sys/devices/system/cpu/{possible,online,..}.
-// Logical CPU numbers must be of the form 0-n
-func parseCPUs(path string) (int, error) {
-	file, err := os.Open(path)
+func parseCPUsFromFile(path string) (int, error) {
+	spec, err := ioutil.ReadFile(path)
 	if err != nil {
 		return 0, err
 	}
-	defer file.Close()

+	n, err := parseCPUs(string(spec))
+	if err != nil {
+		return 0, fmt.Errorf("can't parse %s: %v", path, err)
+	}
+
+	return n, nil
+}
+
+// parseCPUs parses the number of cpus from a string produced
+// by bitmap_list_string() in the Linux kernel.
+// Multiple ranges are rejected, since they can't be unified
+// into a single number.
+// This is the format of /sys/devices/system/cpu/possible, it
+// is not suitable for /sys/devices/system/cpu/online, etc.
+func parseCPUs(spec string) (int, error) {
+	if strings.Trim(spec, "\n") == "0" {
+		return 1, nil
+	}
+
 	var low, high int
-	n, _ := fmt.Fscanf(file, "%d-%d", &low, &high)
-	if n < 1 || low != 0 {
-		return 0, errors.Wrapf(err, "%s has unknown format", path)
+	n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high)
+	if n != 2 || err != nil {
+		return 0, fmt.Errorf("invalid format: %s", spec)
 	}
-	if n == 1 {
-		high = low
+	if low != 0 {
+		return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec)
 	}

 	// cpus is 0 indexed
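
The rewritten parseCPUs only accepts the single-range output of /sys/devices/system/cpu/possible, that is "0" or "0-n". A small self-contained approximation of that parser (assumed helper name, simplified error handling):

package main

import (
	"fmt"
	"strings"
)

// countPossibleCPUs parses the bitmap_list format used by
// /sys/devices/system/cpu/possible, accepting only "0" or "0-n".
func countPossibleCPUs(spec string) (int, error) {
	if strings.TrimSpace(spec) == "0" {
		return 1, nil
	}
	var low, high int
	n, err := fmt.Sscanf(spec, "%d-%d", &low, &high)
	if n != 2 || err != nil || low != 0 {
		return 0, fmt.Errorf("invalid CPU spec: %q", spec)
	}
	return high + 1, nil // CPUs are zero indexed
}

func main() {
	n, err := countPossibleCPUs("0-7\n")
	fmt.Println(n, err) // 8 <nil>
}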

vendor/github.com/cilium/ebpf/internal/errors.go (generated, vendored, 15 changes)
@@ -2,11 +2,11 @@ package internal

 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"strings"

 	"github.com/cilium/ebpf/internal/unix"
-	"github.com/pkg/errors"
 )

 // ErrorWithLog returns an error that includes logs from the
@@ -16,19 +16,20 @@ import (
 // the log. It is used to check for truncation of the output.
 func ErrorWithLog(err error, log []byte, logErr error) error {
 	logStr := strings.Trim(CString(log), "\t\r\n ")
-	if errors.Cause(logErr) == unix.ENOSPC {
+	if errors.Is(logErr, unix.ENOSPC) {
 		logStr += " (truncated...)"
 	}

-	return &loadError{err, logStr}
+	return &VerifierError{err, logStr}
 }

-type loadError struct {
+// VerifierError includes information from the eBPF verifier.
+type VerifierError struct {
 	cause error
 	log   string
 }

-func (le *loadError) Error() string {
+func (le *VerifierError) Error() string {
 	if le.log == "" {
 		return le.cause.Error()
 	}
@@ -36,10 +37,6 @@ func (le *loadError) Error() string {
 	return fmt.Sprintf("%s: %s", le.cause, le.log)
 }

-func (le *loadError) Cause() error {
-	return le.cause
-}
-
 // CString turns a NUL / zero terminated byte buffer into a string.
 func CString(in []byte) string {
 	inLen := bytes.IndexByte(in, 0)
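
This file, like the rest of the bump, moves from github.com/pkg/errors to Go 1.13 error wrapping, so callers match sentinel errors with errors.Is instead of errors.Cause. A minimal illustration of the new convention:

package main

import (
	"errors"
	"fmt"
)

var errNotSupported = errors.New("not supported")

func probe() error {
	// %w keeps the sentinel in the chain, replacing errors.Wrap from pkg/errors.
	return fmt.Errorf("can't detect feature: %w", errNotSupported)
}

func main() {
	err := probe()
	fmt.Println(errors.Is(err, errNotSupported)) // true, no errors.Cause needed
}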

vendor/github.com/cilium/ebpf/internal/fd.go (generated, vendored, 12 changes)
@@ -1,12 +1,13 @@
 package internal

 import (
+	"errors"
+	"fmt"
+	"os"
 	"runtime"
 	"strconv"

 	"github.com/cilium/ebpf/internal/unix"
-
-	"github.com/pkg/errors"
 )

 var ErrClosedFd = errors.New("use of closed file descriptor")
@@ -56,8 +57,13 @@ func (fd *FD) Dup() (*FD, error) {

 	dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0)
 	if err != nil {
-		return nil, errors.Wrap(err, "can't dup fd")
+		return nil, fmt.Errorf("can't dup fd: %v", err)
 	}

 	return NewFD(uint32(dup)), nil
 }
+
+func (fd *FD) File(name string) *os.File {
+	fd.Forget()
+	return os.NewFile(uintptr(fd.raw), name)
+}

vendor/github.com/cilium/ebpf/internal/feature.go (generated, vendored, 73 changes)
@@ -1,12 +1,14 @@
 package internal

 import (
+	"errors"
 	"fmt"
 	"sync"
-
-	"github.com/pkg/errors"
 )

+// ErrNotSupported indicates that a feature is not supported by the current kernel.
+var ErrNotSupported = errors.New("not supported")
+
 // UnsupportedFeatureError is returned by FeatureTest() functions.
 type UnsupportedFeatureError struct {
 	// The minimum Linux mainline version required for this feature.
@@ -21,33 +23,68 @@ func (ufe *UnsupportedFeatureError) Error() string {
 	return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion)
 }

+// Is indicates that UnsupportedFeatureError is ErrNotSupported.
+func (ufe *UnsupportedFeatureError) Is(target error) bool {
+	return target == ErrNotSupported
+}
+
+type featureTest struct {
+	sync.Mutex
+	successful bool
+	result     error
+}
+
+// FeatureTestFn is used to determine whether the kernel supports
+// a certain feature.
+//
+// The return values have the following semantics:
+//
+//   err != nil: the test couldn't be executed
+//   err == nil && available: the feature is available
+//   err == nil && !available: the feature isn't available
+type FeatureTestFn func() (available bool, err error)
+
 // FeatureTest wraps a function so that it is run at most once.
 //
 // name should identify the tested feature, while version must be in the
 // form Major.Minor[.Patch].
 //
-// Returns a descriptive UnsupportedFeatureError if the feature is not available.
-func FeatureTest(name, version string, fn func() bool) func() error {
+// Returns an error wrapping ErrNotSupported if the feature is not supported.
+func FeatureTest(name, version string, fn FeatureTestFn) func() error {
 	v, err := NewVersion(version)
 	if err != nil {
 		return func() error { return err }
 	}

-	var (
-		once   sync.Once
-		result error
-	)
-
+	ft := new(featureTest)
 	return func() error {
-		once.Do(func() {
-			if !fn() {
-				result = &UnsupportedFeatureError{
-					MinimumVersion: v,
-					Name:           name,
-				}
+		ft.Lock()
+		defer ft.Unlock()
+
+		if ft.successful {
+			return ft.result
+		}
+
+		available, err := fn()
+		if errors.Is(err, ErrNotSupported) {
+			// The feature test aborted because a dependent feature
+			// is missing, which we should cache.
+			available = false
+		} else if err != nil {
+			// We couldn't execute the feature test to a point
+			// where it could make a determination.
+			// Don't cache the result, just return it.
+			return fmt.Errorf("can't detect support for %s: %w", name, err)
+		}
+
+		ft.successful = true
+		if !available {
+			ft.result = &UnsupportedFeatureError{
+				MinimumVersion: v,
+				Name:           name,
 			}
-		})
-		return result
+		}
+		return ft.result
 	}
 }

@@ -61,7 +98,7 @@ func NewVersion(ver string) (Version, error) {
 	var major, minor, patch uint16
 	n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
 	if n < 2 {
-		return Version{}, errors.Errorf("invalid version: %s", ver)
+		return Version{}, fmt.Errorf("invalid version: %s", ver)
 	}
 	return Version{major, minor, patch}, nil
 }
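
FeatureTest now caches the probe result behind a mutex instead of sync.Once, which lets transient failures be retried on the next call. A simplified sketch of that memoization pattern (names are illustrative, not the vendored API):

package main

import (
	"fmt"
	"sync"
)

type probe struct {
	mu     sync.Mutex
	done   bool
	result error
}

// run caches fn's outcome only when fn reached a decision; transient
// failures (err != nil) are returned but retried on the next call.
func (p *probe) run(fn func() (bool, error)) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.done {
		return p.result
	}
	ok, err := fn()
	if err != nil {
		return fmt.Errorf("probe failed: %w", err)
	}
	p.done = true
	if !ok {
		p.result = fmt.Errorf("not supported")
	}
	return p.result
}

func main() {
	var p probe
	fmt.Println(p.run(func() (bool, error) { return true, nil })) // <nil>
}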

vendor/github.com/cilium/ebpf/internal/io.go (generated, vendored, 2 changes)
@@ -1,6 +1,6 @@
 package internal

-import "github.com/pkg/errors"
+import "errors"

 // DiscardZeroes makes sure that all written bytes are zero
 // before discarding them.

vendor/github.com/cilium/ebpf/internal/ptr.go (generated, vendored, 6 changes)
@@ -22,5 +22,9 @@ func NewStringPointer(str string) Pointer {
 		return Pointer{}
 	}

-	return Pointer{ptr: unsafe.Pointer(&[]byte(str)[0])}
+	// The kernel expects strings to be zero terminated
+	buf := make([]byte, len(str)+1)
+	copy(buf, str)
+
+	return Pointer{ptr: unsafe.Pointer(&buf[0])}
 }
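
The NewStringPointer change exists because the kernel expects NUL-terminated strings while Go strings carry no terminator; copying into a buffer one byte longer leaves a trailing zero. A quick standalone check of that detail:

package main

import "fmt"

func main() {
	str := "pinned_map"
	buf := make([]byte, len(str)+1) // extra byte stays zero, terminating the string
	copy(buf, str)
	fmt.Println(len(buf), buf[len(buf)-1]) // 11 0
}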

vendor/github.com/cilium/ebpf/internal/syscall.go (generated, vendored, 118 changes)
@@ -1,16 +1,61 @@
 package internal

 import (
+	"fmt"
+	"path/filepath"
 	"runtime"
 	"unsafe"

 	"github.com/cilium/ebpf/internal/unix"
 )

+//go:generate stringer -output syscall_string.go -type=BPFCmd
+
+// BPFCmd identifies a subcommand of the bpf syscall.
+type BPFCmd int
+
+// Well known BPF commands.
+const (
+	BPF_MAP_CREATE BPFCmd = iota
+	BPF_MAP_LOOKUP_ELEM
+	BPF_MAP_UPDATE_ELEM
+	BPF_MAP_DELETE_ELEM
+	BPF_MAP_GET_NEXT_KEY
+	BPF_PROG_LOAD
+	BPF_OBJ_PIN
+	BPF_OBJ_GET
+	BPF_PROG_ATTACH
+	BPF_PROG_DETACH
+	BPF_PROG_TEST_RUN
+	BPF_PROG_GET_NEXT_ID
+	BPF_MAP_GET_NEXT_ID
+	BPF_PROG_GET_FD_BY_ID
+	BPF_MAP_GET_FD_BY_ID
+	BPF_OBJ_GET_INFO_BY_FD
+	BPF_PROG_QUERY
+	BPF_RAW_TRACEPOINT_OPEN
+	BPF_BTF_LOAD
+	BPF_BTF_GET_FD_BY_ID
+	BPF_TASK_FD_QUERY
+	BPF_MAP_LOOKUP_AND_DELETE_ELEM
+	BPF_MAP_FREEZE
+	BPF_BTF_GET_NEXT_ID
+	BPF_MAP_LOOKUP_BATCH
+	BPF_MAP_LOOKUP_AND_DELETE_BATCH
+	BPF_MAP_UPDATE_BATCH
+	BPF_MAP_DELETE_BATCH
+	BPF_LINK_CREATE
+	BPF_LINK_UPDATE
+	BPF_LINK_GET_FD_BY_ID
+	BPF_LINK_GET_NEXT_ID
+	BPF_ENABLE_STATS
+	BPF_ITER_CREATE
+)
+
 // BPF wraps SYS_BPF.
 //
 // Any pointers contained in attr must use the Pointer type from this package.
-func BPF(cmd int, attr unsafe.Pointer, size uintptr) (uintptr, error) {
+func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
 	r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
 	runtime.KeepAlive(attr)

@@ -21,3 +66,74 @@ func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {

 	return r1, err
 }
+
+type BPFProgAttachAttr struct {
+	TargetFd     uint32
+	AttachBpfFd  uint32
+	AttachType   uint32
+	AttachFlags  uint32
+	ReplaceBpfFd uint32
+}
+
+func BPFProgAttach(attr *BPFProgAttachAttr) error {
+	_, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+	return err
+}
+
+type BPFProgDetachAttr struct {
+	TargetFd    uint32
+	AttachBpfFd uint32
+	AttachType  uint32
+}
+
+func BPFProgDetach(attr *BPFProgDetachAttr) error {
+	_, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+	return err
+}
+
+type bpfObjAttr struct {
+	fileName  Pointer
+	fd        uint32
+	fileFlags uint32
+}
+
+const bpfFSType = 0xcafe4a11
+
+// BPFObjPin wraps BPF_OBJ_PIN.
+func BPFObjPin(fileName string, fd *FD) error {
+	dirName := filepath.Dir(fileName)
+	var statfs unix.Statfs_t
+	if err := unix.Statfs(dirName, &statfs); err != nil {
+		return err
+	}
+	if uint64(statfs.Type) != bpfFSType {
+		return fmt.Errorf("%s is not on a bpf filesystem", fileName)
+	}
+
+	value, err := fd.Value()
+	if err != nil {
+		return err
+	}
+
+	attr := bpfObjAttr{
+		fileName: NewStringPointer(fileName),
+		fd:       value,
+	}
+	_, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	if err != nil {
+		return fmt.Errorf("pin object %s: %w", fileName, err)
+	}
+	return nil
+}
+
+// BPFObjGet wraps BPF_OBJ_GET.
+func BPFObjGet(fileName string) (*FD, error) {
+	attr := bpfObjAttr{
+		fileName: NewStringPointer(fileName),
+	}
+	ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	if err != nil {
+		return nil, fmt.Errorf("get object %s: %w", fileName, err)
+	}
+	return NewFD(uint32(ptr)), nil
+}
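
BPFObjPin refuses to pin unless the target directory sits on a bpf filesystem, detected by comparing the statfs magic number 0xcafe4a11. A Linux-only sketch of the same check using golang.org/x/sys/unix rather than the vendored internal package:

package main

import (
	"fmt"
	"path/filepath"

	"golang.org/x/sys/unix"
)

const bpfFSMagic = 0xcafe4a11

// onBPFFS reports whether path lives on a mounted bpffs (e.g. /sys/fs/bpf).
func onBPFFS(path string) (bool, error) {
	var st unix.Statfs_t
	if err := unix.Statfs(filepath.Dir(path), &st); err != nil {
		return false, err
	}
	return uint64(st.Type) == bpfFSMagic, nil
}

func main() {
	ok, err := onBPFFS("/sys/fs/bpf/my_map")
	fmt.Println(ok, err)
}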

vendor/github.com/cilium/ebpf/internal/syscall_string.go (generated, vendored, new file, 56 additions)
@@ -0,0 +1,56 @@
+// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT.
+
+package internal
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[BPF_MAP_CREATE-0]
+	_ = x[BPF_MAP_LOOKUP_ELEM-1]
+	_ = x[BPF_MAP_UPDATE_ELEM-2]
+	_ = x[BPF_MAP_DELETE_ELEM-3]
+	_ = x[BPF_MAP_GET_NEXT_KEY-4]
+	_ = x[BPF_PROG_LOAD-5]
+	_ = x[BPF_OBJ_PIN-6]
+	_ = x[BPF_OBJ_GET-7]
+	_ = x[BPF_PROG_ATTACH-8]
+	_ = x[BPF_PROG_DETACH-9]
+	_ = x[BPF_PROG_TEST_RUN-10]
+	_ = x[BPF_PROG_GET_NEXT_ID-11]
+	_ = x[BPF_MAP_GET_NEXT_ID-12]
+	_ = x[BPF_PROG_GET_FD_BY_ID-13]
+	_ = x[BPF_MAP_GET_FD_BY_ID-14]
+	_ = x[BPF_OBJ_GET_INFO_BY_FD-15]
+	_ = x[BPF_PROG_QUERY-16]
+	_ = x[BPF_RAW_TRACEPOINT_OPEN-17]
+	_ = x[BPF_BTF_LOAD-18]
+	_ = x[BPF_BTF_GET_FD_BY_ID-19]
+	_ = x[BPF_TASK_FD_QUERY-20]
+	_ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21]
+	_ = x[BPF_MAP_FREEZE-22]
+	_ = x[BPF_BTF_GET_NEXT_ID-23]
+	_ = x[BPF_MAP_LOOKUP_BATCH-24]
+	_ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25]
+	_ = x[BPF_MAP_UPDATE_BATCH-26]
+	_ = x[BPF_MAP_DELETE_BATCH-27]
+	_ = x[BPF_LINK_CREATE-28]
+	_ = x[BPF_LINK_UPDATE-29]
+	_ = x[BPF_LINK_GET_FD_BY_ID-30]
+	_ = x[BPF_LINK_GET_NEXT_ID-31]
+	_ = x[BPF_ENABLE_STATS-32]
+	_ = x[BPF_ITER_CREATE-33]
+}
+
+const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE"
+
+var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617}
+
+func (i BPFCmd) String() string {
+	if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) {
+		return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]]
+}
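
The generated stringer file packs every constant name into one string plus a cumulative index table, and String() slices out the right segment. A tiny example of the same technique on a made-up enum:

package main

import "fmt"

type cmd int

const (
	cmdCreate cmd = iota
	cmdLookup
	cmdUpdate
)

// Same layout stringer emits: one packed string plus cumulative offsets.
const cmdName = "cmdCreatecmdLookupcmdUpdate"

var cmdIndex = [...]uint8{0, 9, 18, 27}

func (c cmd) String() string {
	if c < 0 || int(c) >= len(cmdIndex)-1 {
		return fmt.Sprintf("cmd(%d)", int(c))
	}
	return cmdName[cmdIndex[c]:cmdIndex[c+1]]
}

func main() {
	fmt.Println(cmdLookup) // cmdLookup
}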

vendor/github.com/cilium/ebpf/internal/unix/types_linux.go (generated, vendored, 23 changes)
@@ -10,10 +10,17 @@ import (

 const (
 	ENOENT = linux.ENOENT
+	EEXIST = linux.EEXIST
 	EAGAIN = linux.EAGAIN
 	ENOSPC = linux.ENOSPC
 	EINVAL = linux.EINVAL
 	EPOLLIN = linux.EPOLLIN
+	EINTR = linux.EINTR
+	EPERM = linux.EPERM
+	ESRCH = linux.ESRCH
+	ENODEV = linux.ENODEV
+	BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
+	BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
 	BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
 	BPF_TAG_SIZE = linux.BPF_TAG_SIZE
 	SYS_BPF = linux.SYS_BPF
@@ -31,6 +38,7 @@ const (
 	PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW
 	PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC
 	RLIM_INFINITY = linux.RLIM_INFINITY
+	RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK
 )

 // Statfs_t is a wrapper
@@ -125,3 +133,18 @@ type Utsname = linux.Utsname
 func Uname(buf *Utsname) (err error) {
 	return linux.Uname(buf)
 }
+
+// Getpid is a wrapper
+func Getpid() int {
+	return linux.Getpid()
+}
+
+// Gettid is a wrapper
+func Gettid() int {
+	return linux.Gettid()
+}
+
+// Tgkill is a wrapper
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+	return linux.Tgkill(tgid, tid, sig)
+}

vendor/github.com/cilium/ebpf/internal/unix/types_other.go (generated, vendored, 26 changes)
@@ -12,9 +12,16 @@ var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime

 const (
 	ENOENT = syscall.ENOENT
+	EEXIST = syscall.EEXIST
 	EAGAIN = syscall.EAGAIN
 	ENOSPC = syscall.ENOSPC
 	EINVAL = syscall.EINVAL
+	EINTR = syscall.EINTR
+	EPERM = syscall.EPERM
+	ESRCH = syscall.ESRCH
+	ENODEV = syscall.ENODEV
+	BPF_F_RDONLY_PROG = 0
+	BPF_F_WRONLY_PROG = 0
 	BPF_OBJ_NAME_LEN = 0x10
 	BPF_TAG_SIZE = 0x8
 	SYS_BPF = 321
@@ -32,6 +39,8 @@ const (
 	PerfBitWatermark = 0x4000
 	PERF_SAMPLE_RAW = 0x400
 	PERF_FLAG_FD_CLOEXEC = 0x8
+	RLIM_INFINITY = 0x7fffffffffffffff
+	RLIMIT_MEMLOCK = 8
 )

 // Statfs_t is a wrapper
@@ -184,10 +193,25 @@ func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int

 // Utsname is a wrapper
 type Utsname struct {
 	Release [65]byte
 }

 // Uname is a wrapper
 func Uname(buf *Utsname) (err error) {
 	return errNonLinux
 }
+
+// Getpid is a wrapper
+func Getpid() int {
+	return -1
+}
+
+// Gettid is a wrapper
+func Gettid() int {
+	return -1
+}
+
+// Tgkill is a wrapper
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+	return errNonLinux
+}

vendor/github.com/cilium/ebpf/linker.go (generated, vendored, 62 changes)
@@ -1,43 +1,60 @@
 package ebpf

 import (
+	"fmt"
+
 	"github.com/cilium/ebpf/asm"
 	"github.com/cilium/ebpf/internal/btf"
-	"github.com/pkg/errors"
 )

 // link resolves bpf-to-bpf calls.
 //
 // Each library may contain multiple functions / labels, and is only linked
-// if the program being edited references one of these functions.
+// if prog references one of these functions.
 //
-// Libraries must not require linking themselves.
+// Libraries also linked.
 func link(prog *ProgramSpec, libs []*ProgramSpec) error {
-	for _, lib := range libs {
-		insns, err := linkSection(prog.Instructions, lib.Instructions)
-		if err != nil {
-			return errors.Wrapf(err, "linking %s", lib.Name)
-		}
+	var (
+		linked  = make(map[*ProgramSpec]bool)
+		pending = []asm.Instructions{prog.Instructions}
+		insns   asm.Instructions
+	)
+	for len(pending) > 0 {
+		insns, pending = pending[0], pending[1:]
+		for _, lib := range libs {
+			if linked[lib] {
+				continue
+			}

-		if len(insns) == len(prog.Instructions) {
-			continue
-		}
+			needed, err := needSection(insns, lib.Instructions)
+			if err != nil {
+				return fmt.Errorf("linking %s: %w", lib.Name, err)
+			}

-		prog.Instructions = insns
-		if prog.BTF != nil && lib.BTF != nil {
-			if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil {
-				return errors.Wrapf(err, "linking BTF of %s", lib.Name)
+			if !needed {
+				continue
+			}
+
+			linked[lib] = true
+			prog.Instructions = append(prog.Instructions, lib.Instructions...)
+			pending = append(pending, lib.Instructions)
+
+			if prog.BTF != nil && lib.BTF != nil {
+				if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil {
+					return fmt.Errorf("linking BTF of %s: %w", lib.Name, err)
+				}
 			}
 		}
 	}

 	return nil
 }

-func linkSection(insns, section asm.Instructions) (asm.Instructions, error) {
+func needSection(insns, section asm.Instructions) (bool, error) {
 	// A map of symbols to the libraries which contain them.
 	symbols, err := section.SymbolOffsets()
 	if err != nil {
-		return nil, err
+		return false, err
 	}

 	for _, ins := range insns {
@@ -45,7 +62,7 @@ func needSection(insns, section asm.Instructions) (bool, error) {
 			continue
 		}

-		if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.R1 {
+		if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.PseudoCall {
 			continue
 		}

@@ -60,11 +77,10 @@ func needSection(insns, section asm.Instructions) (bool, error) {
 		}

 		// At this point we know that at least one function in the
-		// library is called from insns. Merge the two sections.
-		// The rewrite of ins.Constant happens in asm.Instruction.Marshal.
-		return append(insns, section...), nil
+		// library is called from insns, so we have to link it.
+		return true, nil
 	}

-	// None of the functions in the section are called. Do nothing.
-	return insns, nil
+	// None of the functions in the section are called.
+	return false, nil
 }
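
link now processes a worklist so that libraries pulled in by other libraries are linked as well, with a map guarding against linking a section twice. A simplified sketch of that breadth-first pattern over hypothetical dependencies:

package main

import "fmt"

// Each "library" names the other libraries it calls into.
var deps = map[string][]string{
	"prog":    {"helpers"},
	"helpers": {"maps"},
	"maps":    {},
}

// linkOrder mimics the pending/linked worklist in the diff: start from the
// program, keep appending newly needed sections, never link one twice.
func linkOrder(root string) []string {
	linked := map[string]bool{}
	pending := []string{root}
	var order []string
	for len(pending) > 0 {
		cur := pending[0]
		pending = pending[1:]
		for _, lib := range deps[cur] {
			if linked[lib] {
				continue
			}
			linked[lib] = true
			order = append(order, lib)
			pending = append(pending, lib)
		}
	}
	return order
}

func main() {
	fmt.Println(linkOrder("prog")) // [helpers maps]
}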
|
331
vendor/github.com/cilium/ebpf/map.go
generated
vendored
331
vendor/github.com/cilium/ebpf/map.go
generated
vendored
@ -1,15 +1,25 @@
|
|||||||
package ebpf
|
package ebpf
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/cilium/ebpf/internal"
|
"github.com/cilium/ebpf/internal"
|
||||||
"github.com/cilium/ebpf/internal/btf"
|
"github.com/cilium/ebpf/internal/btf"
|
||||||
"github.com/cilium/ebpf/internal/unix"
|
"github.com/cilium/ebpf/internal/unix"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Errors returned by Map and MapIterator methods.
|
||||||
|
var (
|
||||||
|
ErrKeyNotExist = errors.New("key does not exist")
|
||||||
|
ErrKeyExist = errors.New("key already exists")
|
||||||
|
ErrIterationAborted = errors.New("iteration aborted")
|
||||||
|
)
|
||||||
|
|
||||||
|
// MapID represents the unique ID of an eBPF map
|
||||||
|
type MapID uint32
|
||||||
|
|
||||||
// MapSpec defines a Map.
|
// MapSpec defines a Map.
|
||||||
type MapSpec struct {
|
type MapSpec struct {
|
||||||
// Name is passed to the kernel as a debug aid. Must only contain
|
// Name is passed to the kernel as a debug aid. Must only contain
|
||||||
@ -21,6 +31,12 @@ type MapSpec struct {
|
|||||||
MaxEntries uint32
|
MaxEntries uint32
|
||||||
Flags uint32
|
Flags uint32
|
||||||
|
|
||||||
|
// The initial contents of the map. May be nil.
|
||||||
|
Contents []MapKV
|
||||||
|
|
||||||
|
// Whether to freeze a map after setting its initial contents.
|
||||||
|
Freeze bool
|
||||||
|
|
||||||
// InnerMap is used as a template for ArrayOfMaps and HashOfMaps
|
// InnerMap is used as a template for ArrayOfMaps and HashOfMaps
|
||||||
InnerMap *MapSpec
|
InnerMap *MapSpec
|
||||||
|
|
||||||
@ -33,16 +49,26 @@ func (ms *MapSpec) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Copy returns a copy of the spec.
|
// Copy returns a copy of the spec.
|
||||||
|
//
|
||||||
|
// MapSpec.Contents is a shallow copy.
|
||||||
func (ms *MapSpec) Copy() *MapSpec {
|
func (ms *MapSpec) Copy() *MapSpec {
|
||||||
if ms == nil {
|
if ms == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
cpy := *ms
|
cpy := *ms
|
||||||
|
cpy.Contents = make([]MapKV, len(ms.Contents))
|
||||||
|
copy(cpy.Contents, ms.Contents)
|
||||||
cpy.InnerMap = ms.InnerMap.Copy()
|
cpy.InnerMap = ms.InnerMap.Copy()
|
||||||
return &cpy
|
return &cpy
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MapKV is used to initialize the contents of a Map.
|
||||||
|
type MapKV struct {
|
||||||
|
Key interface{}
|
||||||
|
Value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
// Map represents a Map file descriptor.
|
// Map represents a Map file descriptor.
|
||||||
//
|
//
|
||||||
// It is not safe to close a map which is used by other goroutines.
|
// It is not safe to close a map which is used by other goroutines.
|
||||||
@ -81,14 +107,18 @@ func NewMapFromFD(fd int) (*Map, error) {
|
|||||||
//
|
//
|
||||||
// Creating a map for the first time will perform feature detection
|
// Creating a map for the first time will perform feature detection
|
||||||
// by creating small, temporary maps.
|
// by creating small, temporary maps.
|
||||||
|
//
|
||||||
|
// The caller is responsible for ensuring the process' rlimit is set
|
||||||
|
// sufficiently high for locking memory during map creation. This can be done
|
||||||
|
// by calling unix.Setrlimit with unix.RLIMIT_MEMLOCK prior to calling NewMap.
|
||||||
func NewMap(spec *MapSpec) (*Map, error) {
|
func NewMap(spec *MapSpec) (*Map, error) {
|
||||||
if spec.BTF == nil {
|
if spec.BTF == nil {
|
||||||
return newMapWithBTF(spec, nil)
|
return newMapWithBTF(spec, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
handle, err := btf.NewHandle(btf.MapSpec(spec.BTF))
|
handle, err := btf.NewHandle(btf.MapSpec(spec.BTF))
|
||||||
if err != nil && !btf.IsNotSupported(err) {
|
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
|
||||||
return nil, errors.Wrap(err, "can't load BTF")
|
return nil, fmt.Errorf("can't load BTF: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return newMapWithBTF(spec, handle)
|
return newMapWithBTF(spec, handle)
|
||||||
@ -100,7 +130,7 @@ func newMapWithBTF(spec *MapSpec, handle *btf.Handle) (*Map, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if spec.InnerMap == nil {
|
if spec.InnerMap == nil {
|
||||||
return nil, errors.Errorf("%s requires InnerMap", spec.Type)
|
return nil, fmt.Errorf("%s requires InnerMap", spec.Type)
|
||||||
}
|
}
|
||||||
|
|
||||||
template, err := createMap(spec.InnerMap, nil, handle)
|
template, err := createMap(spec.InnerMap, nil, handle)
|
||||||
@ -113,7 +143,7 @@ func newMapWithBTF(spec *MapSpec, handle *btf.Handle) (*Map, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, error) {
|
func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, error) {
|
||||||
spec = spec.Copy()
|
abi := newMapABIFromSpec(spec)
|
||||||
|
|
||||||
switch spec.Type {
|
switch spec.Type {
|
||||||
case ArrayOfMaps:
|
case ArrayOfMaps:
|
||||||
@ -123,43 +153,50 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if spec.ValueSize != 0 && spec.ValueSize != 4 {
|
if abi.ValueSize != 0 && abi.ValueSize != 4 {
|
||||||
return nil, errors.Errorf("ValueSize must be zero or four for map of map")
|
return nil, errors.New("ValueSize must be zero or four for map of map")
|
||||||
}
|
}
|
||||||
spec.ValueSize = 4
|
abi.ValueSize = 4
|
||||||
|
|
||||||
case PerfEventArray:
|
case PerfEventArray:
|
||||||
if spec.KeySize != 0 {
|
if abi.KeySize != 0 && abi.KeySize != 4 {
|
||||||
return nil, errors.Errorf("KeySize must be zero for perf event array")
|
return nil, errors.New("KeySize must be zero or four for perf event array")
|
||||||
}
|
|
||||||
if spec.ValueSize != 0 {
|
|
||||||
return nil, errors.Errorf("ValueSize must be zero for perf event array")
|
|
||||||
}
|
|
||||||
if spec.MaxEntries == 0 {
|
|
||||||
n, err := internal.OnlineCPUs()
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "perf event array")
|
|
||||||
}
|
|
||||||
spec.MaxEntries = uint32(n)
|
|
||||||
}
|
}
|
||||||
|
abi.KeySize = 4
|
||||||
|
|
||||||
spec.KeySize = 4
|
if abi.ValueSize != 0 && abi.ValueSize != 4 {
|
||||||
spec.ValueSize = 4
|
return nil, errors.New("ValueSize must be zero or four for perf event array")
|
||||||
|
}
|
||||||
|
abi.ValueSize = 4
|
||||||
|
|
||||||
|
if abi.MaxEntries == 0 {
|
||||||
|
n, err := internal.PossibleCPUs()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("perf event array: %w", err)
|
||||||
|
}
|
||||||
|
abi.MaxEntries = uint32(n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if abi.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze {
|
||||||
|
if err := haveMapMutabilityModifiers(); err != nil {
|
||||||
|
return nil, fmt.Errorf("map create: %w", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
attr := bpfMapCreateAttr{
|
attr := bpfMapCreateAttr{
|
||||||
mapType: spec.Type,
|
mapType: abi.Type,
|
||||||
keySize: spec.KeySize,
|
keySize: abi.KeySize,
|
||||||
valueSize: spec.ValueSize,
|
valueSize: abi.ValueSize,
|
||||||
maxEntries: spec.MaxEntries,
|
maxEntries: abi.MaxEntries,
|
||||||
flags: spec.Flags,
|
flags: abi.Flags,
|
||||||
}
|
}
|
||||||
|
|
||||||
if inner != nil {
|
if inner != nil {
|
||||||
var err error
|
var err error
|
||||||
attr.innerMapFd, err = inner.Value()
|
attr.innerMapFd, err = inner.Value()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "map create")
|
return nil, fmt.Errorf("map create: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -169,21 +206,33 @@ func createMap(spec *MapSpec, inner *internal.FD, handle *btf.Handle) (*Map, err
|
|||||||
attr.btfValueTypeID = btf.MapValue(spec.BTF).ID()
|
attr.btfValueTypeID = btf.MapValue(spec.BTF).ID()
|
||||||
}
|
}
|
||||||
|
|
||||||
name, err := newBPFObjName(spec.Name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "map create")
|
|
||||||
}
|
|
||||||
|
|
||||||
if haveObjName() == nil {
|
if haveObjName() == nil {
|
||||||
attr.mapName = name
|
attr.mapName = newBPFObjName(spec.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
fd, err := bpfMapCreate(&attr)
|
fd, err := bpfMapCreate(&attr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "map create")
|
return nil, fmt.Errorf("map create: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return newMap(fd, spec.Name, newMapABIFromSpec(spec))
|
m, err := newMap(fd, spec.Name, abi)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := m.populate(spec.Contents); err != nil {
|
||||||
|
m.Close()
|
||||||
|
return nil, fmt.Errorf("map create: can't set initial contents: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if spec.Freeze {
|
||||||
|
if err := m.Freeze(); err != nil {
|
||||||
|
m.Close()
|
||||||
|
return nil, fmt.Errorf("can't freeze map: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newMap(fd *internal.FD, name string, abi *MapABI) (*Map, error) {
|
func newMap(fd *internal.FD, name string, abi *MapABI) (*Map, error) {
|
||||||
@ -251,9 +300,9 @@ func (m *Map) Lookup(key, valueOut interface{}) error {
|
|||||||
*value = m
|
*value = m
|
||||||
return nil
|
return nil
|
||||||
case *Map:
|
case *Map:
|
||||||
return errors.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
|
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
|
||||||
case Map:
|
case Map:
|
||||||
return errors.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
|
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
|
||||||
|
|
||||||
case **Program:
|
case **Program:
|
||||||
p, err := unmarshalProgram(valueBytes)
|
p, err := unmarshalProgram(valueBytes)
|
||||||
@ -265,9 +314,9 @@ func (m *Map) Lookup(key, valueOut interface{}) error {
|
|||||||
*value = p
|
*value = p
|
||||||
return nil
|
return nil
|
||||||
case *Program:
|
case *Program:
|
||||||
return errors.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
|
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
|
||||||
case Program:
|
case Program:
|
||||||
return errors.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
|
return fmt.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return unmarshalBytes(valueOut, valueBytes)
|
return unmarshalBytes(valueOut, valueBytes)
|
||||||
@ -275,16 +324,18 @@ func (m *Map) Lookup(key, valueOut interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// LookupAndDelete retrieves and deletes a value from a Map.
|
// LookupAndDelete retrieves and deletes a value from a Map.
|
||||||
|
//
|
||||||
|
// Returns ErrKeyNotExist if the key doesn't exist.
|
||||||
func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
|
func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
|
||||||
valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
|
valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
|
||||||
|
|
||||||
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
|
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithMessage(err, "can't marshal key")
|
return fmt.Errorf("can't marshal key: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil {
|
if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil {
|
||||||
return errors.WithMessage(err, "lookup and delete and delete failed")
|
return fmt.Errorf("lookup and delete failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return unmarshalBytes(valueOut, valueBytes)
|
return unmarshalBytes(valueOut, valueBytes)
|
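This hunk replaces the old IsNotExist helper with the exported ErrKeyNotExist sentinel, so callers are expected to branch with errors.Is. A hedged sketch under that assumption; the uint32 key/value layout is illustrative:

    package example

    import (
        "errors"
        "fmt"

        "github.com/cilium/ebpf"
    )

    // takeValue pops one entry and treats a missing key as "already gone".
    func takeValue(m *ebpf.Map, key uint32) (uint32, bool, error) {
        var value uint32
        err := m.LookupAndDelete(key, &value)
        if errors.Is(err, ebpf.ErrKeyNotExist) {
            return 0, false, nil // key was not present
        }
        if err != nil {
            return 0, false, fmt.Errorf("lookup and delete: %w", err)
        }
        return value, true, nil
    }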
||||||
@ -298,7 +349,7 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
|
|||||||
valuePtr := internal.NewSlicePointer(valueBytes)
|
valuePtr := internal.NewSlicePointer(valueBytes)
|
||||||
|
|
||||||
err := m.lookup(key, valuePtr)
|
err := m.lookup(key, valuePtr)
|
||||||
if IsNotExist(err) {
|
if errors.Is(err, ErrKeyNotExist) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -308,11 +359,13 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
|
|||||||
func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error {
|
func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error {
|
||||||
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
|
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithMessage(err, "can't marshal key")
|
return fmt.Errorf("can't marshal key: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = bpfMapLookupElem(m.fd, keyPtr, valueOut)
|
if err = bpfMapLookupElem(m.fd, keyPtr, valueOut); err != nil {
|
||||||
return errors.WithMessage(err, "lookup failed")
|
return fmt.Errorf("lookup failed: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MapUpdateFlags controls the behaviour of the Map.Update call.
|
// MapUpdateFlags controls the behaviour of the Map.Update call.
|
||||||
@ -340,7 +393,7 @@ func (m *Map) Put(key, value interface{}) error {
|
|||||||
func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
|
func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
|
||||||
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
|
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithMessage(err, "can't marshal key")
|
return fmt.Errorf("can't marshal key: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var valuePtr internal.Pointer
|
var valuePtr internal.Pointer
|
||||||
@ -350,28 +403,36 @@ func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
|
|||||||
valuePtr, err = marshalPtr(value, int(m.abi.ValueSize))
|
valuePtr, err = marshalPtr(value, int(m.abi.ValueSize))
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithMessage(err, "can't marshal value")
|
return fmt.Errorf("can't marshal value: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags))
|
if err = bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags)); err != nil {
|
||||||
|
return fmt.Errorf("update failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete removes a value.
|
// Delete removes a value.
|
||||||
//
|
//
|
||||||
// Returns an error if the key does not exist, see IsNotExist.
|
// Returns ErrKeyNotExist if the key does not exist.
|
||||||
func (m *Map) Delete(key interface{}) error {
|
func (m *Map) Delete(key interface{}) error {
|
||||||
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
|
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithMessage(err, "can't marshal key")
|
return fmt.Errorf("can't marshal key: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = bpfMapDeleteElem(m.fd, keyPtr)
|
if err = bpfMapDeleteElem(m.fd, keyPtr); err != nil {
|
||||||
return errors.WithMessage(err, "can't delete key")
|
return fmt.Errorf("delete failed: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NextKey finds the key following an initial key.
|
// NextKey finds the key following an initial key.
|
||||||
//
|
//
|
||||||
// See NextKeyBytes for details.
|
// See NextKeyBytes for details.
|
||||||
|
//
|
||||||
|
// Returns ErrKeyNotExist if there is no next key.
|
||||||
func (m *Map) NextKey(key, nextKeyOut interface{}) error {
|
func (m *Map) NextKey(key, nextKeyOut interface{}) error {
|
||||||
nextKeyPtr, nextKeyBytes := makeBuffer(nextKeyOut, int(m.abi.KeySize))
|
nextKeyPtr, nextKeyBytes := makeBuffer(nextKeyOut, int(m.abi.KeySize))
|
||||||
|
|
||||||
@ -383,8 +444,10 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
err := unmarshalBytes(nextKeyOut, nextKeyBytes)
|
if err := unmarshalBytes(nextKeyOut, nextKeyBytes); err != nil {
|
||||||
return errors.WithMessage(err, "can't unmarshal next key")
|
return fmt.Errorf("can't unmarshal next key: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NextKeyBytes returns the key following an initial key as a byte slice.
|
// NextKeyBytes returns the key following an initial key as a byte slice.
|
||||||
@ -392,12 +455,14 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error {
|
|||||||
// Passing nil will return the first key.
|
// Passing nil will return the first key.
|
||||||
//
|
//
|
||||||
// Use Iterate if you want to traverse all entries in the map.
|
// Use Iterate if you want to traverse all entries in the map.
|
||||||
|
//
|
||||||
|
// Returns nil if there are no more keys.
|
||||||
func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
|
func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
|
||||||
nextKey := make([]byte, m.abi.KeySize)
|
nextKey := make([]byte, m.abi.KeySize)
|
||||||
nextKeyPtr := internal.NewSlicePointer(nextKey)
|
nextKeyPtr := internal.NewSlicePointer(nextKey)
|
||||||
|
|
||||||
err := m.nextKey(key, nextKeyPtr)
|
err := m.nextKey(key, nextKeyPtr)
|
||||||
if IsNotExist(err) {
|
if errors.Is(err, ErrKeyNotExist) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
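The doc change spells out that NextKeyBytes returns nil once the last key has been handed out. A small sketch of walking raw keys under that contract, assuming an already loaded *ebpf.Map:

    package example

    import (
        "fmt"

        "github.com/cilium/ebpf"
    )

    // dumpKeys walks every key as raw bytes, relying on the "nil means no more
    // keys" contract documented in this hunk.
    func dumpKeys(m *ebpf.Map) error {
        var cur interface{} // nil asks for the first key
        for {
            next, err := m.NextKeyBytes(cur)
            if err != nil {
                return fmt.Errorf("next key: %w", err)
            }
            if next == nil {
                return nil // no more keys
            }
            fmt.Printf("key: %x\n", next)
            cur = next
        }
    }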
||||||
@ -413,12 +478,14 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error {
|
|||||||
if key != nil {
|
if key != nil {
|
||||||
keyPtr, err = marshalPtr(key, int(m.abi.KeySize))
|
keyPtr, err = marshalPtr(key, int(m.abi.KeySize))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithMessage(err, "can't marshal key")
|
return fmt.Errorf("can't marshal key: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut)
|
if err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut); err != nil {
|
||||||
return errors.WithMessage(err, "can't get next key")
|
return fmt.Errorf("next key failed: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Iterate traverses a map.
|
// Iterate traverses a map.
|
||||||
@ -469,7 +536,7 @@ func (m *Map) Clone() (*Map, error) {
|
|||||||
|
|
||||||
dup, err := m.fd.Dup()
|
dup, err := m.fd.Dup()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "can't clone map")
|
return nil, fmt.Errorf("can't clone map: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return newMap(dup, m.name, &m.abi)
|
return newMap(dup, m.name, &m.abi)
|
||||||
@ -479,7 +546,30 @@ func (m *Map) Clone() (*Map, error) {
|
|||||||
//
|
//
|
||||||
// This requires bpffs to be mounted above fileName. See http://cilium.readthedocs.io/en/doc-1.0/kubernetes/install/#mounting-the-bpf-fs-optional
|
// This requires bpffs to be mounted above fileName. See http://cilium.readthedocs.io/en/doc-1.0/kubernetes/install/#mounting-the-bpf-fs-optional
|
||||||
func (m *Map) Pin(fileName string) error {
|
func (m *Map) Pin(fileName string) error {
|
||||||
return bpfPinObject(fileName, m.fd)
|
return internal.BPFObjPin(fileName, m.fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Freeze prevents a map to be modified from user space.
|
||||||
|
//
|
||||||
|
// It makes no changes to kernel-side restrictions.
|
||||||
|
func (m *Map) Freeze() error {
|
||||||
|
if err := haveMapMutabilityModifiers(); err != nil {
|
||||||
|
return fmt.Errorf("can't freeze map: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := bpfMapFreeze(m.fd); err != nil {
|
||||||
|
return fmt.Errorf("can't freeze map: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Map) populate(contents []MapKV) error {
|
||||||
|
for _, kv := range contents {
|
||||||
|
if err := m.Put(kv.Key, kv.Value); err != nil {
|
||||||
|
return fmt.Errorf("key %v: %w", kv.Key, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
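Pin now goes through internal.BPFObjPin and a public Freeze method appears alongside populate. A sketch of pinning a map and reopening it via LoadPinnedMap; the bpffs path is a placeholder, any location on a mounted bpffs works:

    package example

    import (
        "fmt"

        "github.com/cilium/ebpf"
    )

    // pinAndReload pins a map and opens it again from the pin.
    func pinAndReload(m *ebpf.Map) (*ebpf.Map, error) {
        const path = "/sys/fs/bpf/example_map" // hypothetical pin location

        if err := m.Pin(path); err != nil {
            return nil, fmt.Errorf("pin: %w", err)
        }

        reloaded, err := ebpf.LoadPinnedMap(path)
        if err != nil {
            return nil, fmt.Errorf("load pinned: %w", err)
        }
        return reloaded, nil
    }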
||||||
// LoadPinnedMap load a Map from a BPF file.
|
// LoadPinnedMap load a Map from a BPF file.
|
||||||
@ -487,7 +577,7 @@ func (m *Map) Pin(fileName string) error {
|
|||||||
// The function is not compatible with nested maps.
|
// The function is not compatible with nested maps.
|
||||||
// Use LoadPinnedMapExplicit in these situations.
|
// Use LoadPinnedMapExplicit in these situations.
|
||||||
func LoadPinnedMap(fileName string) (*Map, error) {
|
func LoadPinnedMap(fileName string) (*Map, error) {
|
||||||
fd, err := bpfGetObject(fileName)
|
fd, err := internal.BPFObjGet(fileName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -501,7 +591,7 @@ func LoadPinnedMap(fileName string) (*Map, error) {
|
|||||||
|
|
||||||
// LoadPinnedMapExplicit loads a map with explicit parameters.
|
// LoadPinnedMapExplicit loads a map with explicit parameters.
|
||||||
func LoadPinnedMapExplicit(fileName string, abi *MapABI) (*Map, error) {
|
func LoadPinnedMapExplicit(fileName string, abi *MapABI) (*Map, error) {
|
||||||
fd, err := bpfGetObject(fileName)
|
fd, err := internal.BPFObjGet(fileName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -516,18 +606,7 @@ func unmarshalMap(buf []byte) (*Map, error) {
|
|||||||
// Looking up an entry in a nested map or prog array returns an id,
|
// Looking up an entry in a nested map or prog array returns an id,
|
||||||
// not an fd.
|
// not an fd.
|
||||||
id := internal.NativeEndian.Uint32(buf)
|
id := internal.NativeEndian.Uint32(buf)
|
||||||
fd, err := bpfGetMapFDByID(id)
|
return NewMapFromID(MapID(id))
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
name, abi, err := newMapABIFromFd(fd)
|
|
||||||
if err != nil {
|
|
||||||
_ = fd.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return newMap(fd, name, abi)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalBinary implements BinaryMarshaler.
|
// MarshalBinary implements BinaryMarshaler.
|
||||||
@ -542,6 +621,60 @@ func (m *Map) MarshalBinary() ([]byte, error) {
|
|||||||
return buf, nil
|
return buf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func patchValue(value []byte, typ btf.Type, replacements map[string]interface{}) error {
|
||||||
|
replaced := make(map[string]bool)
|
||||||
|
replace := func(name string, offset, size int, replacement interface{}) error {
|
||||||
|
if offset+size > len(value) {
|
||||||
|
return fmt.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, err := marshalBytes(replacement, size)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("marshal %s: %w", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
copy(value[offset:offset+size], buf)
|
||||||
|
replaced[name] = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch parent := typ.(type) {
|
||||||
|
case *btf.Datasec:
|
||||||
|
for _, secinfo := range parent.Vars {
|
||||||
|
name := string(secinfo.Type.(*btf.Var).Name)
|
||||||
|
replacement, ok := replacements[name]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
err := replace(name, int(secinfo.Offset), int(secinfo.Size), replacement)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("patching %T is not supported", typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(replaced) == len(replacements) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var missing []string
|
||||||
|
for name := range replacements {
|
||||||
|
if !replaced[name] {
|
||||||
|
missing = append(missing, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(missing) == 1 {
|
||||||
|
return fmt.Errorf("unknown field: %s", missing[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("unknown fields: %s", strings.Join(missing, ","))
|
||||||
|
}
|
||||||
|
|
||||||
// MapIterator iterates a Map.
|
// MapIterator iterates a Map.
|
||||||
//
|
//
|
||||||
// See Map.Iterate.
|
// See Map.Iterate.
|
||||||
@ -562,8 +695,6 @@ func newMapIterator(target *Map) *MapIterator {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var errIterationAborted = errors.New("iteration aborted")
|
|
||||||
|
|
||||||
// Next decodes the next key and value.
|
// Next decodes the next key and value.
|
||||||
//
|
//
|
||||||
// Iterating a hash map from which keys are being deleted is not
|
// Iterating a hash map from which keys are being deleted is not
|
||||||
@ -599,7 +730,7 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
|
|||||||
mi.prevKey = mi.prevBytes
|
mi.prevKey = mi.prevBytes
|
||||||
|
|
||||||
mi.err = mi.target.Lookup(nextBytes, valueOut)
|
mi.err = mi.target.Lookup(nextBytes, valueOut)
|
||||||
if IsNotExist(mi.err) {
|
if errors.Is(mi.err, ErrKeyNotExist) {
|
||||||
// Even though the key should be valid, we couldn't look up
|
// Even though the key should be valid, we couldn't look up
|
||||||
// its value. If we're iterating a hash map this is probably
|
// its value. If we're iterating a hash map this is probably
|
||||||
// because a concurrent delete removed the value before we
|
// because a concurrent delete removed the value before we
|
||||||
@ -618,26 +749,50 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
|
|||||||
return mi.err == nil
|
return mi.err == nil
|
||||||
}
|
}
|
||||||
|
|
||||||
mi.err = errIterationAborted
|
mi.err = fmt.Errorf("%w", ErrIterationAborted)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Err returns any encountered error.
|
// Err returns any encountered error.
|
||||||
//
|
//
|
||||||
// The method must be called after Next returns nil.
|
// The method must be called after Next returns nil.
|
||||||
|
//
|
||||||
|
// Returns ErrIterationAborted if it wasn't possible to do a full iteration.
|
||||||
func (mi *MapIterator) Err() error {
|
func (mi *MapIterator) Err() error {
|
||||||
return mi.err
|
return mi.err
|
||||||
}
|
}
|
||||||
|
|
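Err() now documents the exported ErrIterationAborted sentinel instead of the old IsIterationAborted helper. A sketch of the updated error handling, assuming uint32 keys and values:

    package example

    import (
        "errors"
        "fmt"

        "github.com/cilium/ebpf"
    )

    // sumValues iterates a map and distinguishes an aborted iteration (for
    // example caused by concurrent deletes in a hash map) from other failures.
    func sumValues(m *ebpf.Map) (uint32, error) {
        var key, value, sum uint32
        it := m.Iterate()
        for it.Next(&key, &value) {
            sum += value
        }
        if err := it.Err(); err != nil {
            if errors.Is(err, ebpf.ErrIterationAborted) {
                return 0, fmt.Errorf("iteration did not complete: %w", err)
            }
            return 0, err
        }
        return sum, nil
    }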
||||||
// IsNotExist returns true if the error indicates that a
|
// MapGetNextID returns the ID of the next eBPF map.
|
||||||
// key doesn't exist.
|
//
|
||||||
func IsNotExist(err error) bool {
|
// Returns ErrNotExist, if there is no next eBPF map.
|
||||||
return errors.Cause(err) == unix.ENOENT
|
func MapGetNextID(startID MapID) (MapID, error) {
|
||||||
|
id, err := objGetNextID(internal.BPF_MAP_GET_NEXT_ID, uint32(startID))
|
||||||
|
return MapID(id), err
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsIterationAborted returns true if the iteration was aborted.
|
// NewMapFromID returns the map for a given id.
|
||||||
//
|
//
|
||||||
// This occurs when keys are deleted from a hash map during iteration.
|
// Returns ErrNotExist, if there is no eBPF map with the given id.
|
||||||
func IsIterationAborted(err error) bool {
|
func NewMapFromID(id MapID) (*Map, error) {
|
||||||
return errors.Cause(err) == errIterationAborted
|
fd, err := bpfObjGetFDByID(internal.BPF_MAP_GET_FD_BY_ID, uint32(id))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
name, abi, err := newMapABIFromFd(fd)
|
||||||
|
if err != nil {
|
||||||
|
_ = fd.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return newMap(fd, name, abi)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID returns the systemwide unique ID of the map.
|
||||||
|
func (m *Map) ID() (MapID, error) {
|
||||||
|
info, err := bpfGetMapInfoByFD(m.fd)
|
||||||
|
if err != nil {
|
||||||
|
return MapID(0), err
|
||||||
|
}
|
||||||
|
return MapID(info.id), nil
|
||||||
}
|
}
|
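The new ID helpers (MapGetNextID, NewMapFromID, Map.ID) end the map.go changes. A hedged sketch of enumerating every map loaded in the kernel, stopping when MapGetNextID reports the new ErrNotExist sentinel:

    package example

    import (
        "errors"
        "fmt"

        "github.com/cilium/ebpf"
    )

    // listMapIDs walks all map IDs known to the kernel via the ID-based API.
    func listMapIDs() error {
        var id ebpf.MapID
        for {
            next, err := ebpf.MapGetNextID(id)
            if errors.Is(err, ebpf.ErrNotExist) {
                return nil // no more maps
            }
            if err != nil {
                return err
            }

            m, err := ebpf.NewMapFromID(next)
            if err != nil {
                return err
            }
            fmt.Printf("map id %d\n", next)
            _ = m.Close()

            id = next
        }
    }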
||||||
|
24 vendor/github.com/cilium/ebpf/marshalers.go generated vendored
@ -4,13 +4,13 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding"
|
"encoding"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
"reflect"
|
"reflect"
|
||||||
"runtime"
|
"runtime"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"github.com/cilium/ebpf/internal"
|
"github.com/cilium/ebpf/internal"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
|
func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
|
||||||
@ -46,7 +46,9 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
|
|||||||
default:
|
default:
|
||||||
var wr bytes.Buffer
|
var wr bytes.Buffer
|
||||||
err = binary.Write(&wr, internal.NativeEndian, value)
|
err = binary.Write(&wr, internal.NativeEndian, value)
|
||||||
err = errors.Wrapf(err, "encoding %T", value)
|
if err != nil {
|
||||||
|
err = fmt.Errorf("encoding %T: %v", value, err)
|
||||||
|
}
|
||||||
buf = wr.Bytes()
|
buf = wr.Bytes()
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -54,7 +56,7 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(buf) != length {
|
if len(buf) != length {
|
||||||
return nil, errors.Errorf("%T doesn't marshal to %d bytes", data, length)
|
return nil, fmt.Errorf("%T doesn't marshal to %d bytes", data, length)
|
||||||
}
|
}
|
||||||
return buf, nil
|
return buf, nil
|
||||||
}
|
}
|
||||||
@ -95,8 +97,10 @@ func unmarshalBytes(data interface{}, buf []byte) error {
|
|||||||
return errors.New("require pointer to []byte")
|
return errors.New("require pointer to []byte")
|
||||||
default:
|
default:
|
||||||
rd := bytes.NewReader(buf)
|
rd := bytes.NewReader(buf)
|
||||||
err := binary.Read(rd, internal.NativeEndian, value)
|
if err := binary.Read(rd, internal.NativeEndian, value); err != nil {
|
||||||
return errors.Wrapf(err, "decoding %T", value)
|
return fmt.Errorf("decoding %T: %v", value, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -120,7 +124,7 @@ func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, er
|
|||||||
sliceValue := reflect.ValueOf(slice)
|
sliceValue := reflect.ValueOf(slice)
|
||||||
sliceLen := sliceValue.Len()
|
sliceLen := sliceValue.Len()
|
||||||
if sliceLen > possibleCPUs {
|
if sliceLen > possibleCPUs {
|
||||||
return internal.Pointer{}, errors.Errorf("per-CPU value exceeds number of CPUs")
|
return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs")
|
||||||
}
|
}
|
||||||
|
|
||||||
alignedElemLength := align(elemLength, 8)
|
alignedElemLength := align(elemLength, 8)
|
||||||
@ -147,7 +151,7 @@ func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, er
|
|||||||
func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) error {
|
func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) error {
|
||||||
slicePtrType := reflect.TypeOf(slicePtr)
|
slicePtrType := reflect.TypeOf(slicePtr)
|
||||||
if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
|
if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
|
||||||
return errors.Errorf("per-cpu value requires pointer to slice")
|
return fmt.Errorf("per-cpu value requires pointer to slice")
|
||||||
}
|
}
|
||||||
|
|
||||||
possibleCPUs, err := internal.PossibleCPUs()
|
possibleCPUs, err := internal.PossibleCPUs()
|
||||||
@ -166,7 +170,7 @@ func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) erro
|
|||||||
|
|
||||||
step := len(buf) / possibleCPUs
|
step := len(buf) / possibleCPUs
|
||||||
if step < elemLength {
|
if step < elemLength {
|
||||||
return errors.Errorf("per-cpu element length is larger than available data")
|
return fmt.Errorf("per-cpu element length is larger than available data")
|
||||||
}
|
}
|
||||||
for i := 0; i < possibleCPUs; i++ {
|
for i := 0; i < possibleCPUs; i++ {
|
||||||
var elem interface{}
|
var elem interface{}
|
||||||
@ -184,7 +188,7 @@ func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) erro
|
|||||||
|
|
||||||
err := unmarshalBytes(elem, elemBytes)
|
err := unmarshalBytes(elem, elemBytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "cpu %d", i)
|
return fmt.Errorf("cpu %d: %w", i, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
buf = buf[step:]
|
buf = buf[step:]
|
||||||
|
262 vendor/github.com/cilium/ebpf/prog.go generated vendored
@ -2,20 +2,25 @@ package ebpf
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/cilium/ebpf/asm"
|
"github.com/cilium/ebpf/asm"
|
||||||
"github.com/cilium/ebpf/internal"
|
"github.com/cilium/ebpf/internal"
|
||||||
"github.com/cilium/ebpf/internal/btf"
|
"github.com/cilium/ebpf/internal/btf"
|
||||||
"github.com/cilium/ebpf/internal/unix"
|
"github.com/cilium/ebpf/internal/unix"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ErrNotSupported is returned whenever the kernel doesn't support a feature.
|
||||||
|
var ErrNotSupported = internal.ErrNotSupported
|
||||||
|
|
||||||
|
// ProgramID represents the unique ID of an eBPF program
|
||||||
|
type ProgramID uint32
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN.
|
// Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN.
|
||||||
// This is currently the maximum of spare space allocated for SKB
|
// This is currently the maximum of spare space allocated for SKB
|
||||||
@ -41,17 +46,33 @@ type ProgramOptions struct {
|
|||||||
type ProgramSpec struct {
|
type ProgramSpec struct {
|
||||||
// Name is passed to the kernel as a debug aid. Must only contain
|
// Name is passed to the kernel as a debug aid. Must only contain
|
||||||
// alpha numeric and '_' characters.
|
// alpha numeric and '_' characters.
|
||||||
Name string
|
Name string
|
||||||
Type ProgramType
|
// Type determines at which hook in the kernel a program will run.
|
||||||
AttachType AttachType
|
Type ProgramType
|
||||||
Instructions asm.Instructions
|
AttachType AttachType
|
||||||
License string
|
// Name of a kernel data structure to attach to. It's interpretation
|
||||||
|
// depends on Type and AttachType.
|
||||||
|
AttachTo string
|
||||||
|
Instructions asm.Instructions
|
||||||
|
|
||||||
|
// License of the program. Some helpers are only available if
|
||||||
|
// the license is deemed compatible with the GPL.
|
||||||
|
//
|
||||||
|
// See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1
|
||||||
|
License string
|
||||||
|
|
||||||
|
// Version used by tracing programs.
|
||||||
|
//
|
||||||
|
// Deprecated: superseded by BTF.
|
||||||
KernelVersion uint32
|
KernelVersion uint32
|
||||||
|
|
||||||
// The BTF associated with this program. Changing Instructions
|
// The BTF associated with this program. Changing Instructions
|
||||||
// will most likely invalidate the contained data, and may
|
// will most likely invalidate the contained data, and may
|
||||||
// result in errors when attempting to load it into the kernel.
|
// result in errors when attempting to load it into the kernel.
|
||||||
BTF *btf.Program
|
BTF *btf.Program
|
||||||
|
|
||||||
|
// The byte order this program was compiled for, may be nil.
|
||||||
|
ByteOrder binary.ByteOrder
|
||||||
}
|
}
|
||||||
|
|
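ProgramSpec grows AttachTo and ByteOrder here, and convertProgramSpec later rejects programs compiled for the wrong endianness. A hedged sketch of a spec that uses only fields documented in this diff; the instructions and license are illustrative, and AttachTo stays empty because it is only interpreted for certain Type/AttachType combinations:

    package example

    import (
        "github.com/cilium/ebpf"
        "github.com/cilium/ebpf/asm"
    )

    // socketFilterSpec builds a tiny "return 0" socket filter.
    func socketFilterSpec() *ebpf.ProgramSpec {
        return &ebpf.ProgramSpec{
            Name: "example_filter",
            Type: ebpf.SocketFilter,
            Instructions: asm.Instructions{
                asm.LoadImm(asm.R0, 0, asm.DWord),
                asm.Return(),
            },
            License: "MIT",
        }
    }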
||||||
// Copy returns a copy of the spec.
|
// Copy returns a copy of the spec.
|
||||||
@ -74,9 +95,10 @@ type Program struct {
|
|||||||
// otherwise it is empty.
|
// otherwise it is empty.
|
||||||
VerifierLog string
|
VerifierLog string
|
||||||
|
|
||||||
fd *internal.FD
|
fd *internal.FD
|
||||||
name string
|
name string
|
||||||
abi ProgramABI
|
abi ProgramABI
|
||||||
|
attachType AttachType
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewProgram creates a new Program.
|
// NewProgram creates a new Program.
|
||||||
@ -97,8 +119,8 @@ func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
|
|||||||
}
|
}
|
||||||
|
|
||||||
handle, err := btf.NewHandle(btf.ProgramSpec(spec.BTF))
|
handle, err := btf.NewHandle(btf.ProgramSpec(spec.BTF))
|
||||||
if err != nil && !btf.IsNotSupported(err) {
|
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
|
||||||
return nil, errors.Wrap(err, "can't load BTF")
|
return nil, fmt.Errorf("can't load BTF: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return newProgramWithBTF(spec, handle, opts)
|
return newProgramWithBTF(spec, handle, opts)
|
||||||
@ -130,6 +152,7 @@ func newProgramWithBTF(spec *ProgramSpec, btf *btf.Handle, opts ProgramOptions)
|
|||||||
return prog, nil
|
return prog, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logErr := err
|
||||||
if opts.LogLevel == 0 {
|
if opts.LogLevel == 0 {
|
||||||
// Re-run with the verifier enabled to get better error messages.
|
// Re-run with the verifier enabled to get better error messages.
|
||||||
logBuf = make([]byte, logSize)
|
logBuf = make([]byte, logSize)
|
||||||
@ -137,11 +160,11 @@ func newProgramWithBTF(spec *ProgramSpec, btf *btf.Handle, opts ProgramOptions)
|
|||||||
attr.logSize = uint32(len(logBuf))
|
attr.logSize = uint32(len(logBuf))
|
||||||
attr.logBuf = internal.NewSlicePointer(logBuf)
|
attr.logBuf = internal.NewSlicePointer(logBuf)
|
||||||
|
|
||||||
_, logErr := bpfProgLoad(attr)
|
_, logErr = bpfProgLoad(attr)
|
||||||
err = internal.ErrorWithLog(err, logBuf, logErr)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, errors.Wrap(err, "can't load program")
|
err = internal.ErrorWithLog(err, logBuf, logErr)
|
||||||
|
return nil, fmt.Errorf("can't load program: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewProgramFromFD creates a program from a raw fd.
|
// NewProgramFromFD creates a program from a raw fd.
|
||||||
@ -181,6 +204,10 @@ func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr
|
|||||||
return nil, errors.New("License cannot be empty")
|
return nil, errors.New("License cannot be empty")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian {
|
||||||
|
return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
|
||||||
|
}
|
||||||
|
|
||||||
buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize))
|
buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize))
|
||||||
err := spec.Instructions.Marshal(buf, internal.NativeEndian)
|
err := spec.Instructions.Marshal(buf, internal.NativeEndian)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -195,15 +222,11 @@ func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr
|
|||||||
insCount: insCount,
|
insCount: insCount,
|
||||||
instructions: internal.NewSlicePointer(bytecode),
|
instructions: internal.NewSlicePointer(bytecode),
|
||||||
license: internal.NewStringPointer(spec.License),
|
license: internal.NewStringPointer(spec.License),
|
||||||
}
|
kernelVersion: spec.KernelVersion,
|
||||||
|
|
||||||
name, err := newBPFObjName(spec.Name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if haveObjName() == nil {
|
if haveObjName() == nil {
|
||||||
attr.progName = name
|
attr.progName = newBPFObjName(spec.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
if handle != nil && spec.BTF != nil {
|
if handle != nil && spec.BTF != nil {
|
||||||
@ -211,7 +234,7 @@ func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr
|
|||||||
|
|
||||||
recSize, bytes, err := btf.ProgramLineInfos(spec.BTF)
|
recSize, bytes, err := btf.ProgramLineInfos(spec.BTF)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "can't get BTF line infos")
|
return nil, fmt.Errorf("can't get BTF line infos: %w", err)
|
||||||
}
|
}
|
||||||
attr.lineInfoRecSize = recSize
|
attr.lineInfoRecSize = recSize
|
||||||
attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
|
attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
|
||||||
@ -219,13 +242,23 @@ func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr
|
|||||||
|
|
||||||
recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF)
|
recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "can't get BTF function infos")
|
return nil, fmt.Errorf("can't get BTF function infos: %w", err)
|
||||||
}
|
}
|
||||||
attr.funcInfoRecSize = recSize
|
attr.funcInfoRecSize = recSize
|
||||||
attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
|
attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
|
||||||
attr.funcInfo = internal.NewSlicePointer(bytes)
|
attr.funcInfo = internal.NewSlicePointer(bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if spec.AttachTo != "" {
|
||||||
|
target, err := resolveBTFType(spec.AttachTo, spec.Type, spec.AttachType)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if target != nil {
|
||||||
|
attr.attachBTFID = target.ID()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return attr, nil
|
return attr, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -267,7 +300,7 @@ func (p *Program) Clone() (*Program, error) {
|
|||||||
|
|
||||||
dup, err := p.fd.Dup()
|
dup, err := p.fd.Dup()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "can't clone program")
|
return nil, fmt.Errorf("can't clone program: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return newProgram(dup, p.name, &p.abi), nil
|
return newProgram(dup, p.name, &p.abi), nil
|
||||||
@ -277,7 +310,10 @@ func (p *Program) Clone() (*Program, error) {
|
|||||||
//
|
//
|
||||||
// This requires bpffs to be mounted above fileName. See http://cilium.readthedocs.io/en/doc-1.0/kubernetes/install/#mounting-the-bpf-fs-optional
|
// This requires bpffs to be mounted above fileName. See http://cilium.readthedocs.io/en/doc-1.0/kubernetes/install/#mounting-the-bpf-fs-optional
|
||||||
func (p *Program) Pin(fileName string) error {
|
func (p *Program) Pin(fileName string) error {
|
||||||
return errors.Wrap(bpfPinObject(fileName, p.fd), "can't pin program")
|
if err := internal.BPFObjPin(fileName, p.fd); err != nil {
|
||||||
|
return fmt.Errorf("can't pin program: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close unloads the program from the kernel.
|
// Close unloads the program from the kernel.
|
||||||
@ -297,23 +333,33 @@ func (p *Program) Close() error {
|
|||||||
//
|
//
|
||||||
// This function requires at least Linux 4.12.
|
// This function requires at least Linux 4.12.
|
||||||
func (p *Program) Test(in []byte) (uint32, []byte, error) {
|
func (p *Program) Test(in []byte) (uint32, []byte, error) {
|
||||||
ret, out, _, err := p.testRun(in, 1)
|
ret, out, _, err := p.testRun(in, 1, nil)
|
||||||
return ret, out, errors.Wrap(err, "can't test program")
|
if err != nil {
|
||||||
|
return ret, nil, fmt.Errorf("can't test program: %w", err)
|
||||||
|
}
|
||||||
|
return ret, out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Benchmark runs the Program with the given input for a number of times
|
// Benchmark runs the Program with the given input for a number of times
|
||||||
// and returns the time taken per iteration.
|
// and returns the time taken per iteration.
|
||||||
//
|
//
|
||||||
// The returned value is the return value of the last execution of
|
// Returns the result of the last execution of the program and the time per
|
||||||
// the program.
|
// run or an error. reset is called whenever the benchmark syscall is
|
||||||
|
// interrupted, and should be set to testing.B.ResetTimer or similar.
|
||||||
|
//
|
||||||
|
// Note: profiling a call to this function will skew it's results, see
|
||||||
|
// https://github.com/cilium/ebpf/issues/24
|
||||||
//
|
//
|
||||||
// This function requires at least Linux 4.12.
|
// This function requires at least Linux 4.12.
|
||||||
func (p *Program) Benchmark(in []byte, repeat int) (uint32, time.Duration, error) {
|
func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) {
|
||||||
ret, _, total, err := p.testRun(in, repeat)
|
ret, _, total, err := p.testRun(in, repeat, reset)
|
||||||
return ret, total, errors.Wrap(err, "can't benchmark program")
|
if err != nil {
|
||||||
|
return ret, total, fmt.Errorf("can't benchmark program: %w", err)
|
||||||
|
}
|
||||||
|
return ret, total, nil
|
||||||
}
|
}
|
||||||
|
|
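Benchmark gains a reset callback that fires whenever the test-run syscall is interrupted; the doc comment suggests wiring it to testing.B.ResetTimer. A sketch of that wiring, assuming a loaded *ebpf.Program and the 14-byte minimum input used elsewhere in this diff; the metric name is made up:

    package example

    import (
        "testing"

        "github.com/cilium/ebpf"
    )

    // benchmarkProgram runs prog b.N times through BPF_PROG_TEST_RUN, resetting
    // the Go benchmark timer if the syscall gets interrupted.
    func benchmarkProgram(b *testing.B, prog *ebpf.Program) {
        in := make([]byte, 14) // programs require at least 14 bytes of input

        b.ResetTimer()
        ret, duration, err := prog.Benchmark(in, b.N, b.ResetTimer)
        if err != nil {
            b.Fatal(err)
        }
        b.ReportMetric(float64(duration.Nanoseconds()), "bpf-ns/op")
        _ = ret
    }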
||||||
var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() bool {
|
var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() (bool, error) {
|
||||||
prog, err := NewProgram(&ProgramSpec{
|
prog, err := NewProgram(&ProgramSpec{
|
||||||
Type: SocketFilter,
|
Type: SocketFilter,
|
||||||
Instructions: asm.Instructions{
|
Instructions: asm.Instructions{
|
||||||
@ -324,31 +370,26 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() b
|
|||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// This may be because we lack sufficient permissions, etc.
|
// This may be because we lack sufficient permissions, etc.
|
||||||
return false
|
return false, err
|
||||||
}
|
}
|
||||||
defer prog.Close()
|
defer prog.Close()
|
||||||
|
|
||||||
fd, err := prog.fd.Value()
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Programs require at least 14 bytes input
|
// Programs require at least 14 bytes input
|
||||||
in := make([]byte, 14)
|
in := make([]byte, 14)
|
||||||
attr := bpfProgTestRunAttr{
|
attr := bpfProgTestRunAttr{
|
||||||
fd: fd,
|
fd: uint32(prog.FD()),
|
||||||
dataSizeIn: uint32(len(in)),
|
dataSizeIn: uint32(len(in)),
|
||||||
dataIn: internal.NewSlicePointer(in),
|
dataIn: internal.NewSlicePointer(in),
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = internal.BPF(_ProgTestRun, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
|
err = bpfProgTestRun(&attr)
|
||||||
|
|
||||||
// Check for EINVAL specifically, rather than err != nil since we
|
// Check for EINVAL specifically, rather than err != nil since we
|
||||||
// otherwise misdetect due to insufficient permissions.
|
// otherwise misdetect due to insufficient permissions.
|
||||||
return errors.Cause(err) != unix.EINVAL
|
return !errors.Is(err, unix.EINVAL), nil
|
||||||
})
|
})
|
||||||
|
|
||||||
func (p *Program) testRun(in []byte, repeat int) (uint32, []byte, time.Duration, error) {
|
func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) {
|
||||||
if uint(repeat) > math.MaxUint32 {
|
if uint(repeat) > math.MaxUint32 {
|
||||||
return 0, nil, 0, fmt.Errorf("repeat is too high")
|
return 0, nil, 0, fmt.Errorf("repeat is too high")
|
||||||
}
|
}
|
||||||
@ -386,9 +427,20 @@ func (p *Program) testRun(in []byte, repeat int) (uint32, []byte, time.Duration,
|
|||||||
repeat: uint32(repeat),
|
repeat: uint32(repeat),
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = internal.BPF(_ProgTestRun, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
|
for {
|
||||||
if err != nil {
|
err = bpfProgTestRun(&attr)
|
||||||
return 0, nil, 0, errors.Wrap(err, "can't run test")
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, unix.EINTR) {
|
||||||
|
if reset != nil {
|
||||||
|
reset()
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, nil, 0, fmt.Errorf("can't run test: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if int(attr.dataSizeOut) > cap(out) {
|
if int(attr.dataSizeOut) > cap(out) {
|
||||||
@ -410,18 +462,7 @@ func unmarshalProgram(buf []byte) (*Program, error) {
|
|||||||
// Looking up an entry in a nested map or prog array returns an id,
|
// Looking up an entry in a nested map or prog array returns an id,
|
||||||
// not an fd.
|
// not an fd.
|
||||||
id := internal.NativeEndian.Uint32(buf)
|
id := internal.NativeEndian.Uint32(buf)
|
||||||
fd, err := bpfGetProgramFDByID(id)
|
return NewProgramFromID(ProgramID(id))
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
name, abi, err := newProgramABIFromFd(fd)
|
|
||||||
if err != nil {
|
|
||||||
_ = fd.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return newProgram(fd, name, abi), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalBinary implements BinaryMarshaler.
|
// MarshalBinary implements BinaryMarshaler.
|
||||||
@ -436,7 +477,9 @@ func (p *Program) MarshalBinary() ([]byte, error) {
|
|||||||
return buf, nil
|
return buf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Attach a Program to a container object fd
|
// Attach a Program.
|
||||||
|
//
|
||||||
|
// Deprecated: use link.RawAttachProgram instead.
|
||||||
func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error {
|
func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error {
|
||||||
if fd < 0 {
|
if fd < 0 {
|
||||||
return errors.New("invalid fd")
|
return errors.New("invalid fd")
|
||||||
@ -447,42 +490,47 @@ func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
attr := bpfProgAlterAttr{
|
attr := internal.BPFProgAttachAttr{
|
||||||
targetFd: uint32(fd),
|
TargetFd: uint32(fd),
|
||||||
attachBpfFd: pfd,
|
AttachBpfFd: pfd,
|
||||||
attachType: uint32(typ),
|
AttachType: uint32(typ),
|
||||||
attachFlags: uint32(flags),
|
AttachFlags: uint32(flags),
|
||||||
}
|
}
|
||||||
|
|
||||||
return bpfProgAlter(_ProgAttach, &attr)
|
return internal.BPFProgAttach(&attr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Detach a Program from a container object fd
|
// Detach a Program.
|
||||||
|
//
|
||||||
|
// Deprecated: use link.RawDetachProgram instead.
|
||||||
func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error {
|
func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error {
|
||||||
if fd < 0 {
|
if fd < 0 {
|
||||||
return errors.New("invalid fd")
|
return errors.New("invalid fd")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if flags != 0 {
|
||||||
|
return errors.New("flags must be zero")
|
||||||
|
}
|
||||||
|
|
||||||
pfd, err := p.fd.Value()
|
pfd, err := p.fd.Value()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
attr := bpfProgAlterAttr{
|
attr := internal.BPFProgDetachAttr{
|
||||||
targetFd: uint32(fd),
|
TargetFd: uint32(fd),
|
||||||
attachBpfFd: pfd,
|
AttachBpfFd: pfd,
|
||||||
attachType: uint32(typ),
|
AttachType: uint32(typ),
|
||||||
attachFlags: uint32(flags),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return bpfProgAlter(_ProgDetach, &attr)
|
return internal.BPFProgDetach(&attr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadPinnedProgram loads a Program from a BPF file.
|
// LoadPinnedProgram loads a Program from a BPF file.
|
||||||
//
|
//
|
||||||
// Requires at least Linux 4.11.
|
// Requires at least Linux 4.11.
|
||||||
func LoadPinnedProgram(fileName string) (*Program, error) {
|
func LoadPinnedProgram(fileName string) (*Program, error) {
|
||||||
fd, err := bpfGetObject(fileName)
|
fd, err := internal.BPFObjGet(fileName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -490,7 +538,7 @@ func LoadPinnedProgram(fileName string) (*Program, error) {
|
|||||||
name, abi, err := newProgramABIFromFd(fd)
|
name, abi, err := newProgramABIFromFd(fd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = fd.Close()
|
_ = fd.Close()
|
||||||
return nil, errors.Wrapf(err, "can't get ABI for %s", fileName)
|
return nil, fmt.Errorf("can't get ABI for %s: %w", fileName, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return newProgram(fd, name, abi), nil
|
return newProgram(fd, name, abi), nil
|
||||||
@ -512,9 +560,63 @@ func SanitizeName(name string, replacement rune) string {
|
|||||||
}, name)
|
}, name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsNotSupported returns true if an error occurred because
|
// ProgramGetNextID returns the ID of the next eBPF program.
|
||||||
// the kernel does not have support for a specific feature.
|
//
|
||||||
func IsNotSupported(err error) bool {
|
// Returns ErrNotExist, if there is no next eBPF program.
|
||||||
_, notSupported := errors.Cause(err).(*internal.UnsupportedFeatureError)
|
func ProgramGetNextID(startID ProgramID) (ProgramID, error) {
|
||||||
return notSupported
|
id, err := objGetNextID(internal.BPF_PROG_GET_NEXT_ID, uint32(startID))
|
||||||
|
return ProgramID(id), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewProgramFromID returns the program for a given id.
|
||||||
|
//
|
||||||
|
// Returns ErrNotExist, if there is no eBPF program with the given id.
|
||||||
|
func NewProgramFromID(id ProgramID) (*Program, error) {
|
||||||
|
fd, err := bpfObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
name, abi, err := newProgramABIFromFd(fd)
|
||||||
|
if err != nil {
|
||||||
|
_ = fd.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return newProgram(fd, name, abi), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID returns the systemwide unique ID of the program.
|
||||||
|
func (p *Program) ID() (ProgramID, error) {
|
||||||
|
info, err := bpfGetProgInfoByFD(p.fd)
|
||||||
|
if err != nil {
|
||||||
|
return ProgramID(0), err
|
||||||
|
}
|
||||||
|
return ProgramID(info.id), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resolveBTFType(name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
|
||||||
|
kernel, err := btf.LoadKernelSpec()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't resolve BTF type %s: %w", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
type match struct {
|
||||||
|
p ProgramType
|
||||||
|
a AttachType
|
||||||
|
}
|
||||||
|
|
||||||
|
target := match{progType, attachType}
|
||||||
|
switch target {
|
||||||
|
case match{Tracing, AttachTraceIter}:
|
||||||
|
var target btf.Func
|
||||||
|
if err := kernel.FindType("bpf_iter_"+name, &target); err != nil {
|
||||||
|
return nil, fmt.Errorf("can't resolve BTF for iterator %s: %w", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &target, nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
5 vendor/github.com/cilium/ebpf/readme.md generated vendored
@ -13,6 +13,11 @@ The library is maintained by [Cloudflare](https://www.cloudflare.com) and [Ciliu
|
|||||||
The package is production ready, but **the API is explicitly unstable
|
The package is production ready, but **the API is explicitly unstable
|
||||||
right now**. Expect to update your code if you want to follow along.
|
right now**. Expect to update your code if you want to follow along.
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
* A version of Go that is [supported by upstream](https://golang.org/doc/devel/release.html#policy)
|
||||||
|
* Linux 4.9, 4.19 or 5.4 (versions in-between should work, but are not tested)
|
||||||
|
|
||||||
## Useful resources
|
## Useful resources
|
||||||
|
|
||||||
* [Cilium eBPF documentation](https://cilium.readthedocs.io/en/latest/bpf/#bpf-guide) (recommended)
|
* [Cilium eBPF documentation](https://cilium.readthedocs.io/en/latest/bpf/#bpf-guide) (recommended)
|
||||||
|
256 vendor/github.com/cilium/ebpf/syscalls.go generated vendored
@ -1,15 +1,19 @@
|
|||||||
package ebpf
|
package ebpf
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"path/filepath"
|
"errors"
|
||||||
"strings"
|
"fmt"
|
||||||
|
"os"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"github.com/cilium/ebpf/internal"
|
"github.com/cilium/ebpf/internal"
|
||||||
"github.com/cilium/ebpf/internal/btf"
|
"github.com/cilium/ebpf/internal/btf"
|
||||||
"github.com/cilium/ebpf/internal/unix"
|
"github.com/cilium/ebpf/internal/unix"
|
||||||
|
)
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
// Generic errors returned by BPF syscalls.
|
||||||
|
var (
|
||||||
|
ErrNotExist = errors.New("requested object does not exist")
|
||||||
)
|
)
|
||||||
|
|
||||||
// bpfObjName is a null-terminated string made up of
|
// bpfObjName is a null-terminated string made up of
|
||||||
@ -17,18 +21,15 @@ import (
|
|||||||
type bpfObjName [unix.BPF_OBJ_NAME_LEN]byte
|
type bpfObjName [unix.BPF_OBJ_NAME_LEN]byte
|
||||||
|
|
||||||
// newBPFObjName truncates the result if it is too long.
|
// newBPFObjName truncates the result if it is too long.
|
||||||
func newBPFObjName(name string) (bpfObjName, error) {
|
func newBPFObjName(name string) bpfObjName {
|
||||||
idx := strings.IndexFunc(name, invalidBPFObjNameChar)
|
|
||||||
if idx != -1 {
|
|
||||||
return bpfObjName{}, errors.Errorf("invalid character '%c' in name '%s'", name[idx], name)
|
|
||||||
}
|
|
||||||
|
|
||||||
var result bpfObjName
|
var result bpfObjName
|
||||||
copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
|
copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
|
||||||
return result, nil
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func invalidBPFObjNameChar(char rune) bool {
|
func invalidBPFObjNameChar(char rune) bool {
|
||||||
|
dotAllowed := objNameAllowsDot() == nil
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case char >= 'A' && char <= 'Z':
|
case char >= 'A' && char <= 'Z':
|
||||||
fallthrough
|
fallthrough
|
||||||
@ -36,6 +37,8 @@ func invalidBPFObjNameChar(char rune) bool {
|
|||||||
fallthrough
|
fallthrough
|
||||||
case char >= '0' && char <= '9':
|
case char >= '0' && char <= '9':
|
||||||
fallthrough
|
fallthrough
|
||||||
|
case dotAllowed && char == '.':
|
||||||
|
fallthrough
|
||||||
case char == '_':
|
case char == '_':
|
||||||
return false
|
return false
|
||||||
default:
|
default:
|
||||||
@ -76,12 +79,6 @@ type bpfMapInfo struct {
|
|||||||
mapName bpfObjName // since 4.15 ad5b177bd73f
|
mapName bpfObjName // since 4.15 ad5b177bd73f
|
||||||
}
|
}
|
||||||
|
|
||||||
type bpfPinObjAttr struct {
|
|
||||||
fileName internal.Pointer
|
|
||||||
fd uint32
|
|
||||||
padding uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
type bpfProgLoadAttr struct {
|
type bpfProgLoadAttr struct {
|
||||||
progType ProgramType
|
progType ProgramType
|
||||||
insCount uint32
|
insCount uint32
|
||||||
@ -102,6 +99,8 @@ type bpfProgLoadAttr struct {
|
|||||||
lineInfoRecSize uint32
|
lineInfoRecSize uint32
|
||||||
lineInfo internal.Pointer
|
lineInfo internal.Pointer
|
||||||
lineInfoCnt uint32
|
lineInfoCnt uint32
|
||||||
|
attachBTFID btf.TypeID
|
||||||
|
attachProgFd uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
type bpfProgInfo struct {
|
type bpfProgInfo struct {
|
||||||
@ -130,13 +129,6 @@ type bpfProgTestRunAttr struct {
|
|||||||
duration uint32
|
duration uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
type bpfProgAlterAttr struct {
|
|
||||||
targetFd uint32
|
|
||||||
attachBpfFd uint32
|
|
||||||
attachType uint32
|
|
||||||
attachFlags uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
type bpfObjGetInfoByFDAttr struct {
|
type bpfObjGetInfoByFDAttr struct {
|
||||||
fd uint32
|
fd uint32
|
||||||
infoLen uint32
|
infoLen uint32
|
||||||
@ -148,9 +140,19 @@ type bpfGetFDByIDAttr struct {
|
|||||||
next uint32
|
next uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type bpfMapFreezeAttr struct {
|
||||||
|
mapFd uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type bpfObjGetNextIDAttr struct {
|
||||||
|
startID uint32
|
||||||
|
nextID uint32
|
||||||
|
openFlags uint32
|
||||||
|
}
|
||||||
|
|
||||||
func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) {
|
func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) {
|
||||||
for {
|
for {
|
||||||
fd, err := internal.BPF(_ProgLoad, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
fd, err := internal.BPF(internal.BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||||
// As of ~4.20 the verifier can be interrupted by a signal,
|
// As of ~4.20 the verifier can be interrupted by a signal,
|
||||||
// and returns EAGAIN in that case.
|
// and returns EAGAIN in that case.
|
||||||
if err == unix.EAGAIN {
|
if err == unix.EAGAIN {
|
||||||
@ -165,13 +167,17 @@ func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func bpfProgAlter(cmd int, attr *bpfProgAlterAttr) error {
|
func bpfProgTestRun(attr *bpfProgTestRunAttr) error {
|
||||||
_, err := internal.BPF(cmd, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
_, err := internal.BPF(internal.BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) {
|
func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) {
|
||||||
fd, err := internal.BPF(_MapCreate, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
fd, err := internal.BPF(internal.BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
|
||||||
|
if errors.Is(err, os.ErrPermission) {
|
||||||
|
return nil, errors.New("permission denied or insufficient rlimit to lock memory for map")
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -179,7 +185,7 @@ func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) {
|
|||||||
return internal.NewFD(uint32(fd)), nil
|
return internal.NewFD(uint32(fd)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() bool {
|
var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() (bool, error) {
|
||||||
inner, err := bpfMapCreate(&bpfMapCreateAttr{
|
inner, err := bpfMapCreate(&bpfMapCreateAttr{
|
||||||
mapType: Array,
|
mapType: Array,
|
||||||
keySize: 4,
|
keySize: 4,
|
||||||
@ -187,7 +193,7 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() bool {
|
|||||||
maxEntries: 1,
|
maxEntries: 1,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false, err
|
||||||
}
|
}
|
||||||
defer inner.Close()
|
defer inner.Close()
|
||||||
|
|
||||||
@ -200,11 +206,28 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() bool {
|
|||||||
innerMapFd: innerFd,
|
innerMapFd: innerFd,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
_ = nested.Close()
|
_ = nested.Close()
|
||||||
return true
|
return true, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() (bool, error) {
|
||||||
|
// This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since
|
||||||
|
// BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check.
|
||||||
|
m, err := bpfMapCreate(&bpfMapCreateAttr{
|
||||||
|
mapType: Array,
|
||||||
|
keySize: 4,
|
||||||
|
valueSize: 4,
|
||||||
|
maxEntries: 1,
|
||||||
|
flags: unix.BPF_F_RDONLY_PROG,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
_ = m.Close()
|
||||||
|
return true, nil
|
||||||
})
|
})
|
||||||
|
|
||||||
func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error {
|
 func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error {
@@ -218,8 +241,8 @@ func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error {
         key: key,
         value: valueOut,
     }
-    _, err = internal.BPF(_MapLookupElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
-    return err
+    _, err = internal.BPF(internal.BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+    return wrapMapError(err)
 }

 func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error {
@@ -233,8 +256,8 @@ func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error
         key: key,
         value: valueOut,
     }
-    _, err = internal.BPF(_MapLookupAndDeleteElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
-    return err
+    _, err = internal.BPF(internal.BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+    return wrapMapError(err)
 }

 func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint64) error {
@@ -249,8 +272,8 @@ func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint
         value: valueOut,
         flags: flags,
     }
-    _, err = internal.BPF(_MapUpdateElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
-    return err
+    _, err = internal.BPF(internal.BPF_MAP_UPDATE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+    return wrapMapError(err)
 }

 func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error {
@@ -263,8 +286,8 @@ func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error {
         mapFd: fd,
         key: key,
     }
-    _, err = internal.BPF(_MapDeleteElem, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
-    return err
+    _, err = internal.BPF(internal.BPF_MAP_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+    return wrapMapError(err)
 }

 func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error {
@@ -278,44 +301,58 @@ func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error {
         key: key,
         value: nextKeyOut,
     }
-    _, err = internal.BPF(_MapGetNextKey, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+    _, err = internal.BPF(internal.BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+    return wrapMapError(err)
+}
+
+func objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) {
+    attr := bpfObjGetNextIDAttr{
+        startID: start,
+    }
+    _, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+    return attr.nextID, wrapObjError(err)
+}
+
+func wrapObjError(err error) error {
+    if err == nil {
+        return nil
+    }
+    if errors.Is(err, unix.ENOENT) {
+        return fmt.Errorf("%w", ErrNotExist)
+    }
+
+    return errors.New(err.Error())
+}
+
+func wrapMapError(err error) error {
+    if err == nil {
+        return nil
+    }
+
+    if errors.Is(err, unix.ENOENT) {
+        return ErrKeyNotExist
+    }
+
+    if errors.Is(err, unix.EEXIST) {
+        return ErrKeyExist
+    }
+
+    return errors.New(err.Error())
+}
+
+func bpfMapFreeze(m *internal.FD) error {
+    fd, err := m.Value()
+    if err != nil {
+        return err
+    }
+
+    attr := bpfMapFreezeAttr{
+        mapFd: fd,
+    }
+    _, err = internal.BPF(internal.BPF_MAP_FREEZE, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
     return err
 }

-const bpfFSType = 0xcafe4a11
-
-func bpfPinObject(fileName string, fd *internal.FD) error {
-    dirName := filepath.Dir(fileName)
-    var statfs unix.Statfs_t
-    if err := unix.Statfs(dirName, &statfs); err != nil {
-        return err
-    }
-    if uint64(statfs.Type) != bpfFSType {
-        return errors.Errorf("%s is not on a bpf filesystem", fileName)
-    }
-
-    value, err := fd.Value()
-    if err != nil {
-        return err
-    }
-
-    _, err = internal.BPF(_ObjPin, unsafe.Pointer(&bpfPinObjAttr{
-        fileName: internal.NewStringPointer(fileName),
-        fd: value,
-    }), 16)
-    return errors.Wrapf(err, "pin object %s", fileName)
-}
-
-func bpfGetObject(fileName string) (*internal.FD, error) {
-    ptr, err := internal.BPF(_ObjGet, unsafe.Pointer(&bpfPinObjAttr{
-        fileName: internal.NewStringPointer(fileName),
-    }), 16)
-    if err != nil {
-        return nil, errors.Wrapf(err, "get object %s", fileName)
-    }
-    return internal.NewFD(uint32(ptr)), nil
-}
-
 func bpfGetObjectInfoByFD(fd *internal.FD, info unsafe.Pointer, size uintptr) error {
     value, err := fd.Value()
     if err != nil {
@@ -328,28 +365,51 @@ func bpfGetObjectInfoByFD(fd *internal.FD, info unsafe.Pointer, size uintptr) er
         infoLen: uint32(size),
         info: internal.NewPointer(info),
     }
-    _, err = internal.BPF(_ObjGetInfoByFD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
-    return errors.Wrapf(err, "fd %d", fd)
+    _, err = internal.BPF(internal.BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+    if err != nil {
+        return fmt.Errorf("fd %d: %w", fd, err)
+    }
+    return nil
 }

 func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) {
     var info bpfProgInfo
-    err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
-    return &info, errors.Wrap(err, "can't get program info")
+    if err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil {
+        return nil, fmt.Errorf("can't get program info: %w", err)
+    }
+    return &info, nil
 }

 func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) {
     var info bpfMapInfo
     err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
-    return &info, errors.Wrap(err, "can't get map info")
+    if err != nil {
+        return nil, fmt.Errorf("can't get map info: %w", err)
+    }
+    return &info, nil
 }

-var haveObjName = internal.FeatureTest("object names", "4.15", func() bool {
-    name, err := newBPFObjName("feature_test")
+var haveObjName = internal.FeatureTest("object names", "4.15", func() (bool, error) {
+    attr := bpfMapCreateAttr{
+        mapType: Array,
+        keySize: 4,
+        valueSize: 4,
+        maxEntries: 1,
+        mapName: newBPFObjName("feature_test"),
+    }
+
+    fd, err := bpfMapCreate(&attr)
     if err != nil {
-        // This really is a fatal error, but it should be caught
-        // by the unit tests not working.
-        return false
+        return false, nil
+    }
+
+    _ = fd.Close()
+    return true, nil
+})
+
+var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() (bool, error) {
+    if err := haveObjName(); err != nil {
+        return false, err
     }

     attr := bpfMapCreateAttr{
@@ -357,38 +417,22 @@ var haveObjName = internal.FeatureTest("object names", "4.15", func() bool {
         keySize: 4,
         valueSize: 4,
         maxEntries: 1,
-        mapName: name,
+        mapName: newBPFObjName(".test"),
     }

     fd, err := bpfMapCreate(&attr)
     if err != nil {
-        return false
+        return false, nil
     }

     _ = fd.Close()
-    return true
+    return true, nil
 })

-func bpfGetMapFDByID(id uint32) (*internal.FD, error) {
-    // available from 4.13
+func bpfObjGetFDByID(cmd internal.BPFCmd, id uint32) (*internal.FD, error) {
     attr := bpfGetFDByIDAttr{
         id: id,
     }
-    ptr, err := internal.BPF(_MapGetFDByID, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
-    if err != nil {
-        return nil, errors.Wrapf(err, "can't get fd for map id %d", id)
-    }
-    return internal.NewFD(uint32(ptr)), nil
-}
-
-func bpfGetProgramFDByID(id uint32) (*internal.FD, error) {
-    // available from 4.13
-    attr := bpfGetFDByIDAttr{
-        id: id,
-    }
-    ptr, err := internal.BPF(_ProgGetFDByID, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
-    if err != nil {
-        return nil, errors.Wrapf(err, "can't get fd for program id %d", id)
-    }
-    return internal.NewFD(uint32(ptr)), nil
+    ptr, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+    return internal.NewFD(uint32(ptr)), wrapObjError(err)
 }
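The syscall helpers above replace errors.Wrap-style messages with sentinel errors wrapped via %w, so callers can branch on errors.Is instead of matching strings. Below is a minimal, self-contained sketch of that pattern; the names (wrapLookupError, ErrResourceNotExist) are hypothetical stand-ins, not the vendored API.

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

// ErrResourceNotExist is a hypothetical sentinel, standing in for sentinels
// such as ErrKeyNotExist in the vendored package.
var ErrResourceNotExist = errors.New("resource does not exist")

// wrapLookupError mirrors the wrapMapError style above: a nil error passes
// through, a known errno becomes a sentinel, anything else is returned as-is.
func wrapLookupError(err error) error {
	if err == nil {
		return nil
	}
	if errors.Is(err, unix.ENOENT) {
		return fmt.Errorf("lookup: %w", ErrResourceNotExist)
	}
	return err
}

func main() {
	// Simulate a syscall that failed with ENOENT.
	err := wrapLookupError(unix.ENOENT)
	if errors.Is(err, ErrResourceNotExist) {
		fmt.Println("treated as a missing key:", err)
	}
}
```

Because the sentinel is wrapped with %w, errors.Is keeps working even if more context is layered on top later.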
39  vendor/github.com/cilium/ebpf/types.go  (generated, vendored)
@@ -1,6 +1,6 @@
 package ebpf

-//go:generate stringer -output types_string.go -type=MapType,ProgramType
+//go:generate stringer -output types_string.go -type=MapType,ProgramType,AttachType

 // MapType indicates the type map structure
 // that will be initialized in the kernel.
@@ -85,44 +85,12 @@ const (

 // hasPerCPUValue returns true if the Map stores a value per CPU.
 func (mt MapType) hasPerCPUValue() bool {
-    if mt == PerCPUHash || mt == PerCPUArray {
+    if mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash {
         return true
     }
     return false
 }

-const (
-    _MapCreate = iota
-    _MapLookupElem
-    _MapUpdateElem
-    _MapDeleteElem
-    _MapGetNextKey
-    _ProgLoad
-    _ObjPin
-    _ObjGet
-    _ProgAttach
-    _ProgDetach
-    _ProgTestRun
-    _ProgGetNextID
-    _MapGetNextID
-    _ProgGetFDByID
-    _MapGetFDByID
-    _ObjGetInfoByFD
-    _ProgQuery
-    _RawTracepointOpen
-    _BTFLoad
-    _BTFGetFDByID
-    _TaskFDQuery
-    _MapLookupAndDeleteElem
-    _MapFreeze
-)
-
-const (
-    _Any = iota
-    _NoExist
-    _Exist
-)
-
 // ProgramType of the eBPF program
 type ProgramType uint32

@@ -219,6 +187,9 @@ const (
     AttachTraceRawTp
     AttachTraceFEntry
     AttachTraceFExit
+    AttachModifyReturn
+    AttachLSMMac
+    AttachTraceIter
 )

 // AttachFlags of the eBPF program used in BPF_PROG_ATTACH command
48  vendor/github.com/cilium/ebpf/types_string.go  (generated, vendored)
@@ -1,4 +1,4 @@
-// Code generated by "stringer -output types_string.go -type=MapType,ProgramType"; DO NOT EDIT.
+// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,AttachType"; DO NOT EDIT.

 package ebpf

@@ -89,3 +89,49 @@ func (i ProgramType) String() string {
     }
     return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]]
 }
+func _() {
+    // An "invalid array index" compiler error signifies that the constant values have changed.
+    // Re-run the stringer command to generate them again.
+    var x [1]struct{}
+    _ = x[AttachNone-0]
+    _ = x[AttachCGroupInetIngress-0]
+    _ = x[AttachCGroupInetEgress-1]
+    _ = x[AttachCGroupInetSockCreate-2]
+    _ = x[AttachCGroupSockOps-3]
+    _ = x[AttachSkSKBStreamParser-4]
+    _ = x[AttachSkSKBStreamVerdict-5]
+    _ = x[AttachCGroupDevice-6]
+    _ = x[AttachSkMsgVerdict-7]
+    _ = x[AttachCGroupInet4Bind-8]
+    _ = x[AttachCGroupInet6Bind-9]
+    _ = x[AttachCGroupInet4Connect-10]
+    _ = x[AttachCGroupInet6Connect-11]
+    _ = x[AttachCGroupInet4PostBind-12]
+    _ = x[AttachCGroupInet6PostBind-13]
+    _ = x[AttachCGroupUDP4Sendmsg-14]
+    _ = x[AttachCGroupUDP6Sendmsg-15]
+    _ = x[AttachLircMode2-16]
+    _ = x[AttachFlowDissector-17]
+    _ = x[AttachCGroupSysctl-18]
+    _ = x[AttachCGroupUDP4Recvmsg-19]
+    _ = x[AttachCGroupUDP6Recvmsg-20]
+    _ = x[AttachCGroupGetsockopt-21]
+    _ = x[AttachCGroupSetsockopt-22]
+    _ = x[AttachTraceRawTp-23]
+    _ = x[AttachTraceFEntry-24]
+    _ = x[AttachTraceFExit-25]
+    _ = x[AttachModifyReturn-26]
+    _ = x[AttachLSMMac-27]
+    _ = x[AttachTraceIter-28]
+}
+
+const _AttachType_name = "AttachNoneAttachCGroupInetEgressAttachCGroupInetSockCreateAttachCGroupSockOpsAttachSkSKBStreamParserAttachSkSKBStreamVerdictAttachCGroupDeviceAttachSkMsgVerdictAttachCGroupInet4BindAttachCGroupInet6BindAttachCGroupInet4ConnectAttachCGroupInet6ConnectAttachCGroupInet4PostBindAttachCGroupInet6PostBindAttachCGroupUDP4SendmsgAttachCGroupUDP6SendmsgAttachLircMode2AttachFlowDissectorAttachCGroupSysctlAttachCGroupUDP4RecvmsgAttachCGroupUDP6RecvmsgAttachCGroupGetsockoptAttachCGroupSetsockoptAttachTraceRawTpAttachTraceFEntryAttachTraceFExitAttachModifyReturnAttachLSMMacAttachTraceIter"
+
+var _AttachType_index = [...]uint16{0, 10, 32, 58, 77, 100, 124, 142, 160, 181, 202, 226, 250, 275, 300, 323, 346, 361, 380, 398, 421, 444, 466, 488, 504, 521, 537, 555, 567, 582}
+
+func (i AttachType) String() string {
+    if i >= AttachType(len(_AttachType_index)-1) {
+        return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")"
+    }
+    return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]]
+}
25  vendor/github.com/containerd/cgroups/README.md  (generated, vendored)
@@ -1,6 +1,6 @@
 # cgroups

-[](https://travis-ci.org/containerd/cgroups)
+[](https://github.com/containerd/cgroups/actions?query=workflow%3ACI)
 [](https://codecov.io/gh/containerd/cgroups)
 [](https://godoc.org/github.com/containerd/cgroups)
 [](https://goreportcard.com/report/github.com/containerd/cgroups)
@@ -65,7 +65,7 @@ To update the resources applied in the cgroup
 ```go
 shares = uint64(200)
 if err := control.Update(&specs.LinuxResources{
-    CPU: &specs.CPU{
+    CPU: &specs.LinuxCPU{
         Shares: &shares,
     },
 }); err != nil {
@@ -112,6 +112,27 @@ err := control.MoveTo(destination)
 subCgroup, err := control.New("child", resources)
 ```
+
+### Registering for memory events
+
+This allows you to get notified by an eventfd for v1 memory cgroups events.
+
+```go
+event := cgroups.MemoryThresholdEvent(50 * 1024 * 1024, false)
+efd, err := control.RegisterMemoryEvent(event)
+```
+
+```go
+event := cgroups.MemoryPressureEvent(cgroups.MediumPressure, cgroups.DefaultMode)
+efd, err := control.RegisterMemoryEvent(event)
+```
+
+```go
+efd, err := control.OOMEventFD()
+// or by using RegisterMemoryEvent
+event := cgroups.OOMEvent()
+efd, err := control.RegisterMemoryEvent(event)
+```
+
 ### Attention

 All static path should not include `/sys/fs/cgroup/` prefix, it should start with your own cgroups name
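The README snippets above return an eventfd but do not show it being consumed. As a hedged illustration of what a consumer might do next (the Load/StaticPath setup and the "/test" group path are assumptions for this sketch, not taken from the vendored README), the registered eventfd delivers an 8-byte counter in host byte order each time the event fires:

```go
package main

import (
	"encoding/binary"
	"log"

	"github.com/containerd/cgroups"
	"golang.org/x/sys/unix"
)

func main() {
	// Load an existing v1 cgroup; the group name "/test" is an assumption.
	control, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/test"))
	if err != nil {
		log.Fatal(err)
	}

	// Register for OOM notifications as shown in the README above.
	efd, err := control.RegisterMemoryEvent(cgroups.OOMEvent())
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(int(efd))

	// An eventfd read returns an 8-byte counter in host byte order
	// (little-endian on common platforms); it blocks until the event fires.
	buf := make([]byte, 8)
	if _, err := unix.Read(int(efd), buf); err != nil {
		log.Fatal(err)
	}
	log.Printf("memory event fired %d time(s)", binary.LittleEndian.Uint64(buf))
}
```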
21  vendor/github.com/containerd/cgroups/cgroup.go  (generated, vendored)
@@ -458,7 +458,26 @@ func (c *cgroup) OOMEventFD() (uintptr, error) {
     if err != nil {
         return 0, err
     }
-    return s.(*memoryController).OOMEventFD(sp)
+    return s.(*memoryController).memoryEvent(sp, OOMEvent())
+}
+
+// RegisterMemoryEvent allows the ability to register for all v1 memory cgroups
+// notifications.
+func (c *cgroup) RegisterMemoryEvent(event MemoryEvent) (uintptr, error) {
+    c.mu.Lock()
+    defer c.mu.Unlock()
+    if c.err != nil {
+        return 0, c.err
+    }
+    s := c.getSubsystem(Memory)
+    if s == nil {
+        return 0, ErrMemoryNotSupported
+    }
+    sp, err := c.path(Memory)
+    if err != nil {
+        return 0, err
+    }
+    return s.(*memoryController).memoryEvent(sp, event)
 }

 // State returns the state of the cgroup and its processes
3  vendor/github.com/containerd/cgroups/control.go  (generated, vendored)
@@ -82,6 +82,9 @@ type Cgroup interface {
     Thaw() error
     // OOMEventFD returns the memory subsystem's event fd for OOM events
     OOMEventFD() (uintptr, error)
+    // RegisterMemoryEvent returns the memory subsystems event fd for whatever memory event was
+    // registered for. Can alternatively register for the oom event with this method.
+    RegisterMemoryEvent(MemoryEvent) (uintptr, error)
     // State returns the cgroups current state
     State() State
     // Subsystems returns all the subsystems in the cgroup
2  vendor/github.com/containerd/cgroups/cpuset.go  (generated, vendored)
@@ -26,7 +26,7 @@ import (
     specs "github.com/opencontainers/runtime-spec/specs-go"
 )

-func NewCputset(root string) *cpusetController {
+func NewCpuset(root string) *cpusetController {
     return &cpusetController{
         root: filepath.Join(root, string(Cpuset)),
     }
9  vendor/github.com/containerd/cgroups/go.mod  (generated, vendored)
@@ -3,17 +3,16 @@ module github.com/containerd/cgroups
 go 1.13

 require (
-    github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3
+    github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775
     github.com/coreos/go-systemd/v22 v22.0.0
     github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
     github.com/docker/go-units v0.4.0
     github.com/godbus/dbus/v5 v5.0.3
     github.com/gogo/protobuf v1.3.1
-    github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
-    github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
+    github.com/opencontainers/runtime-spec v1.0.2
     github.com/pkg/errors v0.9.1
-    github.com/sirupsen/logrus v1.4.2
+    github.com/sirupsen/logrus v1.6.0
     github.com/stretchr/testify v1.2.2
     github.com/urfave/cli v1.22.2
-    golang.org/x/sys v0.0.0-20200120151820-655fe14d7479
+    golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
 )
2  vendor/github.com/containerd/cgroups/hierarchy.go  (generated, vendored)
@@ -16,5 +16,5 @@

 package cgroups

-// Hierarchy enableds both unified and split hierarchy for cgroups
+// Hierarchy enables both unified and split hierarchy for cgroups
 type Hierarchy func() ([]Subsystem, error)
171  vendor/github.com/containerd/cgroups/memory.go  (generated, vendored)
@@ -32,6 +32,128 @@ import (
     "golang.org/x/sys/unix"
 )

+// MemoryEvent is an interface that V1 memory Cgroup notifications implement. Arg returns the
+// file name whose fd should be written to "cgroups.event_control". EventFile returns the name of
+// the file that supports the notification api e.g. "memory.usage_in_bytes".
+type MemoryEvent interface {
+    Arg() string
+    EventFile() string
+}
+
+type memoryThresholdEvent struct {
+    threshold uint64
+    swap bool
+}
+
+// MemoryThresholdEvent returns a new memory threshold event to be used with RegisterMemoryEvent.
+// If swap is true, the event will be registered using memory.memsw.usage_in_bytes
+func MemoryThresholdEvent(threshold uint64, swap bool) MemoryEvent {
+    return &memoryThresholdEvent{
+        threshold,
+        swap,
+    }
+}
+
+func (m *memoryThresholdEvent) Arg() string {
+    return strconv.FormatUint(m.threshold, 10)
+}
+
+func (m *memoryThresholdEvent) EventFile() string {
+    if m.swap {
+        return "memory.memsw.usage_in_bytes"
+    }
+    return "memory.usage_in_bytes"
+}
+
+type oomEvent struct{}
+
+// OOMEvent returns a new oom event to be used with RegisterMemoryEvent.
+func OOMEvent() MemoryEvent {
+    return &oomEvent{}
+}
+
+func (oom *oomEvent) Arg() string {
+    return ""
+}
+
+func (oom *oomEvent) EventFile() string {
+    return "memory.oom_control"
+}
+
+type memoryPressureEvent struct {
+    pressureLevel MemoryPressureLevel
+    hierarchy EventNotificationMode
+}
+
+// MemoryPressureEvent returns a new memory pressure event to be used with RegisterMemoryEvent.
+func MemoryPressureEvent(pressureLevel MemoryPressureLevel, hierarchy EventNotificationMode) MemoryEvent {
+    return &memoryPressureEvent{
+        pressureLevel,
+        hierarchy,
+    }
+}
+
+func (m *memoryPressureEvent) Arg() string {
+    return string(m.pressureLevel) + "," + string(m.hierarchy)
+}
+
+func (m *memoryPressureEvent) EventFile() string {
+    return "memory.pressure_level"
+}
+
+// MemoryPressureLevel corresponds to the memory pressure levels defined
+// for memory cgroups.
+type MemoryPressureLevel string
+
+// The three memory pressure levels are as follows.
+// - The "low" level means that the system is reclaiming memory for new
+// allocations. Monitoring this reclaiming activity might be useful for
+// maintaining cache level. Upon notification, the program (typically
+// "Activity Manager") might analyze vmstat and act in advance (i.e.
+// prematurely shutdown unimportant services).
+// - The "medium" level means that the system is experiencing medium memory
+// pressure, the system might be making swap, paging out active file caches,
+// etc. Upon this event applications may decide to further analyze
+// vmstat/zoneinfo/memcg or internal memory usage statistics and free any
+// resources that can be easily reconstructed or re-read from a disk.
+// - The "critical" level means that the system is actively thrashing, it is
+// about to out of memory (OOM) or even the in-kernel OOM killer is on its
+// way to trigger. Applications should do whatever they can to help the
+// system. It might be too late to consult with vmstat or any other
+// statistics, so it is advisable to take an immediate action.
+// "https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt" Section 11
+const (
+    LowPressure MemoryPressureLevel = "low"
+    MediumPressure MemoryPressureLevel = "medium"
+    CriticalPressure MemoryPressureLevel = "critical"
+)
+
+// EventNotificationMode corresponds to the notification modes
+// for the memory cgroups pressure level notifications.
+type EventNotificationMode string
+
+// There are three optional modes that specify different propagation behavior:
+// - "default": this is the default behavior specified above. This mode is the
+// same as omitting the optional mode parameter, preserved by backwards
+// compatibility.
+// - "hierarchy": events always propagate up to the root, similar to the default
+// behavior, except that propagation continues regardless of whether there are
+// event listeners at each level, with the "hierarchy" mode. In the above
+// example, groups A, B, and C will receive notification of memory pressure.
+// - "local": events are pass-through, i.e. they only receive notifications when
+// memory pressure is experienced in the memcg for which the notification is
+// registered. In the above example, group C will receive notification if
+// registered for "local" notification and the group experiences memory
+// pressure. However, group B will never receive notification, regardless if
+// there is an event listener for group C or not, if group B is registered for
+// local notification.
+// "https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt" Section 11
+const (
+    DefaultMode EventNotificationMode = "default"
+    LocalMode EventNotificationMode = "local"
+    HierarchyMode EventNotificationMode = "hierarchy"
+)
+
 // NewMemory returns a Memory controller given the root folder of cgroups.
 // It may optionally accept other configuration options, such as IgnoreModules(...)
 func NewMemory(root string, options ...func(*memoryController)) *memoryController {
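Because MemoryEvent only asks for the argument string and the target file name that end up written into cgroup.event_control, a caller could in principle supply its own implementation. The sketch below is purely illustrative: the criticalLocalPressure type is hypothetical, the "/test" group path is an assumption, and the built-in MemoryPressureEvent already covers this exact case.

```go
package main

import (
	"fmt"

	"github.com/containerd/cgroups"
)

// criticalLocalPressure is a hypothetical MemoryEvent implementation shown
// only to illustrate the Arg/EventFile contract of the interface above.
type criticalLocalPressure struct{}

// Arg is the string written after the two fds into cgroup.event_control.
func (criticalLocalPressure) Arg() string { return "critical,local" }

// EventFile is the v1 memory cgroup file the notification is attached to.
func (criticalLocalPressure) EventFile() string { return "memory.pressure_level" }

func main() {
	control, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/test"))
	if err != nil {
		fmt.Println(err)
		return
	}
	efd, err := control.RegisterMemoryEvent(criticalLocalPressure{})
	fmt.Println(efd, err)
}
```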
@@ -201,34 +323,6 @@ func (m *memoryController) Stat(path string, stats *v1.Metrics) error {
     return nil
 }

-func (m *memoryController) OOMEventFD(path string) (uintptr, error) {
-    root := m.Path(path)
-    f, err := os.Open(filepath.Join(root, "memory.oom_control"))
-    if err != nil {
-        return 0, err
-    }
-    defer f.Close()
-    fd, _, serr := unix.RawSyscall(unix.SYS_EVENTFD2, 0, unix.EFD_CLOEXEC, 0)
-    if serr != 0 {
-        return 0, serr
-    }
-    if err := writeEventFD(root, f.Fd(), fd); err != nil {
-        unix.Close(int(fd))
-        return 0, err
-    }
-    return fd, nil
-}
-
-func writeEventFD(root string, cfd, efd uintptr) error {
-    f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0)
-    if err != nil {
-        return err
-    }
-    _, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd))
-    f.Close()
-    return err
-}
-
 func (m *memoryController) parseStats(r io.Reader, stat *v1.MemoryStat) error {
     var (
         raw = make(map[string]uint64)
@@ -359,3 +453,24 @@ func getOomControlValue(mem *specs.LinuxMemory) *int64 {
     }
     return nil
 }
+
+func (m *memoryController) memoryEvent(path string, event MemoryEvent) (uintptr, error) {
+    root := m.Path(path)
+    efd, err := unix.Eventfd(0, unix.EFD_CLOEXEC)
+    if err != nil {
+        return 0, err
+    }
+    evtFile, err := os.Open(filepath.Join(root, event.EventFile()))
+    if err != nil {
+        unix.Close(efd)
+        return 0, err
+    }
+    defer evtFile.Close()
+    data := fmt.Sprintf("%d %d %s", efd, evtFile.Fd(), event.Arg())
+    evctlPath := filepath.Join(root, "cgroup.event_control")
+    if err := ioutil.WriteFile(evctlPath, []byte(data), 0700); err != nil {
+        unix.Close(efd)
+        return 0, err
+    }
+    return uintptr(efd), nil
+}
2  vendor/github.com/containerd/cgroups/utils.go  (generated, vendored)
@@ -121,7 +121,7 @@ func defaults(root string) ([]Subsystem, error) {
     NewNetCls(root),
     NewNetPrio(root),
     NewPerfEvent(root),
-    NewCputset(root),
+    NewCpuset(root),
     NewCpu(root),
     NewCpuacct(root),
     NewMemory(root),
64  vendor/github.com/containerd/cgroups/v2/manager.go  (generated, vendored)
@@ -166,6 +166,9 @@ func writeValues(path string, values []Value) error {
 }

 func NewManager(mountpoint string, group string, resources *Resources) (*Manager, error) {
+    if resources == nil {
+        return nil, errors.New("resources reference is nil")
+    }
     if err := VerifyGroupPath(group); err != nil {
         return nil, err
     }
@@ -376,6 +379,12 @@ func (c *Manager) Stat() (*stats.Metrics, error) {
             return nil, err
         }
     }
+    memoryEvents := make(map[string]interface{})
+    if err := readKVStatsFile(c.path, "memory.events", memoryEvents); err != nil {
+        if !os.IsNotExist(err) {
+            return nil, err
+        }
+    }
     var metrics stats.Metrics

     metrics.Pids = &stats.PidsStat{
@@ -427,7 +436,15 @@ func (c *Manager) Stat() (*stats.Metrics, error) {
         SwapUsage: getStatFileContentUint64(filepath.Join(c.path, "memory.swap.current")),
         SwapLimit: getStatFileContentUint64(filepath.Join(c.path, "memory.swap.max")),
     }
+    if len(memoryEvents) > 0 {
+        metrics.MemoryEvents = &stats.MemoryEvents{
+            Low: getUint64Value("low", memoryEvents),
+            High: getUint64Value("high", memoryEvents),
+            Max: getUint64Value("max", memoryEvents),
+            Oom: getUint64Value("oom", memoryEvents),
+            OomKill: getUint64Value("oom_kill", memoryEvents),
+        }
+    }
     metrics.Io = &stats.IOStat{Usage: readIoStats(c.path)}
     metrics.Rdma = &stats.RdmaStat{
         Current: rdmaStats(filepath.Join(c.path, "rdma.current")),
@@ -572,15 +589,44 @@ func (c *Manager) waitForEvents(ec chan<- Event, errCh chan<- error) {
             errCh <- err
             return
         }
-        var out map[string]interface{}
         if bytesRead >= syscall.SizeofInotifyEvent {
-            if err := readKVStatsFile(c.path, "memory.events", out); err != nil {
-                e := Event{
-                    High: out["high"].(uint64),
-                    Low: out["low"].(uint64),
-                    Max: out["max"].(uint64),
-                    OOM: out["oom"].(uint64),
-                    OOMKill: out["oom_kill"].(uint64),
+            out := make(map[string]interface{})
+            if err := readKVStatsFile(c.path, "memory.events", out); err == nil {
+                e := Event{}
+                if v, ok := out["high"]; ok {
+                    e.High, ok = v.(uint64)
+                    if !ok {
+                        errCh <- errors.Errorf("cannot convert high to uint64: %+v", v)
+                        return
+                    }
+                }
+                if v, ok := out["low"]; ok {
+                    e.Low, ok = v.(uint64)
+                    if !ok {
+                        errCh <- errors.Errorf("cannot convert low to uint64: %+v", v)
+                        return
+                    }
+                }
+                if v, ok := out["max"]; ok {
+                    e.Max, ok = v.(uint64)
+                    if !ok {
+                        errCh <- errors.Errorf("cannot convert max to uint64: %+v", v)
+                        return
+                    }
+                }
+                if v, ok := out["oom"]; ok {
+                    e.OOM, ok = v.(uint64)
+                    if !ok {
+                        errCh <- errors.Errorf("cannot convert oom to uint64: %+v", v)
+                        return
+                    }
+                }
+                if v, ok := out["oom_kill"]; ok {
+                    e.OOMKill, ok = v.(uint64)
+                    if !ok {
+                        errCh <- errors.Errorf("cannot convert oom_kill to uint64: %+v", v)
+                        return
+                    }
                 }
                 ec <- e
             } else {
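One behavioral note from the manager.go hunks above: NewManager now rejects a nil *Resources, so even callers that only want to read stats should pass an empty value, and Stat now exposes memory.events counters when present. A hedged sketch of that usage (the mount point and group path are assumptions for the example):

```go
package main

import (
	"log"

	v2 "github.com/containerd/cgroups/v2"
)

func main() {
	// Pass &v2.Resources{} rather than nil: the nil check added to
	// NewManager above returns an error otherwise.
	m, err := v2.NewManager("/sys/fs/cgroup", "/mygroup", &v2.Resources{})
	if err != nil {
		log.Fatal(err)
	}

	metrics, err := m.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// MemoryEvents is only populated when memory.events had entries.
	if metrics.MemoryEvents != nil {
		log.Printf("oom kills so far: %d", metrics.MemoryEvents.OomKill)
	}
}
```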
490  vendor/github.com/containerd/cgroups/v2/stats/metrics.pb.go  (generated, vendored)
@@ -31,6 +31,7 @@ type Metrics struct {
     Rdma *RdmaStat `protobuf:"bytes,5,opt,name=rdma,proto3" json:"rdma,omitempty"`
     Io *IOStat `protobuf:"bytes,6,opt,name=io,proto3" json:"io,omitempty"`
     Hugetlb []*HugeTlbStat `protobuf:"bytes,7,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"`
+    MemoryEvents *MemoryEvents `protobuf:"bytes,8,opt,name=memory_events,json=memoryEvents,proto3" json:"memory_events,omitempty"`
     XXX_NoUnkeyedLiteral struct{} `json:"-"`
     XXX_unrecognized []byte `json:"-"`
     XXX_sizecache int32 `json:"-"`
@@ -225,6 +226,49 @@ func (m *MemoryStat) XXX_DiscardUnknown() {

 var xxx_messageInfo_MemoryStat proto.InternalMessageInfo

+type MemoryEvents struct {
+    Low uint64 `protobuf:"varint,1,opt,name=low,proto3" json:"low,omitempty"`
+    High uint64 `protobuf:"varint,2,opt,name=high,proto3" json:"high,omitempty"`
+    Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"`
+    Oom uint64 `protobuf:"varint,4,opt,name=oom,proto3" json:"oom,omitempty"`
+    OomKill uint64 `protobuf:"varint,5,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"`
+    XXX_NoUnkeyedLiteral struct{} `json:"-"`
+    XXX_unrecognized []byte `json:"-"`
+    XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemoryEvents) Reset() { *m = MemoryEvents{} }
+func (*MemoryEvents) ProtoMessage() {}
+func (*MemoryEvents) Descriptor() ([]byte, []int) {
+    return fileDescriptor_2fc6005842049e6b, []int{4}
+}
+func (m *MemoryEvents) XXX_Unmarshal(b []byte) error {
+    return m.Unmarshal(b)
+}
+func (m *MemoryEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+    if deterministic {
+        return xxx_messageInfo_MemoryEvents.Marshal(b, m, deterministic)
+    } else {
+        b = b[:cap(b)]
+        n, err := m.MarshalTo(b)
+        if err != nil {
+            return nil, err
+        }
+        return b[:n], nil
+    }
+}
+func (m *MemoryEvents) XXX_Merge(src proto.Message) {
+    xxx_messageInfo_MemoryEvents.Merge(m, src)
+}
+func (m *MemoryEvents) XXX_Size() int {
+    return m.Size()
+}
+func (m *MemoryEvents) XXX_DiscardUnknown() {
+    xxx_messageInfo_MemoryEvents.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemoryEvents proto.InternalMessageInfo
+
 type RdmaStat struct {
     Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"`
     Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"`
@@ -236,7 +280,7 @@ type RdmaStat struct {
 func (m *RdmaStat) Reset() { *m = RdmaStat{} }
 func (*RdmaStat) ProtoMessage() {}
 func (*RdmaStat) Descriptor() ([]byte, []int) {
-    return fileDescriptor_2fc6005842049e6b, []int{4}
+    return fileDescriptor_2fc6005842049e6b, []int{5}
 }
 func (m *RdmaStat) XXX_Unmarshal(b []byte) error {
     return m.Unmarshal(b)
@@ -277,7 +321,7 @@ type RdmaEntry struct {
 func (m *RdmaEntry) Reset() { *m = RdmaEntry{} }
 func (*RdmaEntry) ProtoMessage() {}
 func (*RdmaEntry) Descriptor() ([]byte, []int) {
-    return fileDescriptor_2fc6005842049e6b, []int{5}
+    return fileDescriptor_2fc6005842049e6b, []int{6}
 }
 func (m *RdmaEntry) XXX_Unmarshal(b []byte) error {
     return m.Unmarshal(b)
@@ -316,7 +360,7 @@ type IOStat struct {
 func (m *IOStat) Reset() { *m = IOStat{} }
 func (*IOStat) ProtoMessage() {}
 func (*IOStat) Descriptor() ([]byte, []int) {
-    return fileDescriptor_2fc6005842049e6b, []int{6}
+    return fileDescriptor_2fc6005842049e6b, []int{7}
 }
 func (m *IOStat) XXX_Unmarshal(b []byte) error {
     return m.Unmarshal(b)
@@ -360,7 +404,7 @@ type IOEntry struct {
 func (m *IOEntry) Reset() { *m = IOEntry{} }
 func (*IOEntry) ProtoMessage() {}
 func (*IOEntry) Descriptor() ([]byte, []int) {
-    return fileDescriptor_2fc6005842049e6b, []int{7}
+    return fileDescriptor_2fc6005842049e6b, []int{8}
 }
 func (m *IOEntry) XXX_Unmarshal(b []byte) error {
     return m.Unmarshal(b)
@@ -401,7 +445,7 @@ type HugeTlbStat struct {
 func (m *HugeTlbStat) Reset() { *m = HugeTlbStat{} }
 func (*HugeTlbStat) ProtoMessage() {}
 func (*HugeTlbStat) Descriptor() ([]byte, []int) {
-    return fileDescriptor_2fc6005842049e6b, []int{8}
+    return fileDescriptor_2fc6005842049e6b, []int{9}
 }
 func (m *HugeTlbStat) XXX_Unmarshal(b []byte) error {
     return m.Unmarshal(b)
@@ -435,6 +479,7 @@ func init() {
     proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v2.PidsStat")
     proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v2.CPUStat")
     proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v2.MemoryStat")
+    proto.RegisterType((*MemoryEvents)(nil), "io.containerd.cgroups.v2.MemoryEvents")
     proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v2.RdmaStat")
     proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v2.RdmaEntry")
     proto.RegisterType((*IOStat)(nil), "io.containerd.cgroups.v2.IOStat")
@@ -447,77 +492,82 @@ func init() {
 }

 var fileDescriptor_2fc6005842049e6b = []byte{
-    // 1118 bytes of a gzipped FileDescriptorProto
+    // 1198 bytes of a gzipped FileDescriptorProto
     // ... (regenerated gzipped descriptor byte values, not reproduced here) ...
}
|
}
|
||||||
|
|
||||||
func (m *Metrics) Marshal() (dAtA []byte, err error) {
|
func (m *Metrics) Marshal() (dAtA []byte, err error) {
|
||||||
@ -597,6 +647,16 @@ func (m *Metrics) MarshalTo(dAtA []byte) (int, error) {
			i += n
		}
	}
	if m.MemoryEvents != nil {
		dAtA[i] = 0x42
		i++
		i = encodeVarintMetrics(dAtA, i, uint64(m.MemoryEvents.Size()))
		n6, err := m.MemoryEvents.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n6
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
@ -921,6 +981,52 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func (m *MemoryEvents) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *MemoryEvents) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Low != 0 {
		dAtA[i] = 0x8
		i++
		i = encodeVarintMetrics(dAtA, i, uint64(m.Low))
	}
	if m.High != 0 {
		dAtA[i] = 0x10
		i++
		i = encodeVarintMetrics(dAtA, i, uint64(m.High))
	}
	if m.Max != 0 {
		dAtA[i] = 0x18
		i++
		i = encodeVarintMetrics(dAtA, i, uint64(m.Max))
	}
	if m.Oom != 0 {
		dAtA[i] = 0x20
		i++
		i = encodeVarintMetrics(dAtA, i, uint64(m.Oom))
	}
	if m.OomKill != 0 {
		dAtA[i] = 0x28
		i++
		i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}

func (m *RdmaStat) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
@ -1165,6 +1271,10 @@ func (m *Metrics) Size() (n int) {
			n += 1 + l + sovMetrics(uint64(l))
		}
	}
	if m.MemoryEvents != nil {
		l = m.MemoryEvents.Size()
		n += 1 + l + sovMetrics(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
@ -1336,6 +1446,33 @@ func (m *MemoryStat) Size() (n int) {
	return n
}

func (m *MemoryEvents) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Low != 0 {
		n += 1 + sovMetrics(uint64(m.Low))
	}
	if m.High != 0 {
		n += 1 + sovMetrics(uint64(m.High))
	}
	if m.Max != 0 {
		n += 1 + sovMetrics(uint64(m.Max))
	}
	if m.Oom != 0 {
		n += 1 + sovMetrics(uint64(m.Oom))
	}
	if m.OomKill != 0 {
		n += 1 + sovMetrics(uint64(m.OomKill))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

func (m *RdmaStat) Size() (n int) {
	if m == nil {
		return 0
@ -1476,6 +1613,7 @@ func (this *Metrics) String() string {
		`Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`,
		`Io:` + strings.Replace(fmt.Sprintf("%v", this.Io), "IOStat", "IOStat", 1) + `,`,
		`Hugetlb:` + strings.Replace(fmt.Sprintf("%v", this.Hugetlb), "HugeTlbStat", "HugeTlbStat", 1) + `,`,
		`MemoryEvents:` + strings.Replace(fmt.Sprintf("%v", this.MemoryEvents), "MemoryEvents", "MemoryEvents", 1) + `,`,
		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
		`}`,
	}, "")
@ -1554,6 +1692,21 @@ func (this *MemoryStat) String() string {
	}, "")
	return s
}
func (this *MemoryEvents) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&MemoryEvents{`,
		`Low:` + fmt.Sprintf("%v", this.Low) + `,`,
		`High:` + fmt.Sprintf("%v", this.High) + `,`,
		`Max:` + fmt.Sprintf("%v", this.Max) + `,`,
		`Oom:` + fmt.Sprintf("%v", this.Oom) + `,`,
		`OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`,
		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
		`}`,
	}, "")
	return s
}
func (this *RdmaStat) String() string {
	if this == nil {
		return "nil"
@ -1870,6 +2023,42 @@ func (m *Metrics) Unmarshal(dAtA []byte) error {
				return err
			}
			iNdEx = postIndex
		case 8:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field MemoryEvents", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthMetrics
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthMetrics
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.MemoryEvents == nil {
				m.MemoryEvents = &MemoryEvents{}
			}
			if err := m.MemoryEvents.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
@ -2874,6 +3063,155 @@ func (m *MemoryStat) Unmarshal(dAtA []byte) error {
		}
	}
	return nil
}
func (m *MemoryEvents) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowMetrics
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MemoryEvents: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MemoryEvents: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Low", wireType)
			}
			m.Low = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Low |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field High", wireType)
			}
			m.High = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.High |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
			}
			m.Max = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Max |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Oom", wireType)
			}
			m.Oom = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Oom |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType)
			}
			m.OomKill = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowMetrics
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.OomKill |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			iNdEx = preIndex
			skippy, err := skipMetrics(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthMetrics
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *RdmaStat) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
9  vendor/github.com/containerd/cgroups/v2/stats/metrics.proto  generated  vendored
@ -11,6 +11,7 @@ message Metrics {
	RdmaStat rdma = 5;
	IOStat io = 6;
	repeated HugeTlbStat hugetlb = 7;
	MemoryEvents memory_events = 8;
}

message PidsStat {
@ -65,6 +66,14 @@ message MemoryStat {
	uint64 swap_limit = 35;
}

message MemoryEvents {
	uint64 low = 1;
	uint64 high = 2;
	uint64 max = 3;
	uint64 oom = 4;
	uint64 oom_kill = 5;
}

message RdmaStat {
	repeated RdmaEntry current = 1;
	repeated RdmaEntry limit = 2;
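The new `memory_events` field surfaces the cgroup v2 `memory.events` counters (low, high, max, oom, oom_kill) through the stats message. A minimal sketch of how a consumer might read those counters, assuming only the generated Go types shown in this vendored package (`github.com/containerd/cgroups/v2/stats`); `payload` is a hypothetical protobuf-encoded `Metrics` blob obtained elsewhere:

```go
package example

import (
	"fmt"

	stats "github.com/containerd/cgroups/v2/stats"
)

// reportMemoryEvents decodes a serialized Metrics message and prints the
// cgroup v2 memory.events counters when the new field is populated.
func reportMemoryEvents(payload []byte) error {
	var m stats.Metrics
	// Unmarshal is the gogo/protobuf method generated in metrics.pb.go above.
	if err := m.Unmarshal(payload); err != nil {
		return err
	}
	if ev := m.MemoryEvents; ev != nil {
		fmt.Printf("low=%d high=%d max=%d oom=%d oom_kill=%d\n",
			ev.Low, ev.High, ev.Max, ev.Oom, ev.OomKill)
	}
	return nil
}
```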
21  vendor/github.com/cpuguy83/go-md2man/README.md  generated  vendored
@ -1,21 +0,0 @@
go-md2man
=========

** Work in Progress **
This still needs a lot of help to be complete, or even usable!

Uses blackfriday to process markdown into man pages.

### Usage

./md2man -in /path/to/markdownfile.md -out /manfile/output/path

### How to contribute

We use [dep](https://github.com/golang/dep/) for vendoring Go packages.
See dep documentation for how to update.

### TODO

- Needs oh so much testing love
- Look into blackfriday's 2.0 API
5  vendor/github.com/cpuguy83/go-md2man/go.mod  generated  vendored
@ -1,5 +0,0 @@
module github.com/cpuguy83/go-md2man

go 1.12

require github.com/russross/blackfriday v1.5.2
20  vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go  generated  vendored
@ -1,20 +0,0 @@
package md2man

import (
	"github.com/russross/blackfriday"
)

// Render converts a markdown document into a roff formatted document.
func Render(doc []byte) []byte {
	renderer := RoffRenderer(0)
	extensions := 0
	extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS
	extensions |= blackfriday.EXTENSION_TABLES
	extensions |= blackfriday.EXTENSION_FENCED_CODE
	extensions |= blackfriday.EXTENSION_AUTOLINK
	extensions |= blackfriday.EXTENSION_SPACE_HEADERS
	extensions |= blackfriday.EXTENSION_FOOTNOTES
	extensions |= blackfriday.EXTENSION_TITLEBLOCK

	return blackfriday.Markdown(doc, renderer, extensions)
}
285  vendor/github.com/cpuguy83/go-md2man/md2man/roff.go  generated  vendored
@ -1,285 +0,0 @@
|
|||||||
package md2man
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"html"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/russross/blackfriday"
|
|
||||||
)
|
|
||||||
|
|
||||||
type roffRenderer struct {
|
|
||||||
ListCounters []int
|
|
||||||
}
|
|
||||||
|
|
||||||
// RoffRenderer creates a new blackfriday Renderer for generating roff documents
|
|
||||||
// from markdown
|
|
||||||
func RoffRenderer(flags int) blackfriday.Renderer {
|
|
||||||
return &roffRenderer{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) GetFlags() int {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) {
|
|
||||||
out.WriteString(".TH ")
|
|
||||||
|
|
||||||
splitText := bytes.Split(text, []byte("\n"))
|
|
||||||
for i, line := range splitText {
|
|
||||||
line = bytes.TrimPrefix(line, []byte("% "))
|
|
||||||
if i == 0 {
|
|
||||||
line = bytes.Replace(line, []byte("("), []byte("\" \""), 1)
|
|
||||||
line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1)
|
|
||||||
}
|
|
||||||
line = append([]byte("\""), line...)
|
|
||||||
line = append(line, []byte("\" ")...)
|
|
||||||
out.Write(line)
|
|
||||||
}
|
|
||||||
out.WriteString("\n")
|
|
||||||
|
|
||||||
// disable hyphenation
|
|
||||||
out.WriteString(".nh\n")
|
|
||||||
// disable justification (adjust text to left margin only)
|
|
||||||
out.WriteString(".ad l\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
|
|
||||||
out.WriteString("\n.PP\n.RS\n\n.nf\n")
|
|
||||||
escapeSpecialChars(out, text)
|
|
||||||
out.WriteString("\n.fi\n.RE\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) {
|
|
||||||
out.WriteString("\n.PP\n.RS\n")
|
|
||||||
out.Write(text)
|
|
||||||
out.WriteString("\n.RE\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { // nolint: golint
|
|
||||||
out.Write(text)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {
|
|
||||||
marker := out.Len()
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case marker == 0:
|
|
||||||
// This is the doc header
|
|
||||||
out.WriteString(".TH ")
|
|
||||||
case level == 1:
|
|
||||||
out.WriteString("\n\n.SH ")
|
|
||||||
case level == 2:
|
|
||||||
out.WriteString("\n.SH ")
|
|
||||||
default:
|
|
||||||
out.WriteString("\n.SS ")
|
|
||||||
}
|
|
||||||
|
|
||||||
if !text() {
|
|
||||||
out.Truncate(marker)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) HRule(out *bytes.Buffer) {
|
|
||||||
out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
|
|
||||||
marker := out.Len()
|
|
||||||
r.ListCounters = append(r.ListCounters, 1)
|
|
||||||
out.WriteString("\n.RS\n")
|
|
||||||
if !text() {
|
|
||||||
out.Truncate(marker)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
r.ListCounters = r.ListCounters[:len(r.ListCounters)-1]
|
|
||||||
out.WriteString("\n.RE\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
|
|
||||||
if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
|
|
||||||
out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", r.ListCounters[len(r.ListCounters)-1]))
|
|
||||||
r.ListCounters[len(r.ListCounters)-1]++
|
|
||||||
} else {
|
|
||||||
out.WriteString(".IP \\(bu 2\n")
|
|
||||||
}
|
|
||||||
out.Write(text)
|
|
||||||
out.WriteString("\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
|
|
||||||
marker := out.Len()
|
|
||||||
out.WriteString("\n.PP\n")
|
|
||||||
if !text() {
|
|
||||||
out.Truncate(marker)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if marker != 0 {
|
|
||||||
out.WriteString("\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
|
|
||||||
out.WriteString("\n.TS\nallbox;\n")
|
|
||||||
|
|
||||||
maxDelims := 0
|
|
||||||
lines := strings.Split(strings.TrimRight(string(header), "\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n")
|
|
||||||
for _, w := range lines {
|
|
||||||
curDelims := strings.Count(w, "\t")
|
|
||||||
if curDelims > maxDelims {
|
|
||||||
maxDelims = curDelims
|
|
||||||
}
|
|
||||||
}
|
|
||||||
out.Write([]byte(strings.Repeat("l ", maxDelims+1) + "\n"))
|
|
||||||
out.Write([]byte(strings.Repeat("l ", maxDelims+1) + ".\n"))
|
|
||||||
out.Write(header)
|
|
||||||
if len(header) > 0 {
|
|
||||||
out.Write([]byte("\n"))
|
|
||||||
}
|
|
||||||
|
|
||||||
out.Write(body)
|
|
||||||
out.WriteString("\n.TE\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) {
|
|
||||||
if out.Len() > 0 {
|
|
||||||
out.WriteString("\n")
|
|
||||||
}
|
|
||||||
out.Write(text)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
|
|
||||||
if out.Len() > 0 {
|
|
||||||
out.WriteString("\t")
|
|
||||||
}
|
|
||||||
if len(text) == 0 {
|
|
||||||
text = []byte{' '}
|
|
||||||
}
|
|
||||||
out.Write([]byte("\\fB\\fC" + string(text) + "\\fR"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) {
|
|
||||||
if out.Len() > 0 {
|
|
||||||
out.WriteString("\t")
|
|
||||||
}
|
|
||||||
if len(text) > 30 {
|
|
||||||
text = append([]byte("T{\n"), text...)
|
|
||||||
text = append(text, []byte("\nT}")...)
|
|
||||||
}
|
|
||||||
if len(text) == 0 {
|
|
||||||
text = []byte{' '}
|
|
||||||
}
|
|
||||||
out.Write(text)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {
|
|
||||||
out.WriteString("\n\\[la]")
|
|
||||||
out.Write(link)
|
|
||||||
out.WriteString("\\[ra]")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) {
|
|
||||||
out.WriteString("\\fB\\fC")
|
|
||||||
escapeSpecialChars(out, text)
|
|
||||||
out.WriteString("\\fR")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) {
|
|
||||||
out.WriteString("\\fB")
|
|
||||||
out.Write(text)
|
|
||||||
out.WriteString("\\fP")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) {
|
|
||||||
out.WriteString("\\fI")
|
|
||||||
out.Write(text)
|
|
||||||
out.WriteString("\\fP")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) LineBreak(out *bytes.Buffer) {
|
|
||||||
out.WriteString("\n.br\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
|
|
||||||
out.Write(content)
|
|
||||||
r.AutoLink(out, link, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { // nolint: golint
|
|
||||||
out.Write(tag)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) {
|
|
||||||
out.WriteString("\\s+2")
|
|
||||||
out.Write(text)
|
|
||||||
out.WriteString("\\s-2")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) {
|
|
||||||
out.WriteString(html.UnescapeString(string(entity)))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) {
|
|
||||||
escapeSpecialChars(out, text)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func needsBackslash(c byte) bool {
|
|
||||||
for _, r := range []byte("-_&\\~") {
|
|
||||||
if c == r {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func escapeSpecialChars(out *bytes.Buffer, text []byte) {
|
|
||||||
for i := 0; i < len(text); i++ {
|
|
||||||
// escape initial apostrophe or period
|
|
||||||
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
|
|
||||||
out.WriteString("\\&")
|
|
||||||
}
|
|
||||||
|
|
||||||
// directly copy normal characters
|
|
||||||
org := i
|
|
||||||
|
|
||||||
for i < len(text) && !needsBackslash(text[i]) {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i > org {
|
|
||||||
out.Write(text[org:i])
|
|
||||||
}
|
|
||||||
|
|
||||||
// escape a character
|
|
||||||
if i >= len(text) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
out.WriteByte('\\')
|
|
||||||
out.WriteByte(text[i])
|
|
||||||
}
|
|
||||||
}
|
|
15  vendor/github.com/cpuguy83/go-md2man/v2/README.md  generated  vendored  Normal file
@ -0,0 +1,15 @@
go-md2man
=========

Converts markdown into roff (man pages).

Uses blackfriday to process markdown into man pages.

### Usage

./md2man -in /path/to/markdownfile.md -out /manfile/output/path

### How to contribute

We use go modules to manage dependencies.
As such you must be using at lest go1.11.
9  vendor/github.com/cpuguy83/go-md2man/v2/go.mod  generated  vendored  Normal file
@ -0,0 +1,9 @@
module github.com/cpuguy83/go-md2man/v2

go 1.12

require (
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/russross/blackfriday/v2 v2.0.1
	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
)
14  vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go  generated  vendored  Normal file
@ -0,0 +1,14 @@
package md2man

import (
	"github.com/russross/blackfriday/v2"
)

// Render converts a markdown document into a roff formatted document.
func Render(doc []byte) []byte {
	renderer := NewRoffRenderer()

	return blackfriday.Run(doc,
		[]blackfriday.Option{blackfriday.WithRenderer(renderer),
			blackfriday.WithExtensions(renderer.GetExtensions())}...)
}
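For reference, a small usage sketch of the v2 `Render` entry point shown above. The import path follows the new vendored module, and the markdown input is purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// A tiny markdown document with a title block, converted to roff output.
	doc := []byte("% runc 8 2020\n# NAME\nrunc - CLI tool for spawning containers\n")
	fmt.Print(string(md2man.Render(doc)))
}
```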
345  vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go  generated  vendored  Normal file
@ -0,0 +1,345 @@
|
|||||||
|
package md2man
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/russross/blackfriday/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// roffRenderer implements the blackfriday.Renderer interface for creating
|
||||||
|
// roff format (manpages) from markdown text
|
||||||
|
type roffRenderer struct {
|
||||||
|
extensions blackfriday.Extensions
|
||||||
|
listCounters []int
|
||||||
|
firstHeader bool
|
||||||
|
defineTerm bool
|
||||||
|
listDepth int
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
titleHeader = ".TH "
|
||||||
|
topLevelHeader = "\n\n.SH "
|
||||||
|
secondLevelHdr = "\n.SH "
|
||||||
|
otherHeader = "\n.SS "
|
||||||
|
crTag = "\n"
|
||||||
|
emphTag = "\\fI"
|
||||||
|
emphCloseTag = "\\fP"
|
||||||
|
strongTag = "\\fB"
|
||||||
|
strongCloseTag = "\\fP"
|
||||||
|
breakTag = "\n.br\n"
|
||||||
|
paraTag = "\n.PP\n"
|
||||||
|
hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
|
||||||
|
linkTag = "\n\\[la]"
|
||||||
|
linkCloseTag = "\\[ra]"
|
||||||
|
codespanTag = "\\fB\\fC"
|
||||||
|
codespanCloseTag = "\\fR"
|
||||||
|
codeTag = "\n.PP\n.RS\n\n.nf\n"
|
||||||
|
codeCloseTag = "\n.fi\n.RE\n"
|
||||||
|
quoteTag = "\n.PP\n.RS\n"
|
||||||
|
quoteCloseTag = "\n.RE\n"
|
||||||
|
listTag = "\n.RS\n"
|
||||||
|
listCloseTag = "\n.RE\n"
|
||||||
|
arglistTag = "\n.TP\n"
|
||||||
|
tableStart = "\n.TS\nallbox;\n"
|
||||||
|
tableEnd = ".TE\n"
|
||||||
|
tableCellStart = "T{\n"
|
||||||
|
tableCellEnd = "\nT}\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
|
||||||
|
// from markdown
|
||||||
|
func NewRoffRenderer() *roffRenderer { // nolint: golint
|
||||||
|
var extensions blackfriday.Extensions
|
||||||
|
|
||||||
|
extensions |= blackfriday.NoIntraEmphasis
|
||||||
|
extensions |= blackfriday.Tables
|
||||||
|
extensions |= blackfriday.FencedCode
|
||||||
|
extensions |= blackfriday.SpaceHeadings
|
||||||
|
extensions |= blackfriday.Footnotes
|
||||||
|
extensions |= blackfriday.Titleblock
|
||||||
|
extensions |= blackfriday.DefinitionLists
|
||||||
|
return &roffRenderer{
|
||||||
|
extensions: extensions,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetExtensions returns the list of extensions used by this renderer implementation
|
||||||
|
func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
|
||||||
|
return r.extensions
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenderHeader handles outputting the header at document start
|
||||||
|
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
|
||||||
|
// disable hyphenation
|
||||||
|
out(w, ".nh\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenderFooter handles outputting the footer at the document end; the roff
|
||||||
|
// renderer has no footer information
|
||||||
|
func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenderNode is called for each node in a markdown document; based on the node
|
||||||
|
// type the equivalent roff output is sent to the writer
|
||||||
|
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||||
|
|
||||||
|
var walkAction = blackfriday.GoToNext
|
||||||
|
|
||||||
|
switch node.Type {
|
||||||
|
case blackfriday.Text:
|
||||||
|
r.handleText(w, node, entering)
|
||||||
|
case blackfriday.Softbreak:
|
||||||
|
out(w, crTag)
|
||||||
|
case blackfriday.Hardbreak:
|
||||||
|
out(w, breakTag)
|
||||||
|
case blackfriday.Emph:
|
||||||
|
if entering {
|
||||||
|
out(w, emphTag)
|
||||||
|
} else {
|
||||||
|
out(w, emphCloseTag)
|
||||||
|
}
|
||||||
|
case blackfriday.Strong:
|
||||||
|
if entering {
|
||||||
|
out(w, strongTag)
|
||||||
|
} else {
|
||||||
|
out(w, strongCloseTag)
|
||||||
|
}
|
||||||
|
case blackfriday.Link:
|
||||||
|
if !entering {
|
||||||
|
out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
|
||||||
|
}
|
||||||
|
case blackfriday.Image:
|
||||||
|
// ignore images
|
||||||
|
walkAction = blackfriday.SkipChildren
|
||||||
|
case blackfriday.Code:
|
||||||
|
out(w, codespanTag)
|
||||||
|
escapeSpecialChars(w, node.Literal)
|
||||||
|
out(w, codespanCloseTag)
|
||||||
|
case blackfriday.Document:
|
||||||
|
break
|
||||||
|
case blackfriday.Paragraph:
|
||||||
|
// roff .PP markers break lists
|
||||||
|
if r.listDepth > 0 {
|
||||||
|
return blackfriday.GoToNext
|
||||||
|
}
|
||||||
|
if entering {
|
||||||
|
out(w, paraTag)
|
||||||
|
} else {
|
||||||
|
out(w, crTag)
|
||||||
|
}
|
||||||
|
case blackfriday.BlockQuote:
|
||||||
|
if entering {
|
||||||
|
out(w, quoteTag)
|
||||||
|
} else {
|
||||||
|
out(w, quoteCloseTag)
|
||||||
|
}
|
||||||
|
case blackfriday.Heading:
|
||||||
|
r.handleHeading(w, node, entering)
|
||||||
|
case blackfriday.HorizontalRule:
|
||||||
|
out(w, hruleTag)
|
||||||
|
case blackfriday.List:
|
||||||
|
r.handleList(w, node, entering)
|
||||||
|
case blackfriday.Item:
|
||||||
|
r.handleItem(w, node, entering)
|
||||||
|
case blackfriday.CodeBlock:
|
||||||
|
out(w, codeTag)
|
||||||
|
escapeSpecialChars(w, node.Literal)
|
||||||
|
out(w, codeCloseTag)
|
||||||
|
case blackfriday.Table:
|
||||||
|
r.handleTable(w, node, entering)
|
||||||
|
case blackfriday.TableCell:
|
||||||
|
r.handleTableCell(w, node, entering)
|
||||||
|
case blackfriday.TableHead:
|
||||||
|
case blackfriday.TableBody:
|
||||||
|
case blackfriday.TableRow:
|
||||||
|
// no action as cell entries do all the nroff formatting
|
||||||
|
return blackfriday.GoToNext
|
||||||
|
default:
|
||||||
|
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||||||
|
}
|
||||||
|
return walkAction
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
var (
|
||||||
|
start, end string
|
||||||
|
)
|
||||||
|
// handle special roff table cell text encapsulation
|
||||||
|
if node.Parent.Type == blackfriday.TableCell {
|
||||||
|
if len(node.Literal) > 30 {
|
||||||
|
start = tableCellStart
|
||||||
|
end = tableCellEnd
|
||||||
|
} else {
|
||||||
|
// end rows that aren't terminated by "tableCellEnd" with a cr if end of row
|
||||||
|
if node.Parent.Next == nil && !node.Parent.IsHeader {
|
||||||
|
end = crTag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out(w, start)
|
||||||
|
escapeSpecialChars(w, node.Literal)
|
||||||
|
out(w, end)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
if entering {
|
||||||
|
switch node.Level {
|
||||||
|
case 1:
|
||||||
|
if !r.firstHeader {
|
||||||
|
out(w, titleHeader)
|
||||||
|
r.firstHeader = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
out(w, topLevelHeader)
|
||||||
|
case 2:
|
||||||
|
out(w, secondLevelHdr)
|
||||||
|
default:
|
||||||
|
out(w, otherHeader)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
openTag := listTag
|
||||||
|
closeTag := listCloseTag
|
||||||
|
if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||||
|
// tags for definition lists handled within Item node
|
||||||
|
openTag = ""
|
||||||
|
closeTag = ""
|
||||||
|
}
|
||||||
|
if entering {
|
||||||
|
r.listDepth++
|
||||||
|
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||||
|
r.listCounters = append(r.listCounters, 1)
|
||||||
|
}
|
||||||
|
out(w, openTag)
|
||||||
|
} else {
|
||||||
|
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||||
|
r.listCounters = r.listCounters[:len(r.listCounters)-1]
|
||||||
|
}
|
||||||
|
out(w, closeTag)
|
||||||
|
r.listDepth--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
if entering {
|
||||||
|
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||||
|
out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
|
||||||
|
r.listCounters[len(r.listCounters)-1]++
|
||||||
|
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||||
|
// state machine for handling terms and following definitions
|
||||||
|
// since blackfriday does not distinguish them properly, nor
|
||||||
|
// does it seperate them into separate lists as it should
|
||||||
|
if !r.defineTerm {
|
||||||
|
out(w, arglistTag)
|
||||||
|
r.defineTerm = true
|
||||||
|
} else {
|
||||||
|
r.defineTerm = false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
out(w, ".IP \\(bu 2\n")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
out(w, "\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
if entering {
|
||||||
|
out(w, tableStart)
|
||||||
|
//call walker to count cells (and rows?) so format section can be produced
|
||||||
|
columns := countColumns(node)
|
||||||
|
out(w, strings.Repeat("l ", columns)+"\n")
|
||||||
|
out(w, strings.Repeat("l ", columns)+".\n")
|
||||||
|
} else {
|
||||||
|
out(w, tableEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
var (
|
||||||
|
start, end string
|
||||||
|
)
|
||||||
|
if node.IsHeader {
|
||||||
|
start = codespanTag
|
||||||
|
end = codespanCloseTag
|
||||||
|
}
|
||||||
|
if entering {
|
||||||
|
if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
|
||||||
|
out(w, "\t"+start)
|
||||||
|
} else {
|
||||||
|
out(w, start)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// need to carriage return if we are at the end of the header row
|
||||||
|
if node.IsHeader && node.Next == nil {
|
||||||
|
end = end + crTag
|
||||||
|
}
|
||||||
|
out(w, end)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// because roff format requires knowing the column count before outputting any table
|
||||||
|
// data we need to walk a table tree and count the columns
|
||||||
|
func countColumns(node *blackfriday.Node) int {
|
||||||
|
var columns int
|
||||||
|
|
||||||
|
node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||||
|
switch node.Type {
|
||||||
|
case blackfriday.TableRow:
|
||||||
|
if !entering {
|
||||||
|
return blackfriday.Terminate
|
||||||
|
}
|
||||||
|
case blackfriday.TableCell:
|
||||||
|
if entering {
|
||||||
|
columns++
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
return blackfriday.GoToNext
|
||||||
|
})
|
||||||
|
return columns
|
||||||
|
}
|
||||||
|
|
||||||
|
func out(w io.Writer, output string) {
|
||||||
|
io.WriteString(w, output) // nolint: errcheck
|
||||||
|
}
|
||||||
|
|
||||||
|
func needsBackslash(c byte) bool {
|
||||||
|
for _, r := range []byte("-_&\\~") {
|
||||||
|
if c == r {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func escapeSpecialChars(w io.Writer, text []byte) {
|
||||||
|
for i := 0; i < len(text); i++ {
|
||||||
|
// escape initial apostrophe or period
|
||||||
|
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
|
||||||
|
out(w, "\\&")
|
||||||
|
}
|
||||||
|
|
||||||
|
// directly copy normal characters
|
||||||
|
org := i
|
||||||
|
|
||||||
|
for i < len(text) && !needsBackslash(text[i]) {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i > org {
|
||||||
|
w.Write(text[org:i]) // nolint: errcheck
|
||||||
|
}
|
||||||
|
|
||||||
|
// escape a character
|
||||||
|
if i >= len(text) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
|
||||||
|
}
|
||||||
|
}
|
2  vendor/github.com/golang/protobuf/go.mod  generated  vendored
@ -1,3 +1,3 @@
module github.com/golang/protobuf

go 1.12
go 1.9
7  vendor/github.com/golang/protobuf/ptypes/any/any.pb.go  generated  vendored
@ -102,7 +102,8 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
//
type Any struct {
	// A URL/resource name that uniquely identifies the type of the serialized
	// protocol buffer message. The last segment of the URL's path must represent
	// protocol buffer message. This string must contain at least
	// one "/" character. The last segment of the URL's path must represent
	// the fully qualified name of the type (as in
	// `path/google.protobuf.Duration`). The name should be in a canonical form
	// (e.g., leading "." is not accepted).
@ -181,7 +182,9 @@ func init() {
	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}

func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
func init() {
	proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4)
}

var fileDescriptor_b53526c13ae22eb4 = []byte{
	// 185 bytes of a gzipped FileDescriptorProto
3  vendor/github.com/golang/protobuf/ptypes/any/any.proto  generated  vendored
@ -121,7 +121,8 @@ option objc_class_prefix = "GPB";
//
message Any {
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. The last segment of the URL's path must represent
// protocol buffer message. This string must contain at least
// one "/" character. The last segment of the URL's path must represent
// the fully qualified name of the type (as in
// `path/google.protobuf.Duration`). The name should be in a canonical form
// (e.g., leading "." is not accepted).
6  vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go  generated  vendored
@ -41,7 +41,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (durations.seconds > 0 && duration.nanos < 0) {
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
@ -142,7 +142,9 @@ func init() {
	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}

func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
func init() {
	proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5)
}

var fileDescriptor_23597b2ebd7ac6c5 = []byte{
	// 190 bytes of a gzipped FileDescriptorProto
3  vendor/github.com/golang/protobuf/ptypes/duration/duration.proto  generated  vendored
@ -61,7 +61,7 @@ option objc_class_prefix = "GPB";
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (durations.seconds > 0 && duration.nanos < 0) {
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
@ -101,7 +101,6 @@ option objc_class_prefix = "GPB";
//
//
message Duration {

// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
40  vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go  generated  vendored
@ -20,17 +20,19 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// A Timestamp represents a point in time independent of any time zone
// A Timestamp represents a point in time independent of any time zone or local
// or calendar, represented as seconds and fractions of seconds at
// calendar, encoded as a count of seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// January 1, 1970, in the proleptic Gregorian calendar which extends the
// backwards to year one. It is encoded assuming all minutes are 60
// Gregorian calendar backwards to year one.
// seconds long, i.e. leap seconds are "smeared" so that no leap second
//
// table is needed for interpretation. Range is from
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// second table is needed for interpretation, using a [24-hour linear
// By restricting to that range, we ensure that we can convert to
// smear](https://developers.google.com/time/smear).
// and from RFC 3339 date strings.
//
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
// restricting to that range, we ensure that we can convert to and from [RFC
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
//
// # Examples
//
@ -91,12 +93,14 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
// standard
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// to this format using
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
@ -160,7 +164,9 @@ func init() {
	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}

func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
func init() {
	proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e)
}

var fileDescriptor_292007bbfe81227e = []byte{
	// 191 bytes of a gzipped FileDescriptorProto
37  vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto  generated  vendored
@ -40,17 +40,19 @@ option java_outer_classname = "TimestampProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";

// A Timestamp represents a point in time independent of any time zone
// A Timestamp represents a point in time independent of any time zone or local
// or calendar, represented as seconds and fractions of seconds at
// calendar, encoded as a count of seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// January 1, 1970, in the proleptic Gregorian calendar which extends the
// backwards to year one. It is encoded assuming all minutes are 60
// Gregorian calendar backwards to year one.
// seconds long, i.e. leap seconds are "smeared" so that no leap second
//
// table is needed for interpretation. Range is from
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// second table is needed for interpretation, using a [24-hour linear
// By restricting to that range, we ensure that we can convert to
// smear](https://developers.google.com/time/smear).
// and from RFC 3339 date strings.
//
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
// restricting to that range, we ensure that we can convert to and from [RFC
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
//
// # Examples
//
@ -111,17 +113,18 @@ option objc_class_prefix = "GPB";
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
// standard
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// to this format using
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {

// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
58  vendor/github.com/opencontainers/runc/README.md  generated  vendored
@ -3,6 +3,7 @@
|
|||||||
[](https://travis-ci.org/opencontainers/runc)
|
[](https://travis-ci.org/opencontainers/runc)
|
||||||
[](https://goreportcard.com/report/github.com/opencontainers/runc)
|
[](https://goreportcard.com/report/github.com/opencontainers/runc)
|
||||||
[](https://godoc.org/github.com/opencontainers/runc)
|
[](https://godoc.org/github.com/opencontainers/runc)
|
||||||
|
[](https://bestpractices.coreinfrastructure.org/projects/588)
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
@ -18,22 +19,23 @@ You can find official releases of `runc` on the [release](https://github.com/ope
|
|||||||
|
|
||||||
Currently, the following features are not considered to be production-ready:
|
Currently, the following features are not considered to be production-ready:
|
||||||
|
|
||||||
* Support for cgroup v2
|
* [Support for cgroup v2](./docs/cgroup-v2.md)
|
||||||
|
|
||||||
## Security
|
## Security
|
||||||
|
|
||||||
The reporting process and disclosure communications are outlined in [/org/security](https://github.com/opencontainers/org/blob/master/security/).
|
The reporting process and disclosure communications are outlined [here](https://github.com/opencontainers/org/blob/master/SECURITY.md).
|
||||||
|
|
||||||
|
### Security Audit
|
||||||
|
A third party security audit was performed by Cure53, you can see the full report [here](https://github.com/opencontainers/runc/blob/master/docs/Security-Audit.pdf).
|
||||||
|
|
||||||
## Building
|
## Building
|
||||||
|
|
||||||
`runc` currently supports the Linux platform with various architecture support.
|
`runc` currently supports the Linux platform with various architecture support.
|
||||||
It must be built with Go version 1.6 or higher in order for some features to function properly.
|
It must be built with Go version 1.13 or higher.
|
||||||
|
|
||||||
In order to enable seccomp support you will need to install `libseccomp` on your platform.
|
In order to enable seccomp support you will need to install `libseccomp` on your platform.
|
||||||
> e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu
|
> e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu
|
||||||
|
|
||||||
Otherwise, if you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make.
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# create a 'github.com/opencontainers' in your GOPATH/src
|
# create a 'github.com/opencontainers' in your GOPATH/src
|
||||||
cd github.com/opencontainers
|
cd github.com/opencontainers
|
||||||
@@ -58,20 +60,22 @@ sudo make install
 
 #### Build Tags
 
-`runc` supports optional build tags for compiling support of various features.
-To add build tags to the make option the `BUILDTAGS` variable must be set.
+`runc` supports optional build tags for compiling support of various features,
+with some of them enabled by default (see `BUILDTAGS` in top-level `Makefile`).
+
+To change build tags from the default, set the `BUILDTAGS` variable for make,
+e.g.
 
 ```bash
 make BUILDTAGS='seccomp apparmor'
 ```
 
-| Build Tag | Feature                            | Dependency  |
-|-----------|------------------------------------|-------------|
-| seccomp   | Syscall filtering                  | libseccomp  |
-| selinux   | selinux process and mount labeling | <none>      |
-| apparmor  | apparmor profile support           | <none>      |
-| ambient   | ambient capability support         | kernel 4.3  |
-| nokmem    | disable kernel memory account      | <none>      |
+| Build Tag | Feature                            | Enabled by default | Dependency |
+|-----------|------------------------------------|--------------------|------------|
+| seccomp   | Syscall filtering                  | yes                | libseccomp |
+| selinux   | selinux process and mount labeling | yes                | <none>     |
+| apparmor  | apparmor profile support           | yes                | <none>     |
+| nokmem    | disable kernel memory accounting   | no                 | <none>     |
 
 
 ### Running the test suite
@@ -97,17 +101,30 @@ You can run a specific integration test by setting the `TESTPATH` variable.
 # make test TESTPATH="/checkpoint.bats"
 ```
 
-You can run a test in your proxy environment by setting `DOCKER_BUILD_PROXY` and `DOCKER_RUN_PROXY` variables.
+You can run a specific rootless integration test by setting the `ROOTLESS_TESTPATH` variable.
 
 ```bash
-# make test DOCKER_BUILD_PROXY="--build-arg HTTP_PROXY=http://yourproxy/" DOCKER_RUN_PROXY="-e HTTP_PROXY=http://yourproxy/"
+# make test ROOTLESS_TESTPATH="/checkpoint.bats"
+```
+
+You can run a test using your container engine's flags by setting `CONTAINER_ENGINE_BUILD_FLAGS` and `CONTAINER_ENGINE_RUN_FLAGS` variables.
+
+```bash
+# make test CONTAINER_ENGINE_BUILD_FLAGS="--build-arg http_proxy=http://yourproxy/" CONTAINER_ENGINE_RUN_FLAGS="-e http_proxy=http://yourproxy/"
 ```
 
 ### Dependencies Management
 
-`runc` uses [vndr](https://github.com/LK4D4/vndr) for dependencies management.
-Please refer to [vndr](https://github.com/LK4D4/vndr) for how to add or update
-new dependencies.
+`runc` uses [Go Modules](https://github.com/golang/go/wiki/Modules) for dependencies management.
+Please refer to [Go Modules](https://github.com/golang/go/wiki/Modules) for how to add or update
+new dependencies. When updating dependencies, be sure that you are running Go `1.14` or newer.
+
+```
+# Update vendored dependencies
+make vendor
+# Verify all dependencies
+make verify-dependencies
+```
 
 ## Using runc
 
@@ -275,6 +292,9 @@ PIDFile=/run/mycontainerid.pid
 WantedBy=multi-user.target
 ```
 
+#### cgroup v2
+See [`./docs/cgroup-v2.md`](./docs/cgroup-v2.md).
+
 ## License
 
 The code and docs are released under the [Apache 2.0 license](LICENSE).
vendor/github.com/opencontainers/runc/go.mod (generated, vendored, new file, 26 lines)
@@ -0,0 +1,26 @@
+module github.com/opencontainers/runc
+
+go 1.14
+
+require (
+    github.com/checkpoint-restore/go-criu/v4 v4.0.2
+    github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3
+    github.com/containerd/console v1.0.0
+    github.com/coreos/go-systemd/v22 v22.0.0
+    github.com/cyphar/filepath-securejoin v0.2.2
+    github.com/docker/go-units v0.4.0
+    github.com/godbus/dbus/v5 v5.0.3
+    github.com/golang/protobuf v1.3.5
+    github.com/moby/sys/mountinfo v0.1.3
+    github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
+    github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
+    github.com/opencontainers/selinux v1.5.1
+    github.com/pkg/errors v0.9.1
+    github.com/seccomp/libseccomp-golang v0.9.1
+    github.com/sirupsen/logrus v1.6.0
+    github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
+    // NOTE: urfave/cli must be <= v1.22.1 due to a regression: https://github.com/urfave/cli/issues/1092
+    github.com/urfave/cli v1.22.1
+    github.com/vishvananda/netlink v1.1.0
+    golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
+)
vendor/github.com/opencontainers/runc/libcontainer/README.md (generated, vendored, 5 changes)
@@ -155,8 +155,7 @@ config := &configs.Config{
         Parent: "system",
         Resources: &configs.Resources{
             MemorySwappiness: nil,
-            AllowAllDevices:  nil,
-            AllowedDevices:   configs.DefaultAllowedDevices,
+            Devices:          specconv.AllowedDevices,
         },
     },
     MaskPaths: []string{
@@ -166,7 +165,7 @@ config := &configs.Config{
     ReadonlyPaths: []string{
         "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
     },
-    Devices:  configs.DefaultAutoCreatedDevices,
+    Devices:  specconv.AllowedDevices,
     Hostname: "testing",
     Mounts: []*configs.Mount{
         {
vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go (generated, vendored, 23 changes)
@@ -1,5 +1,9 @@
 package configs
 
+import (
+    systemdDbus "github.com/coreos/go-systemd/v22/dbus"
+)
+
 type FreezerState string
 
 const (
@@ -29,18 +33,16 @@ type Cgroup struct {
 
     // Resources contains various cgroups settings to apply
     *Resources
 
+    // SystemdProps are any additional properties for systemd,
+    // derived from org.systemd.property.xxx annotations.
+    // Ignored unless systemd is used for managing cgroups.
+    SystemdProps []systemdDbus.Property `json:"-"`
 }
 
 type Resources struct {
-    // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list.
-    // Deprecated
-    AllowAllDevices *bool `json:"allow_all_devices,omitempty"`
-    // Deprecated
-    AllowedDevices []*Device `json:"allowed_devices,omitempty"`
-    // Deprecated
-    DeniedDevices []*Device `json:"denied_devices,omitempty"`
-
-    Devices []*Device `json:"devices"`
+    // Devices is the set of access rules for devices in the container.
+    Devices []*DeviceRule `json:"devices"`
 
     // Memory limit (in bytes)
     Memory int64 `json:"memory"`
@@ -124,7 +126,4 @@ type Resources struct {
 
     // CpuWeight sets a proportional bandwidth limit.
    CpuWeight uint64 `json:"cpu_weight"`
-
-    // CpuMax sets she maximum bandwidth limit (format: max period).
-    CpuMax string `json:"cpu_max"`
 }
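As a quick illustration of the new `Resources.Devices` field above, here is a minimal sketch (not part of the PR) of building a device rule list with the `*DeviceRule` type that is introduced later in this diff in `configs/device.go`; the concrete device numbers are only an example:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// Allow read/write access to /dev/null (char device 1:3). Field names
	// follow the DeviceRule struct shown further down in this diff.
	res := &configs.Resources{
		Devices: []*configs.DeviceRule{
			{Type: configs.CharDevice, Major: 1, Minor: 3, Permissions: "rw", Allow: true},
		},
	}
	fmt.Printf("%d device rule(s) configured\n", len(res.Devices))
}
```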
vendor/github.com/opencontainers/runc/libcontainer/configs/config.go (generated, vendored, 96 changes)
@@ -8,7 +8,7 @@ import (
     "time"
 
     "github.com/opencontainers/runtime-spec/specs-go"
+    "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
 )
 
@@ -70,9 +70,10 @@ type Arg struct {
 
 // Syscall is a rule to match a syscall in Seccomp
 type Syscall struct {
     Name     string `json:"name"`
     Action   Action `json:"action"`
+    ErrnoRet *uint  `json:"errnoRet"`
     Args     []*Arg `json:"args"`
 }
 
 // TODO Windows. Many of these fields should be factored out into those parts
@@ -175,7 +176,7 @@ type Config struct {
 
     // Hooks are a collection of actions to perform at various container lifecycle events.
     // CommandHooks are serialized to JSON, but other hooks are not.
-    Hooks *Hooks
+    Hooks Hooks
 
     // Version is the version of opencontainer specification that is supported.
     Version string `json:"version"`
@@ -202,17 +203,50 @@ type Config struct {
     RootlessCgroups bool `json:"rootless_cgroups,omitempty"`
 }
 
-type Hooks struct {
+type HookName string
+type HookList []Hook
+type Hooks map[HookName]HookList
+
+const (
     // Prestart commands are executed after the container namespaces are created,
     // but before the user supplied command is executed from init.
-    Prestart []Hook
+    // Note: This hook is now deprecated
+    // Prestart commands are called in the Runtime namespace.
+    Prestart HookName = "prestart"
+
+    // CreateRuntime commands MUST be called as part of the create operation after
+    // the runtime environment has been created but before the pivot_root has been executed.
+    // CreateRuntime is called immediately after the deprecated Prestart hook.
+    // CreateRuntime commands are called in the Runtime Namespace.
+    CreateRuntime = "createRuntime"
+
+    // CreateContainer commands MUST be called as part of the create operation after
+    // the runtime environment has been created but before the pivot_root has been executed.
+    // CreateContainer commands are called in the Container namespace.
+    CreateContainer = "createContainer"
+
+    // StartContainer commands MUST be called as part of the start operation and before
+    // the container process is started.
+    // StartContainer commands are called in the Container namespace.
+    StartContainer = "startContainer"
 
     // Poststart commands are executed after the container init process starts.
-    Poststart []Hook
+    // Poststart commands are called in the Runtime Namespace.
+    Poststart = "poststart"
 
     // Poststop commands are executed after the container init process exits.
-    Poststop []Hook
-}
+    // Poststop commands are called in the Runtime Namespace.
+    Poststop = "poststop"
+)
+
+// TODO move this to runtime-spec
+// See: https://github.com/opencontainers/runtime-spec/pull/1046
+const (
+    Creating = "creating"
+    Created  = "created"
+    Running  = "running"
+    Stopped  = "stopped"
+)
 
 type Capabilities struct {
     // Bounding is the set of capabilities checked by the kernel.
@@ -227,32 +261,39 @@ type Capabilities struct {
     Ambient []string
 }
 
-func (hooks *Hooks) UnmarshalJSON(b []byte) error {
-    var state struct {
-        Prestart  []CommandHook
-        Poststart []CommandHook
-        Poststop  []CommandHook
+func (hooks HookList) RunHooks(state *specs.State) error {
+    for i, h := range hooks {
+        if err := h.Run(state); err != nil {
+            return errors.Wrapf(err, "Running hook #%d:", i)
+        }
     }
 
+    return nil
+}
+
+func (hooks *Hooks) UnmarshalJSON(b []byte) error {
+    var state map[HookName][]CommandHook
+
     if err := json.Unmarshal(b, &state); err != nil {
         return err
     }
 
-    deserialize := func(shooks []CommandHook) (hooks []Hook) {
-        for _, shook := range shooks {
-            hooks = append(hooks, shook)
+    *hooks = Hooks{}
+    for n, commandHooks := range state {
+        if len(commandHooks) == 0 {
+            continue
         }
 
-        return hooks
+        (*hooks)[n] = HookList{}
+        for _, h := range commandHooks {
+            (*hooks)[n] = append((*hooks)[n], h)
+        }
     }
 
-    hooks.Prestart = deserialize(state.Prestart)
-    hooks.Poststart = deserialize(state.Poststart)
-    hooks.Poststop = deserialize(state.Poststop)
     return nil
 }
 
-func (hooks Hooks) MarshalJSON() ([]byte, error) {
+func (hooks *Hooks) MarshalJSON() ([]byte, error) {
     serialize := func(hooks []Hook) (serializableHooks []CommandHook) {
         for _, hook := range hooks {
             switch chook := hook.(type) {
@@ -267,9 +308,12 @@ func (hooks Hooks) MarshalJSON() ([]byte, error) {
     }
 
     return json.Marshal(map[string]interface{}{
-        "prestart":  serialize(hooks.Prestart),
-        "poststart": serialize(hooks.Poststart),
-        "poststop":  serialize(hooks.Poststop),
+        "prestart":        serialize((*hooks)[Prestart]),
+        "createRuntime":   serialize((*hooks)[CreateRuntime]),
+        "createContainer": serialize((*hooks)[CreateContainer]),
+        "startContainer":  serialize((*hooks)[StartContainer]),
+        "poststart":       serialize((*hooks)[Poststart]),
+        "poststop":        serialize((*hooks)[Poststop]),
     })
 }
 
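The `Hooks` type above is now a map keyed by hook name, and each `HookList` can run itself against an OCI state. A minimal sketch of registering and running a hook follows; the `funcHook` type is a hypothetical `Hook` implementation written for this example (the vendored package also ships `CommandHook`, whose fields are not shown in this diff), and the state values are made up:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// funcHook adapts a plain function to the Hook interface used by HookList
// (its Run signature is taken from the RunHooks body in the diff above).
type funcHook func(*specs.State) error

func (f funcHook) Run(s *specs.State) error { return f(s) }

func main() {
	hooks := configs.Hooks{
		configs.CreateRuntime: configs.HookList{
			funcHook(func(s *specs.State) error {
				fmt.Println("createRuntime hook for container", s.ID)
				return nil
			}),
		},
	}

	state := &specs.State{ID: "mycontainer", Pid: 1234}
	if err := hooks[configs.CreateRuntime].RunHooks(state); err != nil {
		fmt.Println("hook failed:", err)
	}
}
```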
vendor/github.com/opencontainers/runc/libcontainer/configs/device.go (generated, vendored, 175 changes)
@@ -1,8 +1,12 @@
 package configs
 
 import (
+    "errors"
     "fmt"
     "os"
+    "strconv"
+
+    "golang.org/x/sys/unix"
 )
 
 const (
@@ -12,21 +16,11 @@ const (
 // TODO Windows: This can be factored out in the future
 
 type Device struct {
-    // Device type, block, char, etc.
-    Type rune `json:"type"`
+    DeviceRule
 
     // Path to the device.
     Path string `json:"path"`
 
-    // Major is the device's major number.
-    Major int64 `json:"major"`
-
-    // Minor is the device's minor number.
-    Minor int64 `json:"minor"`
-
-    // Cgroup permissions format, rwm.
-    Permissions string `json:"permissions"`
-
     // FileMode permission bits for the device.
     FileMode os.FileMode `json:"file_mode"`
 
@@ -35,23 +29,154 @@ type Device struct {
 
     // Gid of the device.
     Gid uint32 `json:"gid"`
+}
 
-// Write the file to the allowed list
+// DevicePermissions is a cgroupv1-style string to represent device access. It
+// has to be a string for backward compatibility reasons, hence why it has
+// methods to do set operations.
+type DevicePermissions string
+
+const (
+    deviceRead uint = (1 << iota)
+    deviceWrite
+    deviceMknod
+)
+
+func (p DevicePermissions) toSet() uint {
+    var set uint
+    for _, perm := range p {
+        switch perm {
+        case 'r':
+            set |= deviceRead
+        case 'w':
+            set |= deviceWrite
+        case 'm':
+            set |= deviceMknod
+        }
+    }
+    return set
+}
+
+func fromSet(set uint) DevicePermissions {
+    var perm string
+    if set&deviceRead == deviceRead {
+        perm += "r"
+    }
+    if set&deviceWrite == deviceWrite {
+        perm += "w"
+    }
+    if set&deviceMknod == deviceMknod {
+        perm += "m"
+    }
+    return DevicePermissions(perm)
+}
+
+// Union returns the union of the two sets of DevicePermissions.
+func (p DevicePermissions) Union(o DevicePermissions) DevicePermissions {
+    lhs := p.toSet()
+    rhs := o.toSet()
+    return fromSet(lhs | rhs)
+}
+
+// Difference returns the set difference of the two sets of DevicePermissions.
+// In set notation, A.Difference(B) gives you A\B.
+func (p DevicePermissions) Difference(o DevicePermissions) DevicePermissions {
+    lhs := p.toSet()
+    rhs := o.toSet()
+    return fromSet(lhs &^ rhs)
+}
+
+// Intersection computes the intersection of the two sets of DevicePermissions.
+func (p DevicePermissions) Intersection(o DevicePermissions) DevicePermissions {
+    lhs := p.toSet()
+    rhs := o.toSet()
+    return fromSet(lhs & rhs)
+}
+
+// IsEmpty returns whether the set of permissions in a DevicePermissions is
+// empty.
+func (p DevicePermissions) IsEmpty() bool {
+    return p == DevicePermissions("")
+}
+
+// IsValid returns whether the set of permissions is a subset of valid
+// permissions (namely, {r,w,m}).
+func (p DevicePermissions) IsValid() bool {
+    return p == fromSet(p.toSet())
+}
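The permission-set helpers added above are easiest to follow with a tiny usage example; this sketch is not part of the PR and only exercises the exported methods shown in the diff:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// "rwm"-style permission strings behave like small sets over {r, w, m}.
	a := configs.DevicePermissions("rw")
	b := configs.DevicePermissions("wm")

	fmt.Println(a.Union(b))        // rwm
	fmt.Println(a.Intersection(b)) // w
	fmt.Println(a.Difference(b))   // r
	fmt.Println(a.IsValid())       // true
	// 'x' is not a valid permission, so the set round-trip drops it:
	fmt.Println(configs.DevicePermissions("rwx").IsValid()) // false
}
```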
+
+type DeviceType rune
+
+const (
+    WildcardDevice DeviceType = 'a'
+    BlockDevice    DeviceType = 'b'
+    CharDevice     DeviceType = 'c' // or 'u'
+    FifoDevice     DeviceType = 'p'
+)
+
+func (t DeviceType) IsValid() bool {
+    switch t {
+    case WildcardDevice, BlockDevice, CharDevice, FifoDevice:
+        return true
+    default:
+        return false
+    }
+}
+
+func (t DeviceType) CanMknod() bool {
+    switch t {
+    case BlockDevice, CharDevice, FifoDevice:
+        return true
+    default:
+        return false
+    }
+}
+
+func (t DeviceType) CanCgroup() bool {
+    switch t {
+    case WildcardDevice, BlockDevice, CharDevice:
+        return true
+    default:
+        return false
+    }
+}
+
+type DeviceRule struct {
+    // Type of device ('c' for char, 'b' for block). If set to 'a', this rule
+    // acts as a wildcard and all fields other than Allow are ignored.
+    Type DeviceType `json:"type"`
+
+    // Major is the device's major number.
+    Major int64 `json:"major"`
+
+    // Minor is the device's minor number.
+    Minor int64 `json:"minor"`
+
+    // Permissions is the set of permissions that this rule applies to (in the
+    // cgroupv1 format -- any combination of "rwm").
+    Permissions DevicePermissions `json:"permissions"`
+
+    // Allow specifies whether this rule is allowed.
     Allow bool `json:"allow"`
 }
 
-func (d *Device) CgroupString() string {
-    return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions)
-}
-
-func (d *Device) Mkdev() int {
-    return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12))
-}
-
-// deviceNumberString converts the device number to a string return result.
-func deviceNumberString(number int64) string {
-    if number == Wildcard {
-        return "*"
-    }
-    return fmt.Sprint(number)
+func (d *DeviceRule) CgroupString() string {
+    var (
+        major = strconv.FormatInt(d.Major, 10)
+        minor = strconv.FormatInt(d.Minor, 10)
+    )
+    if d.Major == Wildcard {
+        major = "*"
+    }
+    if d.Minor == Wildcard {
+        minor = "*"
+    }
+    return fmt.Sprintf("%c %s:%s %s", d.Type, major, minor, d.Permissions)
+}
+
+func (d *DeviceRule) Mkdev() (uint64, error) {
+    if d.Major == Wildcard || d.Minor == Wildcard {
+        return 0, errors.New("cannot mkdev() device with wildcards")
+    }
+    return unix.Mkdev(uint32(d.Major), uint32(d.Minor)), nil
 }
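A short sketch of how the new `CgroupString`/`Mkdev` pair behaves, assuming the package's existing `Wildcard` constant (kept in the const block that is not shown in full here); this example is not from the PR:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// A concrete char device rule can be turned into a kernel device number.
	null := &configs.DeviceRule{Type: configs.CharDevice, Major: 1, Minor: 3, Permissions: "rwm", Allow: true}
	if dev, err := null.Mkdev(); err == nil {
		fmt.Printf("%s -> dev_t %#x\n", null.CgroupString(), dev)
	}

	// A wildcard rule only makes sense as a cgroup filter entry; Mkdev refuses it.
	anyChar := &configs.DeviceRule{Type: configs.CharDevice, Major: configs.Wildcard, Minor: configs.Wildcard, Permissions: "m", Allow: true}
	fmt.Println(anyChar.CgroupString()) // c *:* m
	if _, err := anyChar.Mkdev(); err != nil {
		fmt.Println("expected error:", err)
	}
}
```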
vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go (generated, vendored, 111 changes, file removed)
@@ -1,111 +0,0 @@
-// +build linux
-
-package configs
-
-var (
-    // DefaultSimpleDevices are devices that are to be both allowed and created.
-    DefaultSimpleDevices = []*Device{
-        // /dev/null and zero
-        {
-            Path:        "/dev/null",
-            Type:        'c',
-            Major:       1,
-            Minor:       3,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-        {
-            Path:        "/dev/zero",
-            Type:        'c',
-            Major:       1,
-            Minor:       5,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-
-        {
-            Path:        "/dev/full",
-            Type:        'c',
-            Major:       1,
-            Minor:       7,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-
-        // consoles and ttys
-        {
-            Path:        "/dev/tty",
-            Type:        'c',
-            Major:       5,
-            Minor:       0,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-
-        // /dev/urandom,/dev/random
-        {
-            Path:        "/dev/urandom",
-            Type:        'c',
-            Major:       1,
-            Minor:       9,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-        {
-            Path:        "/dev/random",
-            Type:        'c',
-            Major:       1,
-            Minor:       8,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-    }
-    DefaultAllowedDevices = append([]*Device{
-        // allow mknod for any device
-        {
-            Type:        'c',
-            Major:       Wildcard,
-            Minor:       Wildcard,
-            Permissions: "m",
-        },
-        {
-            Type:        'b',
-            Major:       Wildcard,
-            Minor:       Wildcard,
-            Permissions: "m",
-        },
-
-        {
-            Path:        "/dev/console",
-            Type:        'c',
-            Major:       5,
-            Minor:       1,
-            Permissions: "rwm",
-        },
-        // /dev/pts/ - pts namespaces are "coming soon"
-        {
-            Path:        "",
-            Type:        'c',
-            Major:       136,
-            Minor:       Wildcard,
-            Permissions: "rwm",
-        },
-        {
-            Path:        "",
-            Type:        'c',
-            Major:       5,
-            Minor:       2,
-            Permissions: "rwm",
-        },
-
-        // tuntap
-        {
-            Path:        "",
-            Type:        'c',
-            Major:       10,
-            Minor:       200,
-            Permissions: "rwm",
-        },
-    }, DefaultSimpleDevices...)
-    DefaultAutoCreatedDevices = append([]*Device{}, DefaultSimpleDevices...)
-)
vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go (generated, vendored, 36 changes)
@@ -31,33 +31,33 @@ func DeviceFromPath(path, permissions string) (*configs.Device, error) {
     }
 
     var (
+        devType   configs.DeviceType
+        mode      = stat.Mode
         devNumber = uint64(stat.Rdev)
         major     = unix.Major(devNumber)
         minor     = unix.Minor(devNumber)
     )
-    if major == 0 {
-        return nil, ErrNotADevice
-    }
-
-    var (
-        devType rune
-        mode    = stat.Mode
-    )
     switch {
     case mode&unix.S_IFBLK == unix.S_IFBLK:
-        devType = 'b'
+        devType = configs.BlockDevice
     case mode&unix.S_IFCHR == unix.S_IFCHR:
-        devType = 'c'
+        devType = configs.CharDevice
+    case mode&unix.S_IFIFO == unix.S_IFIFO:
+        devType = configs.FifoDevice
+    default:
+        return nil, ErrNotADevice
     }
     return &configs.Device{
-        Type:        devType,
-        Path:        path,
-        Major:       int64(major),
-        Minor:       int64(minor),
-        Permissions: permissions,
-        FileMode:    os.FileMode(mode),
-        Uid:         stat.Uid,
-        Gid:         stat.Gid,
+        DeviceRule: configs.DeviceRule{
+            Type:        devType,
+            Major:       int64(major),
+            Minor:       int64(minor),
+            Permissions: configs.DevicePermissions(permissions),
+        },
+        Path:     path,
+        FileMode: os.FileMode(mode),
+        Uid:      stat.Uid,
+        Gid:      stat.Gid,
    }, nil
 }
 
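For context, a small sketch of calling the updated `DeviceFromPath` (Linux only, and not part of the PR): the returned `configs.Device` now embeds a `DeviceRule`, so the type, major/minor and permissions are promoted fields, and FIFOs are recognized instead of being rejected.

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/devices"
)

func main() {
	// Resolve a well-known char device into a configs.Device.
	dev, err := devices.DeviceFromPath("/dev/null", "rwm")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	// Type, Major, Minor and Permissions come from the embedded DeviceRule.
	fmt.Printf("%s is %c %d:%d (%s)\n", dev.Path, dev.Type, dev.Major, dev.Minor, dev.Permissions)
}
```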
vendor/github.com/opencontainers/runc/libcontainer/nsenter/cloned_binary.c (generated, vendored, 28 changes)
@@ -1,7 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
 /*
  * Copyright (C) 2019 Aleksa Sarai <cyphar@cyphar.com>
  * Copyright (C) 2019 SUSE LLC
  *
+ * This work is dual licensed under the following licenses. You may use,
+ * redistribute, and/or modify the work under the conditions of either (or
+ * both) licenses.
+ *
+ * === Apache-2.0 ===
+ *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,6 +20,23 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
+ *
+ * === LGPL-2.1-or-later ===
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * <https://www.gnu.org/licenses/>.
+ *
  */
 
 #define _GNU_SOURCE
@@ -95,8 +119,10 @@ static int is_self_cloned(void)
     struct statfs fsbuf = {};
 
     fd = open("/proc/self/exe", O_RDONLY|O_CLOEXEC);
-    if (fd < 0)
+    if (fd < 0) {
+        fprintf(stderr, "you have no read access to runc binary file\n");
         return -ENOTRECOVERABLE;
+    }
 
     /*
      * Is the binary a fully-sealed memfd? We don't need CLONED_BINARY_ENV for
vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c (generated, vendored, 12 changes)
@@ -714,12 +714,12 @@ void nsexec(void)
          * ready, so we can receive all possible error codes
          * generated by children.
          */
+        syncfd = sync_child_pipe[1];
+        close(sync_child_pipe[0]);
+
        while (!ready) {
            enum sync_t s;
 
-            syncfd = sync_child_pipe[1];
-            close(sync_child_pipe[0]);
-
            if (read(syncfd, &s, sizeof(s)) != sizeof(s))
                bail("failed to sync with child: next state");
 
@@ -789,13 +789,13 @@ void nsexec(void)
 
        /* Now sync with grandchild. */
+        syncfd = sync_grandchild_pipe[1];
+        close(sync_grandchild_pipe[0]);
 
        ready = false;
        while (!ready) {
            enum sync_t s;
 
-            syncfd = sync_grandchild_pipe[1];
-            close(sync_grandchild_pipe[0]);
-
            s = SYNC_GRANDCHILD;
            if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
                kill(child, SIGKILL);
vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go (generated, vendored, 29 changes)
@@ -4,6 +4,7 @@ package seccomp
 
 import (
     "bufio"
+    "errors"
     "fmt"
     "os"
     "strings"
@@ -34,12 +35,12 @@ const (
 // of the init until they join the namespace
 func InitSeccomp(config *configs.Seccomp) error {
     if config == nil {
-        return fmt.Errorf("cannot initialize Seccomp - nil config passed")
+        return errors.New("cannot initialize Seccomp - nil config passed")
     }
 
-    defaultAction, err := getAction(config.DefaultAction)
+    defaultAction, err := getAction(config.DefaultAction, nil)
     if err != nil {
-        return fmt.Errorf("error initializing seccomp - invalid default action")
+        return errors.New("error initializing seccomp - invalid default action")
     }
 
     filter, err := libseccomp.NewFilter(defaultAction)
@@ -67,7 +68,7 @@ func InitSeccomp(config *configs.Seccomp) error {
     // Add a rule for each syscall
     for _, call := range config.Syscalls {
         if call == nil {
-            return fmt.Errorf("encountered nil syscall while initializing Seccomp")
+            return errors.New("encountered nil syscall while initializing Seccomp")
         }
 
         if err = matchCall(filter, call); err != nil {
@@ -101,22 +102,28 @@ func IsEnabled() bool {
 }
 
 // Convert Libcontainer Action to Libseccomp ScmpAction
-func getAction(act configs.Action) (libseccomp.ScmpAction, error) {
+func getAction(act configs.Action, errnoRet *uint) (libseccomp.ScmpAction, error) {
     switch act {
     case configs.Kill:
         return actKill, nil
     case configs.Errno:
+        if errnoRet != nil {
+            return libseccomp.ActErrno.SetReturnCode(int16(*errnoRet)), nil
+        }
        return actErrno, nil
     case configs.Trap:
        return actTrap, nil
     case configs.Allow:
        return actAllow, nil
     case configs.Trace:
+        if errnoRet != nil {
+            return libseccomp.ActTrace.SetReturnCode(int16(*errnoRet)), nil
+        }
        return actTrace, nil
     case configs.Log:
        return actLog, nil
     default:
-        return libseccomp.ActInvalid, fmt.Errorf("invalid action, cannot use in rule")
+        return libseccomp.ActInvalid, errors.New("invalid action, cannot use in rule")
     }
 }
 
@@ -138,7 +145,7 @@ func getOperator(op configs.Operator) (libseccomp.ScmpCompareOp, error) {
     case configs.MaskEqualTo:
        return libseccomp.CompareMaskedEqual, nil
     default:
-        return libseccomp.CompareInvalid, fmt.Errorf("invalid operator, cannot use in rule")
+        return libseccomp.CompareInvalid, errors.New("invalid operator, cannot use in rule")
     }
 }
 
@@ -147,7 +154,7 @@ func getCondition(arg *configs.Arg) (libseccomp.ScmpCondition, error) {
     cond := libseccomp.ScmpCondition{}
 
     if arg == nil {
-        return cond, fmt.Errorf("cannot convert nil to syscall condition")
+        return cond, errors.New("cannot convert nil to syscall condition")
     }
 
     op, err := getOperator(arg.Op)
@@ -161,11 +168,11 @@ func getCondition(arg *configs.Arg) (libseccomp.ScmpCondition, error) {
 // Add a rule to match a single syscall
 func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error {
     if call == nil || filter == nil {
-        return fmt.Errorf("cannot use nil as syscall to block")
+        return errors.New("cannot use nil as syscall to block")
     }
 
     if len(call.Name) == 0 {
-        return fmt.Errorf("empty string is not a valid syscall")
+        return errors.New("empty string is not a valid syscall")
     }
 
     // If we can't resolve the syscall, assume it's not supported on this kernel
@@ -176,7 +183,7 @@ func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error {
     }
 
     // Convert the call's action to the libseccomp equivalent
-    callAct, err := getAction(call.Action)
+    callAct, err := getAction(call.Action, call.ErrnoRet)
     if err != nil {
        return fmt.Errorf("action in seccomp profile is invalid: %s", err)
     }
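The practical effect of the new `errnoRet` plumbing above is that a seccomp rule can return a specific errno instead of the filter-wide default. A minimal sketch (not part of the PR; the syscall name and errno value are just examples, and actually installing the filter requires libseccomp plus the `seccomp` build tag):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// ENOSYS is 38 on Linux x86-64; treat the value as illustrative.
	enosys := uint(38)
	rule := &configs.Syscall{
		Name:     "keyctl",
		Action:   configs.Errno,
		ErrnoRet: &enosys,
	}
	fmt.Printf("%s -> action %v, errnoRet %d\n", rule.Name, rule.Action, *rule.ErrnoRet)

	// A full profile would place this rule in configs.Seccomp.Syscalls and
	// hand it to seccomp.InitSeccomp.
}
```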
vendor/github.com/opencontainers/runc/libcontainer/user/user.go (generated, vendored, 28 changes)
@@ -162,10 +162,6 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
     )
 
     for s.Scan() {
-        if err := s.Err(); err != nil {
-            return nil, err
-        }
-
        line := strings.TrimSpace(s.Text())
        if line == "" {
            continue
@@ -183,6 +179,9 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
            out = append(out, p)
        }
     }
+    if err := s.Err(); err != nil {
+        return nil, err
+    }
 
     return out, nil
 }
@@ -221,10 +220,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
     )
 
     for s.Scan() {
-        if err := s.Err(); err != nil {
-            return nil, err
-        }
-
        text := s.Text()
        if text == "" {
            continue
@@ -242,6 +237,9 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
            out = append(out, p)
        }
     }
+    if err := s.Err(); err != nil {
+        return nil, err
+    }
 
     return out, nil
 }
@@ -532,10 +530,6 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
     )
 
     for s.Scan() {
-        if err := s.Err(); err != nil {
-            return nil, err
-        }
-
        line := strings.TrimSpace(s.Text())
        if line == "" {
            continue
@@ -549,6 +543,9 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
            out = append(out, p)
        }
     }
+    if err := s.Err(); err != nil {
+        return nil, err
+    }
 
     return out, nil
 }
@@ -586,10 +583,6 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
     )
 
     for s.Scan() {
-        if err := s.Err(); err != nil {
-            return nil, err
-        }
-
        line := strings.TrimSpace(s.Text())
        if line == "" {
            continue
@@ -603,6 +596,9 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
            out = append(out, p)
        }
     }
+    if err := s.Err(); err != nil {
+        return nil, err
+    }
 
     return out, nil
 }
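The change above moves the scanner error check out of the loop body, which is the idiomatic `bufio.Scanner` pattern: `Scan()` returns false when an error occurs, so the error is only observable after the loop. A small self-contained sketch of the pattern (not part of the PR):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// bufio.Scanner reports read errors only after Scan() returns false, so
	// checking s.Err() inside the loop can never see an error for the line
	// just read; the check belongs after the loop.
	s := bufio.NewScanner(strings.NewReader("root:x:0:0:root:/root:/bin/bash\n"))
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if line == "" {
			continue
		}
		fmt.Println("parsed:", line)
	}
	if err := s.Err(); err != nil {
		fmt.Println("read error:", err)
	}
}
```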
vendor/github.com/opencontainers/runc/vendor.conf (generated, vendored, 31 changes, file removed)
@@ -1,31 +0,0 @@
-# OCI runtime-spec. When updating this, make sure you use a version tag rather
-# than a commit ID so it's much more obvious what version of the spec we are
-# using.
-github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
-
-# Core libcontainer functionality.
-github.com/checkpoint-restore/go-criu 17b0214f6c48980c45dc47ecb0cfd6d9e02df723 # v3.11
-github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7
-github.com/opencontainers/selinux 5215b1806f52b1fcc2070a8826c542c9d33cd3cf # v1.3.0 (+ CVE-2019-16884)
-github.com/seccomp/libseccomp-golang 689e3c1541a84461afc49c1c87352a6cedf72e9c # v0.9.1
-github.com/sirupsen/logrus 8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f # v1.4.1
-github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
-github.com/vishvananda/netlink 1e2e08e8a2dcdacaae3f14ac44c5cfa31361f270
-
-# systemd integration.
-github.com/coreos/go-systemd 95778dfbb74eb7e4dbaf43bf7d71809650ef8076 # v19
-github.com/godbus/dbus 2ff6f7ffd60f0f2410b3105864bdd12c7894f844 # v5.0.1
-github.com/golang/protobuf 925541529c1fa6821df4e44ce2723319eb2be768 # v1.0.0
-
-# Command-line interface.
-github.com/cyphar/filepath-securejoin a261ee33d7a517f054effbf451841abaafe3e0fd # v0.2.2
-github.com/docker/go-units 47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3
-github.com/urfave/cli cfb38830724cc34fedffe9a2a29fb54fa9169cd1 # v1.20.0
-golang.org/x/sys 9eafafc0a87e0fd0aeeba439a4573537970c44c7 https://github.com/golang/sys
-
-# console dependencies
-github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
-github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1
-
-# ebpf dependencies
-github.com/cilium/ebpf 95b36a581eed7b0f127306ed1d16cc0ddc06cf67
vendor/github.com/opencontainers/runtime-spec/specs-go/config.go (generated, vendored, 7 changes)
@@ -667,9 +667,10 @@ type LinuxSeccompArg struct {
 
 // LinuxSyscall is used to match a syscall in Seccomp
 type LinuxSyscall struct {
     Names    []string           `json:"names"`
     Action   LinuxSeccompAction `json:"action"`
+    ErrnoRet *uint              `json:"errnoRet,omitempty"`
     Args     []LinuxSeccompArg  `json:"args,omitempty"`
 }
 
 // LinuxIntelRdt has container runtime resource constraints for Intel RDT
vendor/github.com/opencontainers/runtime-spec/specs-go/version.go (generated, vendored, 2 changes)
@@ -11,7 +11,7 @@ const (
     VersionPatch = 2
 
     // VersionDev indicates development branch. Releases will be empty string.
-    VersionDev = ""
+    VersionDev = "-dev"
 )
 
 // Version is the specification version that the package types support.
vendor/github.com/opencontainers/selinux/go-selinux/label/label.go (generated, vendored, 22 changes)
@@ -1,8 +1,6 @@
 package label
 
 import (
-    "fmt"
-
     "github.com/opencontainers/selinux/go-selinux"
 )
 
@@ -48,7 +46,7 @@ var PidLabel = selinux.PidLabel
 
 // Init initialises the labeling system
 func Init() {
-    _ = selinux.GetEnabled()
+    selinux.GetEnabled()
 }
 
 // ClearLabels will clear all reserved labels
@@ -77,21 +75,3 @@ func ReleaseLabel(label string) error {
 // can be used to set duplicate labels on future container processes
 // Deprecated: use selinux.DupSecOpt
 var DupSecOpt = selinux.DupSecOpt
-
-// FormatMountLabel returns a string to be used by the mount command.
-// The format of this string will be used to alter the labeling of the mountpoint.
-// The string returned is suitable to be used as the options field of the mount command.
-// If you need to have additional mount point options, you can pass them in as
-// the first parameter. Second parameter is the label that you wish to apply
-// to all content in the mount point.
-func FormatMountLabel(src, mountLabel string) string {
-    if mountLabel != "" {
-        switch src {
-        case "":
-            src = fmt.Sprintf("context=%q", mountLabel)
-        default:
-            src = fmt.Sprintf("%s,context=%q", src, mountLabel)
-        }
-    }
-    return src
-}
vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go (generated, vendored, 32 changes)
@@ -3,6 +3,7 @@
 package label
 
 import (
+    "fmt"
     "os"
     "os/user"
     "strings"
@@ -42,7 +43,7 @@ func InitLabels(options []string) (plabel string, mlabel string, Err error) {
     if err != nil {
        return "", "", err
     }
-    mcsLevel := pcon["level"]
     mcon, err := selinux.NewContext(mountLabel)
     if err != nil {
        return "", "", err
@@ -61,21 +62,16 @@ func InitLabels(options []string) (plabel string, mlabel string, Err error) {
            }
            if con[0] == "filetype" {
                mcon["type"] = con[1]
-                continue
            }
            pcon[con[0]] = con[1]
            if con[0] == "level" || con[0] == "user" {
                mcon[con[0]] = con[1]
            }
        }
-        if pcon.Get() != processLabel {
-            if pcon["level"] != mcsLevel {
-                selinux.ReleaseLabel(processLabel)
-            }
-            processLabel = pcon.Get()
-            selinux.ReserveLabel(processLabel)
-        }
+        selinux.ReleaseLabel(processLabel)
+        processLabel = pcon.Get()
        mountLabel = mcon.Get()
+        selinux.ReserveLabel(processLabel)
     }
     return processLabel, mountLabel, nil
 }
@@ -86,6 +82,24 @@ func GenLabels(options string) (string, string, error) {
     return InitLabels(strings.Fields(options))
 }
 
+// FormatMountLabel returns a string to be used by the mount command.
+// The format of this string will be used to alter the labeling of the mountpoint.
+// The string returned is suitable to be used as the options field of the mount command.
+// If you need to have additional mount point options, you can pass them in as
+// the first parameter. Second parameter is the label that you wish to apply
+// to all content in the mount point.
+func FormatMountLabel(src, mountLabel string) string {
+    if mountLabel != "" {
+        switch src {
+        case "":
+            src = fmt.Sprintf("context=%q", mountLabel)
+        default:
+            src = fmt.Sprintf("%s,context=%q", src, mountLabel)
+        }
+    }
+    return src
+}
+
 // SetFileLabel modifies the "path" label to the specified file label
 func SetFileLabel(path string, fileLabel string) error {
     if !selinux.GetEnabled() || fileLabel == "" {
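`FormatMountLabel`, now exported from this file, simply appends a `context=` option to a mount-option string. A quick sketch of its behaviour (not part of the PR; the SELinux label value is only an example):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
	// Append a context option to existing mount options.
	opts := label.FormatMountLabel("defaults", "system_u:object_r:container_file_t:s0:c1,c2")
	fmt.Println(opts) // defaults,context="system_u:object_r:container_file_t:s0:c1,c2"

	// With no pre-existing options it returns just the context option.
	fmt.Println(label.FormatMountLabel("", "system_u:object_r:container_file_t:s0:c1,c2"))
}
```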
vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go (generated, vendored, 4 changes)
@@ -15,6 +15,10 @@ func GenLabels(options string) (string, string, error) {
     return "", "", nil
 }
 
+func FormatMountLabel(src string, mountLabel string) string {
+    return src
+}
+
 func SetFileLabel(path string, fileLabel string) error {
     return nil
 }
vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go (generated, vendored, 8 changes)
@@ -31,9 +31,6 @@ const (
     // Disabled constant to indicate SELinux is disabled
     Disabled = -1
 
-    // DefaultCategoryRange is the upper bound on the category range
-    DefaultCategoryRange = uint32(1024)
-
     contextFile = "/usr/share/containers/selinux/contexts"
     selinuxDir = "/etc/selinux/"
     selinuxConfig = selinuxDir + "config"
@@ -60,9 +57,6 @@ var (
     // InvalidLabel is returned when an invalid label is specified.
     InvalidLabel = errors.New("Invalid Label")
 
-    // CategoryRange allows the upper bound on the category range to be adjusted
-    CategoryRange = DefaultCategoryRange
-
     assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
     roFileLabel string
     state = selinuxState{
@@ -796,7 +790,7 @@ func ContainerLabels() (processLabel string, fileLabel string) {
 func addMcs(processLabel, fileLabel string) (string, string) {
     scon, _ := NewContext(processLabel)
     if scon["level"] != "" {
-        mcs := uniqMcs(CategoryRange)
+        mcs := uniqMcs(1024)
        scon["level"] = mcs
        processLabel = scon.Get()
        scon, _ = NewContext(fileLabel)
vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go (generated, vendored, 4 changes)
@@ -13,8 +13,6 @@ const (
     Permissive = 0
     // Disabled constant to indicate SELinux is disabled
     Disabled = -1
-    // DefaultCategoryRange is the upper bound on the category range
-    DefaultCategoryRange = uint32(1024)
 )
 
 var (
@@ -22,8 +20,6 @@ var (
     ErrMCSAlreadyExists = errors.New("MCS label already exists")
     // ErrEmptyPath is returned when an empty path has been specified.
     ErrEmptyPath = errors.New("empty path")
-    // CategoryRange allows the upper bound on the category range to be adjusted
-    CategoryRange = DefaultCategoryRange
 )
 
 // Context is a representation of the SELinux label broken into 4 parts
vendor/github.com/russross/blackfriday/doc.go (generated, vendored, 32 changes, file removed)
@@ -1,32 +0,0 @@
-// Package blackfriday is a Markdown processor.
-//
-// It translates plain text with simple formatting rules into HTML or LaTeX.
-//
-// Sanitized Anchor Names
-//
-// Blackfriday includes an algorithm for creating sanitized anchor names
-// corresponding to a given input text. This algorithm is used to create
-// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The
-// algorithm is specified below, so that other packages can create
-// compatible anchor names and links to those anchors.
-//
-// The algorithm iterates over the input text, interpreted as UTF-8,
-// one Unicode code point (rune) at a time. All runes that are letters (category L)
-// or numbers (category N) are considered valid characters. They are mapped to
-// lower case, and included in the output. All other runes are considered
-// invalid characters. Invalid characters that preceed the first valid character,
-// as well as invalid character that follow the last valid character
-// are dropped completely. All other sequences of invalid characters
-// between two valid characters are replaced with a single dash character '-'.
-//
-// SanitizedAnchorName exposes this functionality, and can be used to
-// create compatible links to the anchor names generated by blackfriday.
-// This algorithm is also implemented in a small standalone package at
-// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
-// that want a small package and don't need full functionality of blackfriday.
-package blackfriday
-
-// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
-// github.com/shurcooL/sanitized_anchor_name.
-// Otherwise, users of sanitized_anchor_name will get anchor names
-// that are incompatible with those generated by blackfriday.
vendor/github.com/russross/blackfriday/go.mod (generated, vendored, 1 change, file removed)
@@ -1 +0,0 @@
-module github.com/russross/blackfriday
938  vendor/github.com/russross/blackfriday/html.go  (generated, vendored)
@@ -1,938 +0,0 @@
File diff omitted: 938 deleted lines of generated, vendored code (Blackfriday's HTML rendering backend).
334  vendor/github.com/russross/blackfriday/latex.go  (generated, vendored)
@@ -1,334 +0,0 @@
File diff omitted: 334 deleted lines of generated, vendored code (Blackfriday's LaTeX rendering backend).
@@ -1,6 +1,4 @@
-Blackfriday
-[![Build Status][BuildSVG]][BuildURL]
-[![Godoc][GodocV2SVG]][GodocV2URL]
+Blackfriday [](https://travis-ci.org/russross/blackfriday)
 ===========
 
 Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
@@ -18,12 +16,18 @@ It started as a translation from C of [Sundown][3].
 Installation
 ------------
 
-Blackfriday is compatible with any modern Go release. With Go and git installed:
+Blackfriday is compatible with any modern Go release. With Go 1.7 and git
+installed:
 
-go get -u gopkg.in/russross/blackfriday.v2
+go get gopkg.in/russross/blackfriday.v2
 
-will download, compile, and install the package into your `$GOPATH` directory
-hierarchy.
+will download, compile, and install the package into your `$GOPATH`
+directory hierarchy. Alternatively, you can achieve the same if you
+import it into a project:
+
+import "gopkg.in/russross/blackfriday.v2"
+
+and `go get` without parameters.
 
 
 Versions
@@ -34,7 +38,7 @@ developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and
 documentation is available at
 https://godoc.org/gopkg.in/russross/blackfriday.v2.
 
-It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
+It is `go get`-able via via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
 but we highly recommend using package management tool like [dep][7] or
 [Glide][8] and make use of semantic versioning. With package management you
 should import `github.com/russross/blackfriday` and specify that you're using
@@ -58,43 +62,9 @@ Potential drawbacks:
 v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
 tracking.
 
-If you are still interested in the legacy `v1`, you can import it from
-`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
-here: https://godoc.org/github.com/russross/blackfriday
-
-### Known issue with `dep`
-
-There is a known problem with using Blackfriday v1 _transitively_ and `dep`.
-Currently `dep` prioritizes semver versions over anything else, and picks the
-latest one, plus it does not apply a `[[constraint]]` specifier to transitively
-pulled in packages. So if you're using something that uses Blackfriday v1, but
-that something does not use `dep` yet, you will get Blackfriday v2 pulled in and
-your first dependency will fail to build.
-
-There are couple of fixes for it, documented here:
-https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version
-
-Meanwhile, `dep` team is working on a more general solution to the constraints
-on transitive dependencies problem: https://github.com/golang/dep/issues/1124.
-
-
 Usage
 -----
 
-### v1
-
-For basic usage, it is as simple as getting your input into a byte
-slice and calling:
-
-output := blackfriday.MarkdownBasic(input)
-
-This renders it with no extensions enabled. To get a more useful
-feature set, use this instead:
-
-output := blackfriday.MarkdownCommon(input)
-
-### v2
-
 For the most sensible markdown processing, it is as simple as getting your input
 into a byte slice and calling:
 
@@ -121,7 +91,7 @@ Here's an example of simple usage of Blackfriday together with Bluemonday:
 ```go
 import (
 "github.com/microcosm-cc/bluemonday"
-"gopkg.in/russross/blackfriday.v2"
+"github.com/russross/blackfriday"
 )
 
 // ...
@@ -129,21 +99,11 @@ unsafe := blackfriday.Run(input)
 html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
 ```
 
-### Custom options, v1
+### Custom options
 
-If you want to customize the set of options, first get a renderer
-(currently only the HTML output engine), then use it to
-call the more general `Markdown` function. For examples, see the
-implementations of `MarkdownBasic` and `MarkdownCommon` in
-`markdown.go`.
-
-### Custom options, v2
-
 If you want to customize the set of options, use `blackfriday.WithExtensions`,
 `blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
 
-### `blackfriday-tool`
-
 You can also check out `blackfriday-tool` for a more complete example
 of how to use it. Download and install it using:
 
@@ -163,22 +123,6 @@ installed in `$GOPATH/bin`. This is a statically-linked binary that
 can be copied to wherever you need it without worrying about
 dependencies and library versions.
 
-### Sanitized anchor names
-
-Blackfriday includes an algorithm for creating sanitized anchor names
-corresponding to a given input text. This algorithm is used to create
-anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The
-algorithm has a specification, so that other packages can create
-compatible anchor names and links to those anchors.
-
-The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names.
-
-[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to
-create compatible links to the anchor names generated by blackfriday.
-This algorithm is also implemented in a small standalone package at
-[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
-that want a small package and don't need full functionality of blackfriday.
-
 
 Features
 --------
@@ -246,7 +190,7 @@ implements the following extensions:
 and supply a language (to make syntax highlighting simple). Just
 mark it like this:
 
-``` go
+```go
 func getTrue() bool {
 return true
 }
@@ -255,15 +199,6 @@ implements the following extensions:
 You can use 3 or more backticks to mark the beginning of the
 block, and the same number to mark the end of the block.
 
-To preserve classes of fenced code blocks while using the bluemonday
-HTML sanitizer, use the following policy:
-
-``` go
-p := bluemonday.UGCPolicy()
-p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
-html := p.SanitizeBytes(unsafe)
-```
-
 * **Definition lists**. A simple definition list is made of a single-line
 term followed by a colon and the definition for that term.
 
@@ -289,10 +224,8 @@ implements the following extensions:
 * **Strikethrough**. Use two tildes (`~~`) to mark text that
 should be crossed out.
 
-* **Hard line breaks**. With this extension enabled (it is off by
-default in the `MarkdownBasic` and `MarkdownCommon` convenience
-functions), newlines in the input translate into line breaks in
-the output.
+* **Hard line breaks**. With this extension enabled newlines in the input
+translate into line breaks in the output. This extension is off by default.
 
 * **Smart quotes**. Smartypants-style punctuation substitution is
 supported, turning normal double- and single-quote marks into
@@ -328,24 +261,20 @@ are a few of note:
 * [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
 but for markdown.
 
-* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex):
+* [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX):
 renders output as LaTeX.
 
-* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience
-integration with the [Chroma](https://github.com/alecthomas/chroma) code
-highlighting library. bfchroma is only compatible with v2 of Blackfriday and
-provides a drop-in renderer ready to use with Blackfriday, as well as
-options and means for further customization.
+* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
 
 
-TODO
+Todo
 ----
 
 * More unit testing
-* Improve Unicode support. It does not understand all Unicode
+* Improve unicode support. It does not understand all unicode
 rules (about what constitutes a letter, a punctuation symbol,
 etc.), so it may fail to detect word boundaries correctly in
-some instances. It is safe on all UTF-8 input.
+some instances. It is safe on all utf-8 input.
 
 
 License
@@ -360,10 +289,3 @@ License
 [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func"
 [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
 [6]: https://labix.org/gopkg.in "gopkg.in"
-[7]: https://github.com/golang/dep/ "dep"
-[8]: https://github.com/Masterminds/glide "Glide"
-
-[BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master
-[BuildURL]: https://travis-ci.org/russross/blackfriday
-[GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg
-[GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2
File diff suppressed because it is too large
18
vendor/github.com/russross/blackfriday/v2/doc.go
generated
vendored
Normal file
18
vendor/github.com/russross/blackfriday/v2/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
// Package blackfriday is a markdown processor.
//
// It translates plain text with simple formatting rules into an AST, which can
// then be further processed to HTML (provided by Blackfriday itself) or other
// formats (provided by the community).
//
// The simplest way to invoke Blackfriday is to call the Run function. It will
// take a text input and produce a text output in HTML (or other format).
//
// A slightly more sophisticated way to use Blackfriday is to create a Markdown
// processor and to call Parse, which returns a syntax tree for the input
// document. You can leverage Blackfriday's parsing for content extraction from
// markdown documents. You can assign a custom renderer and set various options
// to the Markdown processor.
//
// If you're interested in calling Blackfriday from command line, see
// https://github.com/russross/blackfriday-tool.
package blackfriday
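A minimal sketch (not part of the vendored tree) of the two entry points the package comment above describes: `Run` for one-shot HTML output, and `New` plus `Parse` for walking the syntax tree. The identifiers used here (`Run`, `New`, `WithExtensions`, `CommonExtensions`, `Node`, `Walk`, `GoToNext`) all appear in the vendored v2 sources below; the sample input is made up.

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("# Title\n\nSome *markdown* text.\n")

	// One-shot conversion: Run parses and renders to HTML with default settings.
	fmt.Println(string(blackfriday.Run(input)))

	// Lower-level use: build a Markdown processor, Parse to an AST, then walk it,
	// e.g. to extract the plain text nodes.
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := md.Parse(input)
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Text {
			fmt.Printf("text: %q\n", node.Literal)
		}
		return blackfriday.GoToNext
	})
}
```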
34 vendor/github.com/russross/blackfriday/v2/esc.go generated vendored Normal file
@ -0,0 +1,34 @@
package blackfriday

import (
	"html"
	"io"
)

var htmlEscaper = [256][]byte{
	'&': []byte("&amp;"),
	'<': []byte("&lt;"),
	'>': []byte("&gt;"),
	'"': []byte("&quot;"),
}

func escapeHTML(w io.Writer, s []byte) {
	var start, end int
	for end < len(s) {
		escSeq := htmlEscaper[s[end]]
		if escSeq != nil {
			w.Write(s[start:end])
			w.Write(escSeq)
			start = end + 1
		}
		end++
	}
	if start < len(s) && end <= len(s) {
		w.Write(s[start:end])
	}
}

func escLink(w io.Writer, text []byte) {
	unesc := html.UnescapeString(string(text))
	escapeHTML(w, []byte(unesc))
}
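A hypothetical in-package test sketch (not part of the vendored tree) illustrating what `escapeHTML` and `escLink` above produce; the expected strings follow directly from the escaper table.

```go
package blackfriday

import (
	"bytes"
	"testing"
)

func TestEscapeHTMLSketch(t *testing.T) {
	var buf bytes.Buffer
	escapeHTML(&buf, []byte(`<a href="x">&`))
	if got, want := buf.String(), `&lt;a href=&quot;x&quot;&gt;&amp;`; got != want {
		t.Fatalf("escapeHTML: got %q, want %q", got, want)
	}

	buf.Reset()
	// escLink unescapes any entities in the input first and then re-escapes,
	// so link text that is already escaped is not double-escaped.
	escLink(&buf, []byte("a&amp;b"))
	if got, want := buf.String(), "a&amp;b"; got != want {
		t.Fatalf("escLink: got %q, want %q", got, want)
	}
}
```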
1 vendor/github.com/russross/blackfriday/v2/go.mod generated vendored Normal file
@ -0,0 +1 @@
module github.com/russross/blackfriday/v2
949 vendor/github.com/russross/blackfriday/v2/html.go generated vendored Normal file
@ -0,0 +1,949 @@
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// HTML rendering backend
//
//

package blackfriday

import (
	"bytes"
	"fmt"
	"io"
	"regexp"
	"strings"
)

// HTMLFlags control optional behavior of HTML renderer.
type HTMLFlags int

// HTML renderer configuration options.
const (
	HTMLFlagsNone HTMLFlags = 0
	SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks
	SkipImages // Skip embedded images
	SkipLinks // Skip all links
	Safelink // Only link to trusted protocols
	NofollowLinks // Only link with rel="nofollow"
	NoreferrerLinks // Only link with rel="noreferrer"
	NoopenerLinks // Only link with rel="noopener"
	HrefTargetBlank // Add a blank target
	CompletePage // Generate a complete HTML page
	UseXHTML // Generate XHTML output instead of HTML
	FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source
	Smartypants // Enable smart punctuation substitutions
	SmartypantsFractions // Enable smart fractions (with Smartypants)
	SmartypantsDashes // Enable smart dashes (with Smartypants)
	SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants)
	SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering
	SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants)
	TOC // Generate a table of contents
)

var (
	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
)

const (
	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
		processingInstruction + "|" + declaration + "|" + cdata + ")"
	closeTag = "</" + tagName + "\\s*[>]"
	openTag = "<" + tagName + attribute + "*" + "\\s*/?>"
	attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
	attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
	attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
	attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
	cdata = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
	declaration = "<![A-Z]+" + "\\s+[^>]*>"
	doubleQuotedValue = "\"[^\"]*\""
	htmlComment = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
	processingInstruction = "[<][?].*?[?][>]"
	singleQuotedValue = "'[^']*'"
	tagName = "[A-Za-z][A-Za-z0-9-]*"
	unquotedValue = "[^\"'=<>`\\x00-\\x20]+"
)

// HTMLRendererParameters is a collection of supplementary parameters tweaking
// the behavior of various parts of HTML renderer.
type HTMLRendererParameters struct {
	// Prepend this text to each relative URL.
	AbsolutePrefix string
	// Add this text to each footnote anchor, to ensure uniqueness.
	FootnoteAnchorPrefix string
	// Show this text inside the <a> tag for a footnote return link, if the
	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
	// <sup>[return]</sup> is used.
	FootnoteReturnLinkContents string
	// If set, add this text to the front of each Heading ID, to ensure
	// uniqueness.
	HeadingIDPrefix string
	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
	HeadingIDSuffix string
	// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
	// Negative offset is also valid.
	// Resulting levels are clipped between 1 and 6.
	HeadingLevelOffset int

	Title string // Document title (used if CompletePage is set)
	CSS string // Optional CSS file URL (used if CompletePage is set)
	Icon string // Optional icon file URL (used if CompletePage is set)

	Flags HTMLFlags // Flags allow customizing this renderer's behavior
}

// HTMLRenderer is a type that implements the Renderer interface for HTML output.
//
// Do not create this directly, instead use the NewHTMLRenderer function.
type HTMLRenderer struct {
	HTMLRendererParameters

	closeTag string // how to end singleton tags: either " />" or ">"

	// Track heading IDs to prevent ID collision in a single generation.
	headingIDs map[string]int

	lastOutputLen int
	disableTags int

	sr *SPRenderer
}

const (
	xhtmlClose = " />"
	htmlClose = ">"
)

// NewHTMLRenderer creates and configures an HTMLRenderer object, which
// satisfies the Renderer interface.
func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
	// configure the rendering engine
	closeTag := htmlClose
	if params.Flags&UseXHTML != 0 {
		closeTag = xhtmlClose
	}

	if params.FootnoteReturnLinkContents == "" {
		params.FootnoteReturnLinkContents = `<sup>[return]</sup>`
	}

	return &HTMLRenderer{
		HTMLRendererParameters: params,

		closeTag: closeTag,
		headingIDs: make(map[string]int),

		sr: NewSmartypantsRenderer(params.Flags),
	}
}

func isHTMLTag(tag []byte, tagname string) bool {
	found, _ := findHTMLTagPos(tag, tagname)
	return found
}

// Look for a character, but ignore it when it's in any kind of quotes, it
// might be JavaScript
func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
	inSingleQuote := false
	inDoubleQuote := false
	inGraveQuote := false
	i := start
	for i < len(html) {
		switch {
		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
			return i
		case html[i] == '\'':
			inSingleQuote = !inSingleQuote
		case html[i] == '"':
			inDoubleQuote = !inDoubleQuote
		case html[i] == '`':
			inGraveQuote = !inGraveQuote
		}
		i++
	}
	return start
}

func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
	i := 0
	if i < len(tag) && tag[0] != '<' {
		return false, -1
	}
	i++
	i = skipSpace(tag, i)

	if i < len(tag) && tag[i] == '/' {
		i++
	}

	i = skipSpace(tag, i)
	j := 0
	for ; i < len(tag); i, j = i+1, j+1 {
		if j >= len(tagname) {
			break
		}

		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
			return false, -1
		}
	}

	if i == len(tag) {
		return false, -1
	}

	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
	if rightAngle >= i {
		return true, rightAngle
	}

	return false, -1
}

func skipSpace(tag []byte, i int) int {
	for i < len(tag) && isspace(tag[i]) {
		i++
	}
	return i
}

func isRelativeLink(link []byte) (yes bool) {
	// a tag begin with '#'
	if link[0] == '#' {
		return true
	}

	// link begin with '/' but not '//', the second maybe a protocol relative link
	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
		return true
	}

	// only the root '/'
	if len(link) == 1 && link[0] == '/' {
		return true
	}

	// current directory : begin with "./"
	if bytes.HasPrefix(link, []byte("./")) {
		return true
	}

	// parent directory : begin with "../"
	if bytes.HasPrefix(link, []byte("../")) {
		return true
	}

	return false
}

func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
		tmp := fmt.Sprintf("%s-%d", id, count+1)

		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
			r.headingIDs[id] = count + 1
			id = tmp
		} else {
			id = id + "-1"
		}
	}

	if _, found := r.headingIDs[id]; !found {
		r.headingIDs[id] = 0
	}

	return id
}

func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
		newDest := r.AbsolutePrefix
		if link[0] != '/' {
			newDest += "/"
		}
		newDest += string(link)
		return []byte(newDest)
	}
	return link
}

func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
	if isRelativeLink(link) {
		return attrs
	}
	val := []string{}
	if flags&NofollowLinks != 0 {
		val = append(val, "nofollow")
	}
	if flags&NoreferrerLinks != 0 {
		val = append(val, "noreferrer")
	}
	if flags&NoopenerLinks != 0 {
		val = append(val, "noopener")
	}
	if flags&HrefTargetBlank != 0 {
		attrs = append(attrs, "target=\"_blank\"")
	}
	if len(val) == 0 {
		return attrs
	}
	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
	return append(attrs, attr)
}

func isMailto(link []byte) bool {
	return bytes.HasPrefix(link, []byte("mailto:"))
}

func needSkipLink(flags HTMLFlags, dest []byte) bool {
	if flags&SkipLinks != 0 {
		return true
	}
	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
}

func isSmartypantable(node *Node) bool {
	pt := node.Parent.Type
	return pt != Link && pt != CodeBlock && pt != Code
}

func appendLanguageAttr(attrs []string, info []byte) []string {
	if len(info) == 0 {
		return attrs
	}
	endOfLang := bytes.IndexAny(info, "\t ")
	if endOfLang < 0 {
		endOfLang = len(info)
	}
	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
}

func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
	w.Write(name)
	if len(attrs) > 0 {
		w.Write(spaceBytes)
		w.Write([]byte(strings.Join(attrs, " ")))
	}
	w.Write(gtBytes)
	r.lastOutputLen = 1
}

func footnoteRef(prefix string, node *Node) []byte {
	urlFrag := prefix + string(slugify(node.Destination))
	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
}

func footnoteItem(prefix string, slug []byte) []byte {
	return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
}

func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
	const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
}

func itemOpenCR(node *Node) bool {
	if node.Prev == nil {
		return false
	}
	ld := node.Parent.ListData
	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
}

func skipParagraphTags(node *Node) bool {
	grandparent := node.Parent.Parent
	if grandparent == nil || grandparent.Type != List {
		return false
	}
	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
	return grandparent.Type == List && tightOrTerm
}

func cellAlignment(align CellAlignFlags) string {
	switch align {
	case TableAlignmentLeft:
		return "left"
	case TableAlignmentRight:
		return "right"
	case TableAlignmentCenter:
		return "center"
	default:
		return ""
	}
}

func (r *HTMLRenderer) out(w io.Writer, text []byte) {
	if r.disableTags > 0 {
		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
	} else {
		w.Write(text)
	}
	r.lastOutputLen = len(text)
}

func (r *HTMLRenderer) cr(w io.Writer) {
	if r.lastOutputLen > 0 {
		r.out(w, nlBytes)
	}
}

var (
	nlBytes = []byte{'\n'}
	gtBytes = []byte{'>'}
	spaceBytes = []byte{' '}
)

var (
	brTag = []byte("<br>")
	brXHTMLTag = []byte("<br />")
	emTag = []byte("<em>")
	emCloseTag = []byte("</em>")
	strongTag = []byte("<strong>")
	strongCloseTag = []byte("</strong>")
	delTag = []byte("<del>")
	delCloseTag = []byte("</del>")
	ttTag = []byte("<tt>")
	ttCloseTag = []byte("</tt>")
	aTag = []byte("<a")
	aCloseTag = []byte("</a>")
	preTag = []byte("<pre>")
	preCloseTag = []byte("</pre>")
	codeTag = []byte("<code>")
	codeCloseTag = []byte("</code>")
	pTag = []byte("<p>")
	pCloseTag = []byte("</p>")
	blockquoteTag = []byte("<blockquote>")
	blockquoteCloseTag = []byte("</blockquote>")
	hrTag = []byte("<hr>")
	hrXHTMLTag = []byte("<hr />")
	ulTag = []byte("<ul>")
	ulCloseTag = []byte("</ul>")
	olTag = []byte("<ol>")
	olCloseTag = []byte("</ol>")
	dlTag = []byte("<dl>")
	dlCloseTag = []byte("</dl>")
	liTag = []byte("<li>")
	liCloseTag = []byte("</li>")
	ddTag = []byte("<dd>")
	ddCloseTag = []byte("</dd>")
	dtTag = []byte("<dt>")
	dtCloseTag = []byte("</dt>")
	tableTag = []byte("<table>")
	tableCloseTag = []byte("</table>")
	tdTag = []byte("<td")
	tdCloseTag = []byte("</td>")
	thTag = []byte("<th")
	thCloseTag = []byte("</th>")
	theadTag = []byte("<thead>")
	theadCloseTag = []byte("</thead>")
	tbodyTag = []byte("<tbody>")
	tbodyCloseTag = []byte("</tbody>")
	trTag = []byte("<tr>")
	trCloseTag = []byte("</tr>")
	h1Tag = []byte("<h1")
	h1CloseTag = []byte("</h1>")
	h2Tag = []byte("<h2")
	h2CloseTag = []byte("</h2>")
	h3Tag = []byte("<h3")
	h3CloseTag = []byte("</h3>")
	h4Tag = []byte("<h4")
	h4CloseTag = []byte("</h4>")
	h5Tag = []byte("<h5")
	h5CloseTag = []byte("</h5>")
	h6Tag = []byte("<h6")
	h6CloseTag = []byte("</h6>")

	footnotesDivBytes = []byte("\n<div class=\"footnotes\">\n\n")
	footnotesCloseDivBytes = []byte("\n</div>\n")
)

func headingTagsFromLevel(level int) ([]byte, []byte) {
	if level <= 1 {
		return h1Tag, h1CloseTag
	}
	switch level {
	case 2:
		return h2Tag, h2CloseTag
	case 3:
		return h3Tag, h3CloseTag
	case 4:
		return h4Tag, h4CloseTag
	case 5:
		return h5Tag, h5CloseTag
	}
	return h6Tag, h6CloseTag
}

func (r *HTMLRenderer) outHRTag(w io.Writer) {
	if r.Flags&UseXHTML == 0 {
		r.out(w, hrTag)
	} else {
		r.out(w, hrXHTMLTag)
	}
}

// RenderNode is a default renderer of a single node of a syntax tree. For
// block nodes it will be called twice: first time with entering=true, second
// time with entering=false, so that it could know when it's working on an open
// tag and when on close. It writes the result to w.
//
// The return value is a way to tell the calling walker to adjust its walk
// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
// can ask the walker to skip a subtree of this node by returning SkipChildren.
// The typical behavior is to return GoToNext, which asks for the usual
// traversal to the next node.
func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
	attrs := []string{}
	switch node.Type {
	case Text:
		if r.Flags&Smartypants != 0 {
			var tmp bytes.Buffer
			escapeHTML(&tmp, node.Literal)
			r.sr.Process(w, tmp.Bytes())
		} else {
			if node.Parent.Type == Link {
				escLink(w, node.Literal)
			} else {
				escapeHTML(w, node.Literal)
			}
		}
	case Softbreak:
		r.cr(w)
		// TODO: make it configurable via out(renderer.softbreak)
	case Hardbreak:
		if r.Flags&UseXHTML == 0 {
			r.out(w, brTag)
		} else {
			r.out(w, brXHTMLTag)
		}
		r.cr(w)
	case Emph:
		if entering {
			r.out(w, emTag)
		} else {
			r.out(w, emCloseTag)
		}
	case Strong:
		if entering {
			r.out(w, strongTag)
		} else {
			r.out(w, strongCloseTag)
		}
	case Del:
		if entering {
			r.out(w, delTag)
		} else {
			r.out(w, delCloseTag)
		}
	case HTMLSpan:
		if r.Flags&SkipHTML != 0 {
			break
		}
		r.out(w, node.Literal)
	case Link:
		// mark it but don't link it if it is not a safe link: no smartypants
		dest := node.LinkData.Destination
		if needSkipLink(r.Flags, dest) {
			if entering {
				r.out(w, ttTag)
			} else {
				r.out(w, ttCloseTag)
			}
		} else {
			if entering {
				dest = r.addAbsPrefix(dest)
				var hrefBuf bytes.Buffer
				hrefBuf.WriteString("href=\"")
				escLink(&hrefBuf, dest)
				hrefBuf.WriteByte('"')
				attrs = append(attrs, hrefBuf.String())
				if node.NoteID != 0 {
					r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
					break
				}
				attrs = appendLinkAttrs(attrs, r.Flags, dest)
				if len(node.LinkData.Title) > 0 {
					var titleBuff bytes.Buffer
					titleBuff.WriteString("title=\"")
					escapeHTML(&titleBuff, node.LinkData.Title)
					titleBuff.WriteByte('"')
					attrs = append(attrs, titleBuff.String())
				}
				r.tag(w, aTag, attrs)
			} else {
				if node.NoteID != 0 {
					break
				}
				r.out(w, aCloseTag)
			}
		}
	case Image:
		if r.Flags&SkipImages != 0 {
			return SkipChildren
		}
		if entering {
			dest := node.LinkData.Destination
			dest = r.addAbsPrefix(dest)
			if r.disableTags == 0 {
				//if options.safe && potentiallyUnsafe(dest) {
				//out(w, `<img src="" alt="`)
				//} else {
				r.out(w, []byte(`<img src="`))
				escLink(w, dest)
				r.out(w, []byte(`" alt="`))
				//}
			}
			r.disableTags++
		} else {
			r.disableTags--
			if r.disableTags == 0 {
				if node.LinkData.Title != nil {
					r.out(w, []byte(`" title="`))
					escapeHTML(w, node.LinkData.Title)
				}
				r.out(w, []byte(`" />`))
			}
		}
	case Code:
		r.out(w, codeTag)
		escapeHTML(w, node.Literal)
		r.out(w, codeCloseTag)
	case Document:
		break
	case Paragraph:
		if skipParagraphTags(node) {
			break
		}
		if entering {
			// TODO: untangle this clusterfuck about when the newlines need
			// to be added and when not.
			if node.Prev != nil {
				switch node.Prev.Type {
				case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
					r.cr(w)
				}
			}
			if node.Parent.Type == BlockQuote && node.Prev == nil {
				r.cr(w)
			}
			r.out(w, pTag)
		} else {
			r.out(w, pCloseTag)
			if !(node.Parent.Type == Item && node.Next == nil) {
				r.cr(w)
			}
		}
	case BlockQuote:
		if entering {
			r.cr(w)
			r.out(w, blockquoteTag)
		} else {
			r.out(w, blockquoteCloseTag)
			r.cr(w)
		}
	case HTMLBlock:
		if r.Flags&SkipHTML != 0 {
			break
		}
		r.cr(w)
		r.out(w, node.Literal)
		r.cr(w)
	case Heading:
		headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
		openTag, closeTag := headingTagsFromLevel(headingLevel)
		if entering {
			if node.IsTitleblock {
				attrs = append(attrs, `class="title"`)
			}
			if node.HeadingID != "" {
				id := r.ensureUniqueHeadingID(node.HeadingID)
				if r.HeadingIDPrefix != "" {
					id = r.HeadingIDPrefix + id
				}
				if r.HeadingIDSuffix != "" {
					id = id + r.HeadingIDSuffix
				}
				attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
			}
			r.cr(w)
			r.tag(w, openTag, attrs)
		} else {
			r.out(w, closeTag)
			if !(node.Parent.Type == Item && node.Next == nil) {
				r.cr(w)
			}
		}
	case HorizontalRule:
		r.cr(w)
		r.outHRTag(w)
		r.cr(w)
	case List:
		openTag := ulTag
		closeTag := ulCloseTag
		if node.ListFlags&ListTypeOrdered != 0 {
			openTag = olTag
			closeTag = olCloseTag
		}
		if node.ListFlags&ListTypeDefinition != 0 {
			openTag = dlTag
			closeTag = dlCloseTag
		}
		if entering {
			if node.IsFootnotesList {
				r.out(w, footnotesDivBytes)
				r.outHRTag(w)
				r.cr(w)
			}
			r.cr(w)
			if node.Parent.Type == Item && node.Parent.Parent.Tight {
				r.cr(w)
			}
			r.tag(w, openTag[:len(openTag)-1], attrs)
			r.cr(w)
		} else {
			r.out(w, closeTag)
			//cr(w)
			//if node.parent.Type != Item {
			//	cr(w)
			//}
			if node.Parent.Type == Item && node.Next != nil {
				r.cr(w)
			}
			if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
				r.cr(w)
			}
			if node.IsFootnotesList {
				r.out(w, footnotesCloseDivBytes)
			}
		}
	case Item:
		openTag := liTag
		closeTag := liCloseTag
		if node.ListFlags&ListTypeDefinition != 0 {
			openTag = ddTag
			closeTag = ddCloseTag
		}
		if node.ListFlags&ListTypeTerm != 0 {
			openTag = dtTag
			closeTag = dtCloseTag
		}
		if entering {
			if itemOpenCR(node) {
				r.cr(w)
			}
			if node.ListData.RefLink != nil {
				slug := slugify(node.ListData.RefLink)
				r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
				break
			}
			r.out(w, openTag)
		} else {
			if node.ListData.RefLink != nil {
				slug := slugify(node.ListData.RefLink)
				if r.Flags&FootnoteReturnLinks != 0 {
					r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
				}
			}
			r.out(w, closeTag)
			r.cr(w)
		}
	case CodeBlock:
		attrs = appendLanguageAttr(attrs, node.Info)
		r.cr(w)
		r.out(w, preTag)
		r.tag(w, codeTag[:len(codeTag)-1], attrs)
		escapeHTML(w, node.Literal)
		r.out(w, codeCloseTag)
		r.out(w, preCloseTag)
		if node.Parent.Type != Item {
			r.cr(w)
		}
	case Table:
		if entering {
			r.cr(w)
			r.out(w, tableTag)
		} else {
			r.out(w, tableCloseTag)
			r.cr(w)
		}
	case TableCell:
		openTag := tdTag
		closeTag := tdCloseTag
		if node.IsHeader {
			openTag = thTag
			closeTag = thCloseTag
		}
		if entering {
			align := cellAlignment(node.Align)
			if align != "" {
				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
			}
			if node.Prev == nil {
				r.cr(w)
			}
			r.tag(w, openTag, attrs)
		} else {
			r.out(w, closeTag)
			r.cr(w)
		}
	case TableHead:
		if entering {
			r.cr(w)
			r.out(w, theadTag)
		} else {
			r.out(w, theadCloseTag)
			r.cr(w)
		}
	case TableBody:
		if entering {
			r.cr(w)
			r.out(w, tbodyTag)
			// XXX: this is to adhere to a rather silly test. Should fix test.
			if node.FirstChild == nil {
				r.cr(w)
			}
		} else {
			r.out(w, tbodyCloseTag)
			r.cr(w)
		}
	case TableRow:
		if entering {
			r.cr(w)
			r.out(w, trTag)
		} else {
			r.out(w, trCloseTag)
			r.cr(w)
		}
	default:
		panic("Unknown node type " + node.Type.String())
	}
	return GoToNext
}

// RenderHeader writes HTML document preamble and TOC if requested.
func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
	r.writeDocumentHeader(w)
	if r.Flags&TOC != 0 {
		r.writeTOC(w, ast)
	}
}

// RenderFooter writes HTML document footer.
func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
	if r.Flags&CompletePage == 0 {
		return
	}
	io.WriteString(w, "\n</body>\n</html>\n")
}

func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
	if r.Flags&CompletePage == 0 {
		return
	}
	ending := ""
	if r.Flags&UseXHTML != 0 {
		io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
		io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
		io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
		ending = " /"
	} else {
		io.WriteString(w, "<!DOCTYPE html>\n")
		io.WriteString(w, "<html>\n")
	}
	io.WriteString(w, "<head>\n")
	io.WriteString(w, "  <title>")
	if r.Flags&Smartypants != 0 {
		r.sr.Process(w, []byte(r.Title))
	} else {
		escapeHTML(w, []byte(r.Title))
	}
	io.WriteString(w, "</title>\n")
	io.WriteString(w, "  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
	io.WriteString(w, Version)
	io.WriteString(w, "\"")
	io.WriteString(w, ending)
	io.WriteString(w, ">\n")
	io.WriteString(w, "  <meta charset=\"utf-8\"")
	io.WriteString(w, ending)
	io.WriteString(w, ">\n")
	if r.CSS != "" {
		io.WriteString(w, "  <link rel=\"stylesheet\" type=\"text/css\" href=\"")
		escapeHTML(w, []byte(r.CSS))
		io.WriteString(w, "\"")
		io.WriteString(w, ending)
		io.WriteString(w, ">\n")
	}
	if r.Icon != "" {
		io.WriteString(w, "  <link rel=\"icon\" type=\"image/x-icon\" href=\"")
		escapeHTML(w, []byte(r.Icon))
		io.WriteString(w, "\"")
		io.WriteString(w, ending)
		io.WriteString(w, ">\n")
	}
	io.WriteString(w, "</head>\n")
	io.WriteString(w, "<body>\n\n")
}

func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
	buf := bytes.Buffer{}

	inHeading := false
	tocLevel := 0
	headingCount := 0

	ast.Walk(func(node *Node, entering bool) WalkStatus {
		if node.Type == Heading && !node.HeadingData.IsTitleblock {
			inHeading = entering
			if entering {
				node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
				if node.Level == tocLevel {
					buf.WriteString("</li>\n\n<li>")
				} else if node.Level < tocLevel {
					for node.Level < tocLevel {
						tocLevel--
						buf.WriteString("</li>\n</ul>")
					}
					buf.WriteString("</li>\n\n<li>")
				} else {
					for node.Level > tocLevel {
						tocLevel++
						buf.WriteString("\n<ul>\n<li>")
					}
				}

				fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
				headingCount++
			} else {
				buf.WriteString("</a>")
			}
			return GoToNext
		}

		if inHeading {
			return r.RenderNode(&buf, node, entering)
		}

		return GoToNext
	})

	for ; tocLevel > 0; tocLevel-- {
		buf.WriteString("</li>\n</ul>")
	}

	if buf.Len() > 0 {
		io.WriteString(w, "<nav>\n")
		w.Write(buf.Bytes())
		io.WriteString(w, "\n\n</nav>\n")
	}
	r.lastOutputLen = buf.Len()
}
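A hedged usage sketch (not part of the vendored tree) showing how the HTML backend above is typically configured: build an HTMLRenderer with HTMLRendererParameters and flags, then pass it to Run via WithRenderer. All identifiers come from the vendored v2 sources; the input text is made up.

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// CompletePage triggers writeDocumentHeader/RenderFooter; TOC triggers writeTOC.
	renderer := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Title: "Demo",
		Flags: blackfriday.CompletePage | blackfriday.UseXHTML | blackfriday.TOC,
	})

	input := []byte("# One\n\nhello\n\n## Two\n\nworld\n")
	html := blackfriday.Run(input, blackfriday.WithRenderer(renderer))
	fmt.Println(string(html))
}
```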
File diff suppressed because it is too large
@ -1,103 +1,93 @@
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// Markdown parsing and processing
//
//

package blackfriday

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"unicode/utf8"
)

const VERSION = "1.5"
//
// Markdown parsing and processing
//

// Version string of the package. Appears in the rendered document when
// CompletePage flag is on.
const Version = "2.0"

// Extensions is a bitwise or'ed collection of enabled Blackfriday's
// extensions.
type Extensions int

// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
	EXTENSION_NO_INTRA_EMPHASIS = 1 << iota // ignore emphasis markers inside words
	NoExtensions Extensions = 0
	EXTENSION_TABLES // render tables
	NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words
	EXTENSION_FENCED_CODE // render fenced code blocks
	Tables // Render tables
	EXTENSION_AUTOLINK // detect embedded URLs that are not explicitly marked
	FencedCode // Render fenced code blocks
	EXTENSION_STRIKETHROUGH // strikethrough text using ~~test~~
	Autolink // Detect embedded URLs that are not explicitly marked
	EXTENSION_LAX_HTML_BLOCKS // loosen up HTML block parsing rules
	Strikethrough // Strikethrough text using ~~test~~
	EXTENSION_SPACE_HEADERS // be strict about prefix header rules
	LaxHTMLBlocks // Loosen up HTML block parsing rules
	EXTENSION_HARD_LINE_BREAK // translate newlines into line breaks
	SpaceHeadings // Be strict about prefix heading rules
	EXTENSION_TAB_SIZE_EIGHT // expand tabs to eight spaces instead of four
	HardLineBreak // Translate newlines into line breaks
	EXTENSION_FOOTNOTES // Pandoc-style footnotes
	TabSizeEight // Expand tabs to eight spaces instead of four
	EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
	Footnotes // Pandoc-style footnotes
	EXTENSION_HEADER_IDS // specify header IDs with {#id}
	NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
	EXTENSION_TITLEBLOCK // Titleblock ala pandoc
	HeadingIDs // specify heading IDs with {#id}
	EXTENSION_AUTO_HEADER_IDS // Create the header ID from the text
	Titleblock // Titleblock ala pandoc
	EXTENSION_BACKSLASH_LINE_BREAK // translate trailing backslashes into line breaks
	AutoHeadingIDs // Create the heading ID from the text
	EXTENSION_DEFINITION_LISTS // render definition lists
	BackslashLineBreak // Translate trailing backslashes into line breaks
	EXTENSION_JOIN_LINES // delete newline and join lines
	DefinitionLists // Render definition lists

	commonHtmlFlags = 0 |
	CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants |
	HTML_USE_XHTML |
	SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes
	HTML_USE_SMARTYPANTS |
	HTML_SMARTYPANTS_FRACTIONS |
	HTML_SMARTYPANTS_DASHES |
	HTML_SMARTYPANTS_LATEX_DASHES

	commonExtensions = 0 |
	CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
	EXTENSION_NO_INTRA_EMPHASIS |
	Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
	EXTENSION_TABLES |
	BackslashLineBreak | DefinitionLists
	EXTENSION_FENCED_CODE |
	EXTENSION_AUTOLINK |
	EXTENSION_STRIKETHROUGH |
	EXTENSION_SPACE_HEADERS |
	EXTENSION_HEADER_IDS |
	EXTENSION_BACKSLASH_LINE_BREAK |
	EXTENSION_DEFINITION_LISTS
)

// These are the possible flag values for the link renderer.
// ListType contains bitwise or'ed flags for list and list item objects.
// Only a single one of these values will be used; they are not ORed together.
type ListType int
// These are mostly of interest if you are writing a new output format.
const (
	LINK_TYPE_NOT_AUTOLINK = iota
	LINK_TYPE_NORMAL
	LINK_TYPE_EMAIL
)

// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	LIST_TYPE_ORDERED = 1 << iota
	ListTypeOrdered ListType = 1 << iota
	LIST_TYPE_DEFINITION
	ListTypeDefinition
	LIST_TYPE_TERM
	ListTypeTerm
	LIST_ITEM_CONTAINS_BLOCK
	LIST_ITEM_BEGINNING_OF_LIST
	ListItemContainsBlock
	LIST_ITEM_END_OF_LIST
	ListItemBeginningOfList // TODO: figure out if this is of any use now
	ListItemEndOfList
)

// CellAlignFlags holds a type of alignment in a table cell.
type CellAlignFlags int

// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
	TABLE_ALIGNMENT_LEFT = 1 << iota
	TableAlignmentLeft CellAlignFlags = 1 << iota
	TABLE_ALIGNMENT_RIGHT
	TableAlignmentRight
	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
	TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
)

// The size of a tab stop.
const (
	TAB_SIZE_DEFAULT = 4
	TabSizeDefault = 4
	TAB_SIZE_EIGHT = 8
	TabSizeDouble = 8
)

// blockTags is a set of tags that are recognized as HTML block tags.
@ -145,86 +135,66 @@ var blockTags = map[string]struct{}{
	"video": {},
}

// Renderer is the rendering interface.
// Renderer is the rendering interface. This is mostly of interest if you are
// This is mostly of interest if you are implementing a new rendering format.
// implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// Only an HTML implementation is provided in this repository, see the README
// element.
// for external implementations.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently Html and Latex implementations are provided
type Renderer interface {
	// block-level callbacks
	// RenderNode is the main rendering method. It will be called once for
	BlockCode(out *bytes.Buffer, text []byte, infoString string)
	// every leaf node and twice for every non-leaf node (first with
	BlockQuote(out *bytes.Buffer, text []byte)
	// entering=true, then with entering=false). The method should write its
	BlockHtml(out *bytes.Buffer, text []byte)
	// rendition of the node to the supplied writer w.
	Header(out *bytes.Buffer, text func() bool, level int, id string)
	RenderNode(w io.Writer, node *Node, entering bool) WalkStatus
	HRule(out *bytes.Buffer)
	List(out *bytes.Buffer, text func() bool, flags int)
	ListItem(out *bytes.Buffer, text []byte, flags int)
	Paragraph(out *bytes.Buffer, text func() bool)
	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
	TableRow(out *bytes.Buffer, text []byte)
	TableHeaderCell(out *bytes.Buffer, text []byte, flags int)
	TableCell(out *bytes.Buffer, text []byte, flags int)
	Footnotes(out *bytes.Buffer, text func() bool)
	FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)
	TitleBlock(out *bytes.Buffer, text []byte)

	// Span-level callbacks
	// RenderHeader is a method that allows the renderer to produce some
	AutoLink(out *bytes.Buffer, link []byte, kind int)
	// content preceding the main body of the output document. The header is
	CodeSpan(out *bytes.Buffer, text []byte)
	// understood in the broad sense here. For example, the default HTML
	DoubleEmphasis(out *bytes.Buffer, text []byte)
	// renderer will write not only the HTML document preamble, but also the
	Emphasis(out *bytes.Buffer, text []byte)
	// table of contents if it was requested.
	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
	//
	LineBreak(out *bytes.Buffer)
	// The method will be passed an entire document tree, in case a particular
	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
	// implementation needs to inspect it to produce output.
	RawHtmlTag(out *bytes.Buffer, tag []byte)
	//
	TripleEmphasis(out *bytes.Buffer, text []byte)
	// The output should be written to the supplied writer w. If your
	StrikeThrough(out *bytes.Buffer, text []byte)
	// implementation has no header to write, supply an empty implementation.
	FootnoteRef(out *bytes.Buffer, ref []byte, id int)
	RenderHeader(w io.Writer, ast *Node)

	// Low-level callbacks
	// RenderFooter is a symmetric counterpart of RenderHeader.
	Entity(out *bytes.Buffer, entity []byte)
	RenderFooter(w io.Writer, ast *Node)
	NormalText(out *bytes.Buffer, text []byte)

	// Header and footer
	DocumentHeader(out *bytes.Buffer)
	DocumentFooter(out *bytes.Buffer)

	GetFlags() int
}
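A sketch (not part of the diff) of what the new three-method Renderer interface above looks like in practice, assuming a caller outside the blackfriday package; the renderer name and behavior here are made up for illustration.

```go
package main

import (
	"fmt"
	"io"

	"github.com/russross/blackfriday/v2"
)

// plainTextRenderer keeps only text content and drops all markup.
type plainTextRenderer struct{}

func (plainTextRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
	if entering && (node.Type == blackfriday.Text || node.Type == blackfriday.Code) {
		w.Write(node.Literal)
	}
	if !entering && node.Type == blackfriday.Paragraph {
		io.WriteString(w, "\n\n")
	}
	return blackfriday.GoToNext
}

// No preamble or footer for plain text output.
func (plainTextRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {}
func (plainTextRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {}

func main() {
	out := blackfriday.Run([]byte("Some *markdown* here."), blackfriday.WithRenderer(plainTextRenderer{}))
	fmt.Print(string(out))
}
```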
||||||
|
|
||||||
// Callback functions for inline parsing. One such function is defined
|
// Callback functions for inline parsing. One such function is defined
|
||||||
// for each character that triggers a response when parsing inline data.
|
// for each character that triggers a response when parsing inline data.
|
||||||
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int
|
type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node)
|
||||||
|
|
||||||
// Parser holds runtime state used by the parser.
|
// Markdown is a type that holds extensions and the runtime state used by
|
||||||
// This is constructed by the Markdown function.
|
// Parse, and the renderer. You can not use it directly, construct it with New.
|
||||||
type parser struct {
|
type Markdown struct {
|
||||||
r Renderer
|
renderer Renderer
|
||||||
refOverride ReferenceOverrideFunc
|
referenceOverride ReferenceOverrideFunc
|
||||||
refs map[string]*reference
|
refs map[string]*reference
|
||||||
inlineCallback [256]inlineParser
|
inlineCallback [256]inlineParser
|
||||||
flags int
|
extensions Extensions
|
||||||
nesting int
|
nesting int
|
||||||
maxNesting int
|
maxNesting int
|
||||||
insideLink bool
|
insideLink bool
|
||||||
|
|
||||||
// Footnotes need to be ordered as well as available to quickly check for
|
// Footnotes need to be ordered as well as available to quickly check for
|
||||||
// presence. If a ref is also a footnote, it's stored both in refs and here
|
// presence. If a ref is also a footnote, it's stored both in refs and here
|
||||||
// in notes. Slice is nil if footnotes not enabled.
|
// in notes. Slice is nil if footnotes not enabled.
|
||||||
notes []*reference
|
notes []*reference
|
||||||
notesRecord map[string]struct{}
|
|
||||||
|
doc *Node
|
||||||
|
tip *Node // = doc
|
||||||
|
oldTip *Node
|
||||||
|
lastMatchedContainer *Node // = doc
|
||||||
|
allClosed bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) getRef(refid string) (ref *reference, found bool) {
|
func (p *Markdown) getRef(refid string) (ref *reference, found bool) {
|
||||||
if p.refOverride != nil {
|
if p.referenceOverride != nil {
|
||||||
r, overridden := p.refOverride(refid)
|
r, overridden := p.referenceOverride(refid)
|
||||||
if overridden {
|
if overridden {
|
||||||
if r == nil {
|
if r == nil {
|
||||||
return nil, false
|
return nil, false
|
||||||
@ -232,7 +202,7 @@ func (p *parser) getRef(refid string) (ref *reference, found bool) {
|
|||||||
return &reference{
|
return &reference{
|
||||||
link: []byte(r.Link),
|
link: []byte(r.Link),
|
||||||
title: []byte(r.Title),
|
title: []byte(r.Title),
|
||||||
noteId: 0,
|
noteID: 0,
|
||||||
hasBlock: false,
|
hasBlock: false,
|
||||||
text: []byte(r.Text)}, true
|
text: []byte(r.Text)}, true
|
||||||
}
|
}
|
||||||
@ -242,9 +212,34 @@ func (p *parser) getRef(refid string) (ref *reference, found bool) {
|
|||||||
return ref, found
|
return ref, found
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) isFootnote(ref *reference) bool {
|
func (p *Markdown) finalize(block *Node) {
|
||||||
_, ok := p.notesRecord[string(ref.link)]
|
above := block.Parent
|
||||||
return ok
|
block.open = false
|
||||||
|
p.tip = above
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Markdown) addChild(node NodeType, offset uint32) *Node {
|
||||||
|
return p.addExistingChild(NewNode(node), offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node {
|
||||||
|
for !p.tip.canContain(node.Type) {
|
||||||
|
p.finalize(p.tip)
|
||||||
|
}
|
||||||
|
p.tip.AppendChild(node)
|
||||||
|
p.tip = node
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Markdown) closeUnmatchedBlocks() {
|
||||||
|
if !p.allClosed {
|
||||||
|
for p.oldTip != p.lastMatchedContainer {
|
||||||
|
parent := p.oldTip.Parent
|
||||||
|
p.finalize(p.oldTip)
|
||||||
|
p.oldTip = parent
|
||||||
|
}
|
||||||
|
p.allClosed = true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//
|
//
|
||||||
@ -271,102 +266,27 @@ type Reference struct {
|
|||||||
// See the documentation in Options for more details on use-case.
|
// See the documentation in Options for more details on use-case.
|
||||||
type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
|
type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
|
||||||
|
|
||||||
// Options represents configurable overrides and callbacks (in addition to the
|
// New constructs a Markdown processor. You can use the same With* functions as
|
||||||
// extension flag set) for configuring a Markdown parse.
|
// for Run() to customize parser's behavior and the renderer.
|
||||||
type Options struct {
|
func New(opts ...Option) *Markdown {
|
||||||
// Extensions is a flag set of bit-wise ORed extension bits. See the
|
var p Markdown
|
||||||
// EXTENSION_* flags defined in this package.
|
for _, opt := range opts {
|
||||||
Extensions int
|
opt(&p)
|
||||||
|
|
||||||
// ReferenceOverride is an optional function callback that is called every
|
|
||||||
// time a reference is resolved.
|
|
||||||
//
|
|
||||||
// In Markdown, the link reference syntax can be made to resolve a link to
|
|
||||||
// a reference instead of an inline URL, in one of the following ways:
|
|
||||||
//
|
|
||||||
// * [link text][refid]
|
|
||||||
// * [refid][]
|
|
||||||
//
|
|
||||||
// Usually, the refid is defined at the bottom of the Markdown document. If
|
|
||||||
// this override function is provided, the refid is passed to the override
|
|
||||||
// function first, before consulting the defined refids at the bottom. If
|
|
||||||
// the override function indicates an override did not occur, the refids at
|
|
||||||
// the bottom will be used to fill in the link details.
|
|
||||||
ReferenceOverride ReferenceOverrideFunc
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarkdownBasic is a convenience function for simple rendering.
|
|
||||||
// It processes markdown input with no extensions enabled.
|
|
||||||
func MarkdownBasic(input []byte) []byte {
|
|
||||||
// set up the HTML renderer
|
|
||||||
htmlFlags := HTML_USE_XHTML
|
|
||||||
renderer := HtmlRenderer(htmlFlags, "", "")
|
|
||||||
|
|
||||||
// set up the parser
|
|
||||||
return MarkdownOptions(input, renderer, Options{Extensions: 0})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call Markdown with most useful extensions enabled
|
|
||||||
// MarkdownCommon is a convenience function for simple rendering.
|
|
||||||
// It processes markdown input with common extensions enabled, including:
|
|
||||||
//
|
|
||||||
// * Smartypants processing with smart fractions and LaTeX dashes
|
|
||||||
//
|
|
||||||
// * Intra-word emphasis suppression
|
|
||||||
//
|
|
||||||
// * Tables
|
|
||||||
//
|
|
||||||
// * Fenced code blocks
|
|
||||||
//
|
|
||||||
// * Autolinking
|
|
||||||
//
|
|
||||||
// * Strikethrough support
|
|
||||||
//
|
|
||||||
// * Strict header parsing
|
|
||||||
//
|
|
||||||
// * Custom Header IDs
|
|
||||||
func MarkdownCommon(input []byte) []byte {
|
|
||||||
// set up the HTML renderer
|
|
||||||
renderer := HtmlRenderer(commonHtmlFlags, "", "")
|
|
||||||
return MarkdownOptions(input, renderer, Options{
|
|
||||||
Extensions: commonExtensions})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Markdown is the main rendering function.
|
|
||||||
// It parses and renders a block of markdown-encoded text.
|
|
||||||
// The supplied Renderer is used to format the output, and extensions dictates
|
|
||||||
// which non-standard extensions are enabled.
|
|
||||||
//
|
|
||||||
// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
|
|
||||||
// LatexRenderer, respectively.
|
|
||||||
func Markdown(input []byte, renderer Renderer, extensions int) []byte {
|
|
||||||
return MarkdownOptions(input, renderer, Options{
|
|
||||||
Extensions: extensions})
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarkdownOptions is just like Markdown but takes additional options through
|
|
||||||
// the Options struct.
|
|
||||||
func MarkdownOptions(input []byte, renderer Renderer, opts Options) []byte {
|
|
||||||
// no point in parsing if we can't render
|
|
||||||
if renderer == nil {
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
extensions := opts.Extensions
|
|
||||||
|
|
||||||
// fill in the render structure
|
|
||||||
p := new(parser)
|
|
||||||
p.r = renderer
|
|
||||||
p.flags = extensions
|
|
||||||
p.refOverride = opts.ReferenceOverride
|
|
||||||
p.refs = make(map[string]*reference)
|
p.refs = make(map[string]*reference)
|
||||||
p.maxNesting = 16
|
p.maxNesting = 16
|
||||||
p.insideLink = false
|
p.insideLink = false
|
||||||
|
docNode := NewNode(Document)
|
||||||
|
p.doc = docNode
|
||||||
|
p.tip = docNode
|
||||||
|
p.oldTip = docNode
|
||||||
|
p.lastMatchedContainer = docNode
|
||||||
|
p.allClosed = true
|
||||||
// register inline parsers
|
// register inline parsers
|
||||||
|
p.inlineCallback[' '] = maybeLineBreak
|
||||||
p.inlineCallback['*'] = emphasis
|
p.inlineCallback['*'] = emphasis
|
||||||
p.inlineCallback['_'] = emphasis
|
p.inlineCallback['_'] = emphasis
|
||||||
if extensions&EXTENSION_STRIKETHROUGH != 0 {
|
if p.extensions&Strikethrough != 0 {
|
||||||
p.inlineCallback['~'] = emphasis
|
p.inlineCallback['~'] = emphasis
|
||||||
}
|
}
|
||||||
p.inlineCallback['`'] = codeSpan
|
p.inlineCallback['`'] = codeSpan
|
||||||
@@ -375,116 +295,166 @@ func MarkdownOptions(input []byte, renderer Renderer, opts Options) []byte {
  p.inlineCallback['<'] = leftAngle
  p.inlineCallback['\\'] = escape
  p.inlineCallback['&'] = entity
+  p.inlineCallback['!'] = maybeImage
+  p.inlineCallback['^'] = maybeInlineFootnote
-  if extensions&EXTENSION_AUTOLINK != 0 {
-    p.inlineCallback[':'] = autoLink
+  if p.extensions&Autolink != 0 {
+    p.inlineCallback['h'] = maybeAutoLink
+    p.inlineCallback['m'] = maybeAutoLink
+    p.inlineCallback['f'] = maybeAutoLink
+    p.inlineCallback['H'] = maybeAutoLink
+    p.inlineCallback['M'] = maybeAutoLink
+    p.inlineCallback['F'] = maybeAutoLink
  }
-  if extensions&EXTENSION_FOOTNOTES != 0 {
+  if p.extensions&Footnotes != 0 {
    p.notes = make([]*reference, 0)
-    p.notesRecord = make(map[string]struct{})
  }
-  first := firstPass(p, input)
-  second := secondPass(p, first)
-  return second
+  return &p
}

-// first pass:
-// - normalize newlines
-// - extract references (outside of fenced code blocks)
-// - expand tabs (outside of fenced code blocks)
-// - copy everything else
-func firstPass(p *parser, input []byte) []byte {
-  var out bytes.Buffer
-  tabSize := TAB_SIZE_DEFAULT
-  if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
-    tabSize = TAB_SIZE_EIGHT
-  }
-  beg := 0
-  lastFencedCodeBlockEnd := 0
-  for beg < len(input) {
-    // Find end of this line, then process the line.
-    end := beg
-    for end < len(input) && input[end] != '\n' && input[end] != '\r' {
-      end++
-    }
-
-    if p.flags&EXTENSION_FENCED_CODE != 0 {
-      // track fenced code block boundaries to suppress tab expansion
-      // and reference extraction inside them:
-      if beg >= lastFencedCodeBlockEnd {
-        if i := p.fencedCodeBlock(&out, input[beg:], false); i > 0 {
-          lastFencedCodeBlockEnd = beg + i
-        }
-      }
-    }
-
-    // add the line body if present
-    if end > beg {
-      if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks.
-        out.Write(input[beg:end])
-      } else if refEnd := isReference(p, input[beg:], tabSize); refEnd > 0 {
-        beg += refEnd
-        continue
-      } else {
-        expandTabs(&out, input[beg:end], tabSize)
-      }
-    }
-
-    if end < len(input) && input[end] == '\r' {
-      end++
-    }
-    if end < len(input) && input[end] == '\n' {
-      end++
-    }
-    out.WriteByte('\n')
-
-    beg = end
-  }
-
-  // empty input?
-  if out.Len() == 0 {
-    out.WriteByte('\n')
-  }
-
-  return out.Bytes()
-}
-
-// second pass: actual rendering
-func secondPass(p *parser, input []byte) []byte {
-  var output bytes.Buffer
-
-  p.r.DocumentHeader(&output)
-  p.block(&output, input)
-
-  if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
-    p.r.Footnotes(&output, func() bool {
-      flags := LIST_ITEM_BEGINNING_OF_LIST
-      for i := 0; i < len(p.notes); i += 1 {
-        ref := p.notes[i]
-        var buf bytes.Buffer
-        if ref.hasBlock {
-          flags |= LIST_ITEM_CONTAINS_BLOCK
-          p.block(&buf, ref.title)
-        } else {
-          p.inline(&buf, ref.title)
-        }
-        p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
-        flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
-      }
-
-      return true
-    })
-  }
-
-  p.r.DocumentFooter(&output)
-
-  if p.nesting != 0 {
-    panic("Nesting level did not end at zero")
-  }
-
-  return output.Bytes()
-}
+// Option customizes the Markdown processor's default behavior.
+type Option func(*Markdown)
+
+// WithRenderer allows you to override the default renderer.
+func WithRenderer(r Renderer) Option {
+  return func(p *Markdown) {
+    p.renderer = r
+  }
+}
+
+// WithExtensions allows you to pick some of the many extensions provided by
+// Blackfriday. You can bitwise OR them.
+func WithExtensions(e Extensions) Option {
+  return func(p *Markdown) {
+    p.extensions = e
+  }
+}
+
+// WithNoExtensions turns off all extensions and custom behavior.
+func WithNoExtensions() Option {
+  return func(p *Markdown) {
+    p.extensions = NoExtensions
+    p.renderer = NewHTMLRenderer(HTMLRendererParameters{
+      Flags: HTMLFlagsNone,
+    })
+  }
+}
+
+// WithRefOverride sets an optional function callback that is called every
+// time a reference is resolved.
+//
+// In Markdown, the link reference syntax can be made to resolve a link to
+// a reference instead of an inline URL, in one of the following ways:
+//
+//  * [link text][refid]
+//  * [refid][]
+//
+// Usually, the refid is defined at the bottom of the Markdown document. If
+// this override function is provided, the refid is passed to the override
+// function first, before consulting the defined refids at the bottom. If
+// the override function indicates an override did not occur, the refids at
+// the bottom will be used to fill in the link details.
+func WithRefOverride(o ReferenceOverrideFunc) Option {
+  return func(p *Markdown) {
+    p.referenceOverride = o
+  }
+}
+
+// Run is the main entry point to Blackfriday. It parses and renders a
+// block of markdown-encoded text.
+//
+// The simplest invocation of Run takes one argument, input:
+//     output := Run(input)
+// This will parse the input with CommonExtensions enabled and render it with
+// the default HTMLRenderer (with CommonHTMLFlags).
+//
+// Variadic arguments opts can customize the default behavior. Since Markdown
+// type does not contain exported fields, you can not use it directly. Instead,
+// use the With* functions. For example, this will call the most basic
+// functionality, with no extensions:
+//     output := Run(input, WithNoExtensions())
+//
+// You can use any number of With* arguments, even contradicting ones. They
+// will be applied in order of appearance and the latter will override the
+// former:
+//     output := Run(input, WithNoExtensions(), WithExtensions(exts),
+//         WithRenderer(yourRenderer))
+func Run(input []byte, opts ...Option) []byte {
+  r := NewHTMLRenderer(HTMLRendererParameters{
+    Flags: CommonHTMLFlags,
+  })
+  optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)}
+  optList = append(optList, opts...)
+  parser := New(optList...)
+  ast := parser.Parse(input)
+  var buf bytes.Buffer
+  parser.renderer.RenderHeader(&buf, ast)
+  ast.Walk(func(node *Node, entering bool) WalkStatus {
+    return parser.renderer.RenderNode(&buf, node, entering)
+  })
+  parser.renderer.RenderFooter(&buf, ast)
+  return buf.Bytes()
+}
+
+// Parse is an entry point to the parsing part of Blackfriday. It takes an
+// input markdown document and produces a syntax tree for its contents. This
+// tree can then be rendered with a default or custom renderer, or
+// analyzed/transformed by the caller to whatever non-standard needs they have.
+// The return value is the root node of the syntax tree.
+func (p *Markdown) Parse(input []byte) *Node {
+  p.block(input)
+  // Walk the tree and finish up some of unfinished blocks
+  for p.tip != nil {
+    p.finalize(p.tip)
+  }
+  // Walk the tree again and process inline markdown in each block
+  p.doc.Walk(func(node *Node, entering bool) WalkStatus {
+    if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell {
+      p.inline(node, node.content)
+      node.content = nil
+    }
+    return GoToNext
+  })
+  p.parseRefsToAST()
+  return p.doc
+}
+
+func (p *Markdown) parseRefsToAST() {
+  if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
+    return
+  }
+  p.tip = p.doc
+  block := p.addBlock(List, nil)
+  block.IsFootnotesList = true
+  block.ListFlags = ListTypeOrdered
+  flags := ListItemBeginningOfList
+  // Note: this loop is intentionally explicit, not range-form. This is
+  // because the body of the loop will append nested footnotes to p.notes and
+  // we need to process those late additions. Range form would only walk over
+  // the fixed initial set.
+  for i := 0; i < len(p.notes); i++ {
+    ref := p.notes[i]
+    p.addExistingChild(ref.footnote, 0)
+    block := ref.footnote
+    block.ListFlags = flags | ListTypeOrdered
+    block.RefLink = ref.link
+    if ref.hasBlock {
+      flags |= ListItemContainsBlock
+      p.block(ref.title)
+    } else {
+      p.inline(block, ref.title)
+    }
+    flags &^= ListItemBeginningOfList | ListItemContainsBlock
+  }
+  above := block.Parent
+  finalizeList(block)
+  p.tip = above
+  block.Walk(func(node *Node, entering bool) WalkStatus {
+    if node.Type == Paragraph || node.Type == Heading {
+      p.inline(node, node.content)
+      node.content = nil
+    }
+    return GoToNext
+  })
}

//
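The hunk above replaces the old Markdown/MarkdownOptions entry points with the v2 Run/With* API. As a rough illustration of how a caller migrates (this is not code from the PR; the input literal and the `package main` wrapper are made up, but every identifier comes from the vendored v2 API shown in this hunk):

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("# Title\n\nSome *markdown* text.\n")

	// Default behavior: CommonExtensions plus an HTMLRenderer with CommonHTMLFlags.
	html := blackfriday.Run(input)
	fmt.Println(string(html))

	// Options apply in order, so this first disables all extensions and then
	// swaps in an explicitly constructed HTML renderer.
	r := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Flags: blackfriday.CommonHTMLFlags,
	})
	html = blackfriday.Run(input, blackfriday.WithNoExtensions(), blackfriday.WithRenderer(r))
	fmt.Println(string(html))
}
```

The two-pass firstPass/secondPass pipeline is gone; Run parses to an AST and renders it in one walk.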
@@ -510,24 +480,62 @@ func secondPass(p *parser, input []byte) []byte {
//    [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
-// Inline footnotes such as:
+// Finally, there are inline footnotes such as:
//
-//    Inline footnotes^[Not supported.] also exist.
+//    Inline footnotes^[Also supported.] provide a quick inline explanation,
+//    but are rendered at the bottom of the document.
//
-// are not yet supported.

-// References are parsed and stored in this struct.
+// reference holds all information necessary for a reference-style links or
+// footnotes.
+//
+// Consider this markdown with reference-style links:
+//
+//     [link][ref]
+//
+//     [ref]: /url/ "tooltip title"
+//
+// It will be ultimately converted to this HTML:
+//
+//     <p><a href=\"/url/\" title=\"title\">link</a></p>
+//
+// And a reference structure will be populated as follows:
+//
+//     p.refs["ref"] = &reference{
+//         link:  "/url/",
+//         title: "tooltip title",
+//     }
+//
+// Alternatively, reference can contain information about a footnote. Consider
+// this markdown:
+//
+//     Text needing a footnote.[^a]
+//
+//     [^a]: This is the note
+//
+// A reference structure will be populated as follows:
+//
+//     p.refs["a"] = &reference{
+//         link:   "a",
+//         title:  "This is the note",
+//         noteID: <some positive int>,
+//     }
+//
+// TODO: As you can see, it begs for splitting into two dedicated structures
+// for refs and for footnotes.
type reference struct {
  link     []byte
  title    []byte
-  noteId   int // 0 if not a footnote ref
+  noteID   int // 0 if not a footnote ref
  hasBlock bool
-  text     []byte
+  footnote *Node // a link to the Item node within a list of footnotes
+
+  text []byte // only gets populated by refOverride feature with Reference.Text
}

func (r *reference) String() string {
-  return fmt.Sprintf("{link: %q, title: %q, text: %q, noteId: %d, hasBlock: %v}",
-    r.link, r.title, r.text, r.noteId, r.hasBlock)
+  return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
+    r.link, r.title, r.text, r.noteID, r.hasBlock)
}

// Check whether or not data starts with a reference link.
@@ -535,7 +543,7 @@ func (r *reference) String() string {
// (in the render struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
-func isReference(p *parser, data []byte, tabSize int) int {
+func isReference(p *Markdown, data []byte, tabSize int) int {
  // up to 3 optional leading spaces
  if len(data) < 4 {
    return 0
@@ -545,18 +553,18 @@ func isReference(p *parser, data []byte, tabSize int) int {
    i++
  }

-  noteId := 0
+  noteID := 0

  // id part: anything but a newline between brackets
  if data[i] != '[' {
    return 0
  }
  i++
-  if p.flags&EXTENSION_FOOTNOTES != 0 {
+  if p.extensions&Footnotes != 0 {
    if i < len(data) && data[i] == '^' {
      // we can set it to anything here because the proper noteIds will
      // be assigned later during the second pass. It just has to be != 0
-      noteId = 1
+      noteID = 1
      i++
    }
  }
@@ -568,7 +576,11 @@ func isReference(p *parser, data []byte, tabSize int) int {
    return 0
  }
  idEnd := i
+  // footnotes can have empty ID, like this: [^], but a reference can not be
+  // empty like this: []. Break early if it's not a footnote and there's no ID
+  if noteID == 0 && idOffset == idEnd {
+    return 0
+  }
  // spacer: colon (space | tab)* newline? (space | tab)*
  i++
  if i >= len(data) || data[i] != ':' {
@@ -599,7 +611,7 @@ func isReference(p *parser, data []byte, tabSize int) int {
    hasBlock bool
  )

-  if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
+  if p.extensions&Footnotes != 0 && noteID != 0 {
    linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
    lineEnd = linkEnd
  } else {
@@ -612,11 +624,11 @@ func isReference(p *parser, data []byte, tabSize int) int {
  // a valid ref has been found

  ref := &reference{
-    noteId:   noteId,
+    noteID:   noteID,
    hasBlock: hasBlock,
  }

-  if noteId > 0 {
+  if noteID > 0 {
    // reusing the link field for the id since footnotes don't have links
    ref.link = data[idOffset:idEnd]
    // if footnote, it's not really a title, it's the contained text
@@ -634,15 +646,12 @@ func isReference(p *parser, data []byte, tabSize int) int {
  return lineEnd
}

-func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
+func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
  // link: whitespace-free sequence, optionally between angle brackets
  if data[i] == '<' {
    i++
  }
  linkOffset = i
-  if i == len(data) {
-    return
-  }
  for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
    i++
  }
@@ -705,13 +714,13 @@ func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffse
  return
}

-// The first bit of this logic is the same as (*parser).listItem, but the rest
+// The first bit of this logic is the same as Parser.listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
-func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
  if i == 0 || len(data) == 0 {
    return
  }
@@ -812,7 +821,7 @@ func ishorizontalspace(c byte) bool {
  return c == ' ' || c == '\t'
}

-// Test if a character is a vertical whitespace character.
+// Test if a character is a vertical character.
func isverticalspace(c byte) bool {
  return c == '\n' || c == '\r' || c == '\f' || c == '\v'
}
vendor/github.com/russross/blackfriday/v2/node.go  (generated, vendored, new file, 354 lines)
@@ -0,0 +1,354 @@
|
|||||||
|
package blackfriday
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NodeType specifies a type of a single node of a syntax tree. Usually one
|
||||||
|
// node (and its type) corresponds to a single markdown feature, e.g. emphasis
|
||||||
|
// or code block.
|
||||||
|
type NodeType int
|
||||||
|
|
||||||
|
// Constants for identifying different types of nodes. See NodeType.
|
||||||
|
const (
|
||||||
|
Document NodeType = iota
|
||||||
|
BlockQuote
|
||||||
|
List
|
||||||
|
Item
|
||||||
|
Paragraph
|
||||||
|
Heading
|
||||||
|
HorizontalRule
|
||||||
|
Emph
|
||||||
|
Strong
|
||||||
|
Del
|
||||||
|
Link
|
||||||
|
Image
|
||||||
|
Text
|
||||||
|
HTMLBlock
|
||||||
|
CodeBlock
|
||||||
|
Softbreak
|
||||||
|
Hardbreak
|
||||||
|
Code
|
||||||
|
HTMLSpan
|
||||||
|
Table
|
||||||
|
TableCell
|
||||||
|
TableHead
|
||||||
|
TableBody
|
||||||
|
TableRow
|
||||||
|
)
|
||||||
|
|
||||||
|
var nodeTypeNames = []string{
|
||||||
|
Document: "Document",
|
||||||
|
BlockQuote: "BlockQuote",
|
||||||
|
List: "List",
|
||||||
|
Item: "Item",
|
||||||
|
Paragraph: "Paragraph",
|
||||||
|
Heading: "Heading",
|
||||||
|
HorizontalRule: "HorizontalRule",
|
||||||
|
Emph: "Emph",
|
||||||
|
Strong: "Strong",
|
||||||
|
Del: "Del",
|
||||||
|
Link: "Link",
|
||||||
|
Image: "Image",
|
||||||
|
Text: "Text",
|
||||||
|
HTMLBlock: "HTMLBlock",
|
||||||
|
CodeBlock: "CodeBlock",
|
||||||
|
Softbreak: "Softbreak",
|
||||||
|
Hardbreak: "Hardbreak",
|
||||||
|
Code: "Code",
|
||||||
|
HTMLSpan: "HTMLSpan",
|
||||||
|
Table: "Table",
|
||||||
|
TableCell: "TableCell",
|
||||||
|
TableHead: "TableHead",
|
||||||
|
TableBody: "TableBody",
|
||||||
|
TableRow: "TableRow",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t NodeType) String() string {
|
||||||
|
return nodeTypeNames[t]
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListData contains fields relevant to a List and Item node type.
|
||||||
|
type ListData struct {
|
||||||
|
ListFlags ListType
|
||||||
|
Tight bool // Skip <p>s around list item data if true
|
||||||
|
BulletChar byte // '*', '+' or '-' in bullet lists
|
||||||
|
Delimiter byte // '.' or ')' after the number in ordered lists
|
||||||
|
RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
|
||||||
|
IsFootnotesList bool // This is a list of footnotes
|
||||||
|
}
|
||||||
|
|
||||||
|
// LinkData contains fields relevant to a Link node type.
|
||||||
|
type LinkData struct {
|
||||||
|
Destination []byte // Destination is what goes into a href
|
||||||
|
Title []byte // Title is the tooltip thing that goes in a title attribute
|
||||||
|
NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote
|
||||||
|
Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil.
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeBlockData contains fields relevant to a CodeBlock node type.
|
||||||
|
type CodeBlockData struct {
|
||||||
|
IsFenced bool // Specifies whether it's a fenced code block or an indented one
|
||||||
|
Info []byte // This holds the info string
|
||||||
|
FenceChar byte
|
||||||
|
FenceLength int
|
||||||
|
FenceOffset int
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableCellData contains fields relevant to a TableCell node type.
|
||||||
|
type TableCellData struct {
|
||||||
|
IsHeader bool // This tells if it's under the header row
|
||||||
|
Align CellAlignFlags // This holds the value for align attribute
|
||||||
|
}
|
||||||
|
|
||||||
|
// HeadingData contains fields relevant to a Heading node type.
|
||||||
|
type HeadingData struct {
|
||||||
|
Level int // This holds the heading level number
|
||||||
|
HeadingID string // This might hold heading ID, if present
|
||||||
|
IsTitleblock bool // Specifies whether it's a title block
|
||||||
|
}
|
||||||
|
|
||||||
|
// Node is a single element in the abstract syntax tree of the parsed document.
|
||||||
|
// It holds connections to the structurally neighboring nodes and, for certain
|
||||||
|
// types of nodes, additional information that might be needed when rendering.
|
||||||
|
type Node struct {
|
||||||
|
Type NodeType // Determines the type of the node
|
||||||
|
Parent *Node // Points to the parent
|
||||||
|
FirstChild *Node // Points to the first child, if any
|
||||||
|
LastChild *Node // Points to the last child, if any
|
||||||
|
Prev *Node // Previous sibling; nil if it's the first child
|
||||||
|
Next *Node // Next sibling; nil if it's the last child
|
||||||
|
|
||||||
|
Literal []byte // Text contents of the leaf nodes
|
||||||
|
|
||||||
|
HeadingData // Populated if Type is Heading
|
||||||
|
ListData // Populated if Type is List
|
||||||
|
CodeBlockData // Populated if Type is CodeBlock
|
||||||
|
LinkData // Populated if Type is Link
|
||||||
|
TableCellData // Populated if Type is TableCell
|
||||||
|
|
||||||
|
content []byte // Markdown content of the block nodes
|
||||||
|
open bool // Specifies an open block node that has not been finished to process yet
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNode allocates a node of a specified type.
|
||||||
|
func NewNode(typ NodeType) *Node {
|
||||||
|
return &Node{
|
||||||
|
Type: typ,
|
||||||
|
open: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *Node) String() string {
|
||||||
|
ellipsis := ""
|
||||||
|
snippet := n.Literal
|
||||||
|
if len(snippet) > 16 {
|
||||||
|
snippet = snippet[:16]
|
||||||
|
ellipsis = "..."
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unlink removes node 'n' from the tree.
|
||||||
|
// It panics if the node is nil.
|
||||||
|
func (n *Node) Unlink() {
|
||||||
|
if n.Prev != nil {
|
||||||
|
n.Prev.Next = n.Next
|
||||||
|
} else if n.Parent != nil {
|
||||||
|
n.Parent.FirstChild = n.Next
|
||||||
|
}
|
||||||
|
if n.Next != nil {
|
||||||
|
n.Next.Prev = n.Prev
|
||||||
|
} else if n.Parent != nil {
|
||||||
|
n.Parent.LastChild = n.Prev
|
||||||
|
}
|
||||||
|
n.Parent = nil
|
||||||
|
n.Next = nil
|
||||||
|
n.Prev = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendChild adds a node 'child' as a child of 'n'.
|
||||||
|
// It panics if either node is nil.
|
||||||
|
func (n *Node) AppendChild(child *Node) {
|
||||||
|
child.Unlink()
|
||||||
|
child.Parent = n
|
||||||
|
if n.LastChild != nil {
|
||||||
|
n.LastChild.Next = child
|
||||||
|
child.Prev = n.LastChild
|
||||||
|
n.LastChild = child
|
||||||
|
} else {
|
||||||
|
n.FirstChild = child
|
||||||
|
n.LastChild = child
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertBefore inserts 'sibling' immediately before 'n'.
|
||||||
|
// It panics if either node is nil.
|
||||||
|
func (n *Node) InsertBefore(sibling *Node) {
|
||||||
|
sibling.Unlink()
|
||||||
|
sibling.Prev = n.Prev
|
||||||
|
if sibling.Prev != nil {
|
||||||
|
sibling.Prev.Next = sibling
|
||||||
|
}
|
||||||
|
sibling.Next = n
|
||||||
|
n.Prev = sibling
|
||||||
|
sibling.Parent = n.Parent
|
||||||
|
if sibling.Prev == nil {
|
||||||
|
sibling.Parent.FirstChild = sibling
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *Node) isContainer() bool {
|
||||||
|
switch n.Type {
|
||||||
|
case Document:
|
||||||
|
fallthrough
|
||||||
|
case BlockQuote:
|
||||||
|
fallthrough
|
||||||
|
case List:
|
||||||
|
fallthrough
|
||||||
|
case Item:
|
||||||
|
fallthrough
|
||||||
|
case Paragraph:
|
||||||
|
fallthrough
|
||||||
|
case Heading:
|
||||||
|
fallthrough
|
||||||
|
case Emph:
|
||||||
|
fallthrough
|
||||||
|
case Strong:
|
||||||
|
fallthrough
|
||||||
|
case Del:
|
||||||
|
fallthrough
|
||||||
|
case Link:
|
||||||
|
fallthrough
|
||||||
|
case Image:
|
||||||
|
fallthrough
|
||||||
|
case Table:
|
||||||
|
fallthrough
|
||||||
|
case TableHead:
|
||||||
|
fallthrough
|
||||||
|
case TableBody:
|
||||||
|
fallthrough
|
||||||
|
case TableRow:
|
||||||
|
fallthrough
|
||||||
|
case TableCell:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *Node) canContain(t NodeType) bool {
|
||||||
|
if n.Type == List {
|
||||||
|
return t == Item
|
||||||
|
}
|
||||||
|
if n.Type == Document || n.Type == BlockQuote || n.Type == Item {
|
||||||
|
return t != Item
|
||||||
|
}
|
||||||
|
if n.Type == Table {
|
||||||
|
return t == TableHead || t == TableBody
|
||||||
|
}
|
||||||
|
if n.Type == TableHead || n.Type == TableBody {
|
||||||
|
return t == TableRow
|
||||||
|
}
|
||||||
|
if n.Type == TableRow {
|
||||||
|
return t == TableCell
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// WalkStatus allows NodeVisitor to have some control over the tree traversal.
|
||||||
|
// It is returned from NodeVisitor and different values allow Node.Walk to
|
||||||
|
// decide which node to go to next.
|
||||||
|
type WalkStatus int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// GoToNext is the default traversal of every node.
|
||||||
|
GoToNext WalkStatus = iota
|
||||||
|
// SkipChildren tells walker to skip all children of current node.
|
||||||
|
SkipChildren
|
||||||
|
// Terminate tells walker to terminate the traversal.
|
||||||
|
Terminate
|
||||||
|
)
|
||||||
|
|
||||||
|
// NodeVisitor is a callback to be called when traversing the syntax tree.
|
||||||
|
// Called twice for every node: once with entering=true when the branch is
|
||||||
|
// first visited, then with entering=false after all the children are done.
|
||||||
|
type NodeVisitor func(node *Node, entering bool) WalkStatus
|
||||||
|
|
||||||
|
// Walk is a convenience method that instantiates a walker and starts a
|
||||||
|
// traversal of subtree rooted at n.
|
||||||
|
func (n *Node) Walk(visitor NodeVisitor) {
|
||||||
|
w := newNodeWalker(n)
|
||||||
|
for w.current != nil {
|
||||||
|
status := visitor(w.current, w.entering)
|
||||||
|
switch status {
|
||||||
|
case GoToNext:
|
||||||
|
w.next()
|
||||||
|
case SkipChildren:
|
||||||
|
w.entering = false
|
||||||
|
w.next()
|
||||||
|
case Terminate:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type nodeWalker struct {
|
||||||
|
current *Node
|
||||||
|
root *Node
|
||||||
|
entering bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newNodeWalker(root *Node) *nodeWalker {
|
||||||
|
return &nodeWalker{
|
||||||
|
current: root,
|
||||||
|
root: root,
|
||||||
|
entering: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (nw *nodeWalker) next() {
|
||||||
|
if (!nw.current.isContainer() || !nw.entering) && nw.current == nw.root {
|
||||||
|
nw.current = nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if nw.entering && nw.current.isContainer() {
|
||||||
|
if nw.current.FirstChild != nil {
|
||||||
|
nw.current = nw.current.FirstChild
|
||||||
|
nw.entering = true
|
||||||
|
} else {
|
||||||
|
nw.entering = false
|
||||||
|
}
|
||||||
|
} else if nw.current.Next == nil {
|
||||||
|
nw.current = nw.current.Parent
|
||||||
|
nw.entering = false
|
||||||
|
} else {
|
||||||
|
nw.current = nw.current.Next
|
||||||
|
nw.entering = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dump(ast *Node) {
|
||||||
|
fmt.Println(dumpString(ast))
|
||||||
|
}
|
||||||
|
|
||||||
|
func dumpR(ast *Node, depth int) string {
|
||||||
|
if ast == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
indent := bytes.Repeat([]byte("\t"), depth)
|
||||||
|
content := ast.Literal
|
||||||
|
if content == nil {
|
||||||
|
content = ast.content
|
||||||
|
}
|
||||||
|
result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content)
|
||||||
|
for n := ast.FirstChild; n != nil; n = n.Next {
|
||||||
|
result += dumpR(n, depth+1)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func dumpString(ast *Node) string {
|
||||||
|
return dumpR(ast, 0)
|
||||||
|
}
|
@@ -17,11 +17,14 @@ package blackfriday

import (
  "bytes"
+  "io"
)

-type smartypantsData struct {
+// SPRenderer is a struct containing state of a Smartypants renderer.
+type SPRenderer struct {
  inSingleQuote bool
  inDoubleQuote bool
+  callbacks     [256]smartCallback
}

func wordBoundary(c byte) bool {
@@ -118,7 +121,7 @@ func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote
  return true
}

-func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
  if len(text) >= 2 {
    t1 := tolower(text[1])

@@ -127,7 +130,7 @@ func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byt
    if len(text) >= 3 {
      nextChar = text[2]
    }
-    if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote, false) {
+    if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
      return 1
    }
  }
@@ -152,7 +155,7 @@ func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byt
  if len(text) > 1 {
    nextChar = text[1]
  }
-  if smartQuoteHelper(out, previousChar, nextChar, 's', &smrt.inSingleQuote, false) {
+  if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
    return 0
  }

@@ -160,7 +163,7 @@ func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byt
  return 0
}

-func smartParens(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
  if len(text) >= 3 {
    t1 := tolower(text[1])
    t2 := tolower(text[2])
@@ -185,7 +188,7 @@ func smartParens(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, te
  return 0
}

-func smartDash(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
  if len(text) >= 2 {
    if text[1] == '-' {
      out.WriteString("&mdash;")
@@ -202,7 +205,7 @@ func smartDash(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text
  return 0
}

-func smartDashLatex(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
  if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
    out.WriteString("&mdash;")
    return 2
@@ -216,13 +219,13 @@ func smartDashLatex(out *bytes.Buffer, smrt *smartypantsData, previousChar byte,
  return 0
}

-func smartAmpVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte, quote byte, addNBSP bool) int {
+func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
  if bytes.HasPrefix(text, []byte("&quot;")) {
    nextChar := byte(0)
    if len(text) >= 7 {
      nextChar = text[6]
    }
-    if smartQuoteHelper(out, previousChar, nextChar, quote, &smrt.inDoubleQuote, addNBSP) {
+    if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
      return 5
    }
  }
@@ -235,18 +238,18 @@ func smartAmpVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte
  return 0
}

-func smartAmp(angledQuotes, addNBSP bool) func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
  var quote byte = 'd'
  if angledQuotes {
    quote = 'a'
  }

-  return func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
-    return smartAmpVariant(out, smrt, previousChar, text, quote, addNBSP)
+  return func(out *bytes.Buffer, previousChar byte, text []byte) int {
+    return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
  }
}

-func smartPeriod(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
  if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
    out.WriteString("&hellip;")
    return 2
@@ -261,13 +264,13 @@ func smartPeriod(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, te
  return 0
}

-func smartBacktick(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
  if len(text) >= 2 && text[1] == '`' {
    nextChar := byte(0)
    if len(text) >= 3 {
      nextChar = text[2]
    }
-    if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote, false) {
+    if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
      return 1
    }
  }
@@ -276,7 +279,7 @@ func smartBacktick(out *bytes.Buffer, smrt *smartypantsData, previousChar byte,
  return 0
}

-func smartNumberGeneric(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
  if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
    // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
    // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
@@ -318,7 +321,7 @@ func smartNumberGeneric(out *bytes.Buffer, smrt *smartypantsData, previousChar b
  return 0
}

-func smartNumber(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
  if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
    if text[0] == '1' && text[1] == '/' && text[2] == '2' {
      if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
@@ -346,27 +349,27 @@ func smartNumber(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, te
  return 0
}

-func smartDoubleQuoteVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte, quote byte) int {
+func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
  nextChar := byte(0)
  if len(text) > 1 {
    nextChar = text[1]
  }
-  if !smartQuoteHelper(out, previousChar, nextChar, quote, &smrt.inDoubleQuote, false) {
+  if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
    out.WriteString("&quot;")
  }

  return 0
}

-func smartDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
-  return smartDoubleQuoteVariant(out, smrt, previousChar, text, 'd')
+func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+  return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
}

-func smartAngledDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
-  return smartDoubleQuoteVariant(out, smrt, previousChar, text, 'a')
+func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+  return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
}

-func smartLeftAngle(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
  i := 0

  for i < len(text) && text[i] != '>' {
@@ -377,54 +380,78 @@ func smartLeftAngle(out *bytes.Buffer, smrt *smartypantsData, previousChar byte,
  return i
}

-type smartCallback func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int
+type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int

-type smartypantsRenderer [256]smartCallback
-
-var (
-  smartAmpAngled      = smartAmp(true, false)
-  smartAmpAngledNBSP  = smartAmp(true, true)
-  smartAmpRegular     = smartAmp(false, false)
-  smartAmpRegularNBSP = smartAmp(false, true)
-)
-
-func smartypants(flags int) *smartypantsRenderer {
-  r := new(smartypantsRenderer)
-  addNBSP := flags&HTML_SMARTYPANTS_QUOTES_NBSP != 0
-  if flags&HTML_SMARTYPANTS_ANGLED_QUOTES == 0 {
-    r['"'] = smartDoubleQuote
+// NewSmartypantsRenderer constructs a Smartypants renderer object.
+func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer {
+  var (
+    r SPRenderer
+
+    smartAmpAngled      = r.smartAmp(true, false)
+    smartAmpAngledNBSP  = r.smartAmp(true, true)
+    smartAmpRegular     = r.smartAmp(false, false)
+    smartAmpRegularNBSP = r.smartAmp(false, true)
+
+    addNBSP = flags&SmartypantsQuotesNBSP != 0
+  )
+
+  if flags&SmartypantsAngledQuotes == 0 {
+    r.callbacks['"'] = r.smartDoubleQuote
    if !addNBSP {
-      r['&'] = smartAmpRegular
+      r.callbacks['&'] = smartAmpRegular
    } else {
-      r['&'] = smartAmpRegularNBSP
+      r.callbacks['&'] = smartAmpRegularNBSP
    }
  } else {
-    r['"'] = smartAngledDoubleQuote
+    r.callbacks['"'] = r.smartAngledDoubleQuote
    if !addNBSP {
-      r['&'] = smartAmpAngled
+      r.callbacks['&'] = smartAmpAngled
    } else {
-      r['&'] = smartAmpAngledNBSP
+      r.callbacks['&'] = smartAmpAngledNBSP
    }
  }
-  r['\''] = smartSingleQuote
-  r['('] = smartParens
-  if flags&HTML_SMARTYPANTS_DASHES != 0 {
-    if flags&HTML_SMARTYPANTS_LATEX_DASHES == 0 {
-      r['-'] = smartDash
+  r.callbacks['\''] = r.smartSingleQuote
+  r.callbacks['('] = r.smartParens
+  if flags&SmartypantsDashes != 0 {
+    if flags&SmartypantsLatexDashes == 0 {
+      r.callbacks['-'] = r.smartDash
    } else {
-      r['-'] = smartDashLatex
+      r.callbacks['-'] = r.smartDashLatex
    }
  }
-  r['.'] = smartPeriod
-  if flags&HTML_SMARTYPANTS_FRACTIONS == 0 {
-    r['1'] = smartNumber
-    r['3'] = smartNumber
+  r.callbacks['.'] = r.smartPeriod
+  if flags&SmartypantsFractions == 0 {
+    r.callbacks['1'] = r.smartNumber
+    r.callbacks['3'] = r.smartNumber
  } else {
    for ch := '1'; ch <= '9'; ch++ {
-      r[ch] = smartNumberGeneric
+      r.callbacks[ch] = r.smartNumberGeneric
    }
  }
-  r['<'] = smartLeftAngle
-  r['`'] = smartBacktick
-  return r
+  r.callbacks['<'] = r.smartLeftAngle
+  r.callbacks['`'] = r.smartBacktick
+  return &r
+}
+
+// Process is the entry point of the Smartypants renderer.
+func (r *SPRenderer) Process(w io.Writer, text []byte) {
+  mark := 0
+  for i := 0; i < len(text); i++ {
+    if action := r.callbacks[text[i]]; action != nil {
+      if i > mark {
+        w.Write(text[mark:i])
+      }
+      previousChar := byte(0)
+      if i > 0 {
+        previousChar = text[i-1]
+      }
+      var tmp bytes.Buffer
+      i += action(&tmp, previousChar, text[i:])
+      w.Write(tmp.Bytes())
+      mark = i + 1
+    }
+  }
+  if mark < len(text) {
+    w.Write(text[mark:])
+  }
}
vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE  (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2015 Dmitri Shuralyov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

vendor/github.com/shurcooL/sanitized_anchor_name/README.md  (generated, vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
+sanitized_anchor_name
+=====================
+
+[](https://travis-ci.org/shurcooL/sanitized_anchor_name) [](https://godoc.org/github.com/shurcooL/sanitized_anchor_name)
+
+Package sanitized_anchor_name provides a func to create sanitized anchor names.
+
+Its logic can be reused by multiple packages to create interoperable anchor names
+and links to those anchors.
+
+At this time, it does not try to ensure that generated anchor names
+are unique, that responsibility falls on the caller.
+
+Installation
+------------
+
+```bash
+go get -u github.com/shurcooL/sanitized_anchor_name
+```
+
+Example
+-------
+
+```Go
+anchorName := sanitized_anchor_name.Create("This is a header")
+
+fmt.Println(anchorName)
+
+// Output:
+// this-is-a-header
+```
+
+License
+-------
+
+- [MIT License](LICENSE)

vendor/github.com/shurcooL/sanitized_anchor_name/go.mod  (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
+module github.com/shurcooL/sanitized_anchor_name
Some files were not shown because too many files have changed in this diff.