Merge pull request #8221 from AkihiroSuda/misc-20230307
go.mod: {hcsshim, containerd/*, go-restful/v3, mergo, klauspost/compress, opencontainers/*}@latest
Commit 744809b7ed

go.mod (30 lines changed)
@@ -6,19 +6,19 @@ require (
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1
 	github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652
 	github.com/Microsoft/go-winio v0.6.0
-	github.com/Microsoft/hcsshim v0.10.0-rc.5
+	github.com/Microsoft/hcsshim v0.10.0-rc.7
 	github.com/container-orchestrated-devices/container-device-interface v0.5.4
 	github.com/containerd/aufs v1.0.0
 	github.com/containerd/btrfs/v2 v2.0.0
-	github.com/containerd/cgroups/v3 v3.0.0
+	github.com/containerd/cgroups/v3 v3.0.1
 	github.com/containerd/console v1.0.3
 	github.com/containerd/continuity v0.3.0
-	github.com/containerd/fifo v1.0.0
+	github.com/containerd/fifo v1.1.0
 	github.com/containerd/go-cni v1.1.9
 	github.com/containerd/go-runc v1.0.0
 	github.com/containerd/imgcrypt v1.1.7
 	github.com/containerd/nri v0.3.0
-	github.com/containerd/ttrpc v1.1.1-0.20230127163717-32fab2374638
+	github.com/containerd/ttrpc v1.2.0
 	github.com/containerd/typeurl/v2 v2.1.0
 	github.com/containerd/zfs v1.0.0
 	github.com/containernetworking/cni v1.1.2
@@ -28,16 +28,16 @@ require (
 	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c
 	github.com/docker/go-metrics v0.0.1
 	github.com/docker/go-units v0.5.0
-	github.com/emicklei/go-restful/v3 v3.9.0
+	github.com/emicklei/go-restful/v3 v3.10.1
 	github.com/fsnotify/fsnotify v1.6.0
 	github.com/google/go-cmp v0.5.9
 	github.com/google/uuid v1.3.0
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/hashicorp/go-multierror v1.1.1
-	github.com/imdario/mergo v0.3.12
+	github.com/imdario/mergo v0.3.13
 	github.com/intel/goresctrl v0.3.0
-	github.com/klauspost/compress v1.15.11
+	github.com/klauspost/compress v1.16.0
 	github.com/minio/sha256-simd v1.0.0
 	github.com/moby/locker v1.0.1
 	github.com/moby/sys/mountinfo v0.6.2
@@ -47,15 +47,15 @@ require (
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
 	github.com/opencontainers/runc v1.1.4
-	github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb
+	github.com/opencontainers/runtime-spec v1.1.0-rc.1
 	// ATM the runtime-tools commit we need are beyond the latest tag.
 	// We use a replace to handle that until a new version is tagged.
 	github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626
-	github.com/opencontainers/selinux v1.10.2
+	github.com/opencontainers/selinux v1.11.0
 	github.com/pelletier/go-toml v1.9.5
 	github.com/prometheus/client_golang v1.14.0
 	github.com/sirupsen/logrus v1.9.0
-	github.com/stretchr/testify v1.8.1
+	github.com/stretchr/testify v1.8.2
 	github.com/tchap/go-patricia/v2 v2.3.1
 	github.com/urfave/cli v1.22.12
 	github.com/vishvananda/netlink v1.2.1-beta.2
@@ -68,9 +68,9 @@ require (
 	go.opentelemetry.io/otel/sdk v1.12.0
 	go.opentelemetry.io/otel/trace v1.12.0
 	golang.org/x/sync v0.1.0
-	golang.org/x/sys v0.5.0
-	google.golang.org/genproto v0.0.0-20230131230820-1c016267d619
-	google.golang.org/grpc v1.52.3
+	golang.org/x/sys v0.6.0
+	google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4
+	google.golang.org/grpc v1.53.0
 	google.golang.org/protobuf v1.28.1
 	k8s.io/api v0.26.2
 	k8s.io/apimachinery v0.26.2
@@ -86,7 +86,7 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.0 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/cilium/ebpf v0.9.1 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/containerd/typeurl v1.0.2 // indirect
@@ -128,7 +128,7 @@ require (
 	golang.org/x/crypto v0.1.0 // indirect
 	golang.org/x/mod v0.7.0 // indirect
 	golang.org/x/net v0.7.0 // indirect
-	golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect
+	golang.org/x/oauth2 v0.4.0 // indirect
 	golang.org/x/term v0.5.0 // indirect
 	golang.org/x/text v0.7.0 // indirect
 	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
go.sum (61 lines changed)
@ -20,15 +20,15 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
|
||||
cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU=
|
||||
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
|
||||
cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
@ -89,8 +89,8 @@ github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT
|
||||
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
|
||||
github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
|
||||
github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.5 h1:JfkknPHBtfdC2Ezd+jpl8Kicw7UyhvUSzoy6xsqirwY=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.5/go.mod h1:NNb9uh/cgA52AVhc9+Y7U+YyHQS9nHHA4STcAjdg2xk=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
@ -146,8 +146,9 @@ github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6
|
||||
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
|
||||
@ -197,8 +198,8 @@ github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f2
|
||||
github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/containerd/cgroups/v3 v3.0.0 h1:EzSABvwGkHxdwtZsNBWl8M3iNh+0lMJb/fmtyoSXlsc=
|
||||
github.com/containerd/cgroups/v3 v3.0.0/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw=
|
||||
github.com/containerd/cgroups/v3 v3.0.1 h1:4hfGvu8rfGIwVIDd+nLzn/B9ZXx4BcCjzt5ToenJRaE=
|
||||
github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw=
|
||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
|
||||
@ -239,8 +240,9 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv
|
||||
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
|
||||
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
|
||||
github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
|
||||
github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
|
||||
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
|
||||
github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
|
||||
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
|
||||
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
|
||||
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
|
||||
github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
|
||||
@ -274,8 +276,8 @@ github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0x
|
||||
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
||||
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
||||
github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
|
||||
github.com/containerd/ttrpc v1.1.1-0.20230127163717-32fab2374638 h1:8eVDsirmav+3F0gKY9q/NboZuAqLg++AzW5/HZKKUY4=
|
||||
github.com/containerd/ttrpc v1.1.1-0.20230127163717-32fab2374638/go.mod h1:YYyNVhZrTMiaf51Vj6WhAJqJw+vl/nzABhj8pWrzle4=
|
||||
github.com/containerd/ttrpc v1.2.0 h1:we4O9wzVsBA1HUVRGU8CWFsbjy2P/U2g9raVu5XXNI0=
|
||||
github.com/containerd/ttrpc v1.2.0/go.mod h1:YYyNVhZrTMiaf51Vj6WhAJqJw+vl/nzABhj8pWrzle4=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
|
||||
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
|
||||
@ -375,8 +377,8 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7fo
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
|
||||
github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
@ -601,8 +603,9 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
|
||||
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
|
||||
github.com/intel/goresctrl v0.3.0 h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c=
|
||||
@ -634,8 +637,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
|
||||
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
||||
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
|
||||
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
@ -788,8 +791,9 @@ github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.m
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb h1:1xSVPOd7/UA+39/hXEGnBJ13p6JFB0E1EvQFlrRDOXI=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.1.0-rc.1 h1:wHa9jroFfKGQqFHj0I1fMRKLl0pfj+ynAqBxo3v6u9w=
|
||||
github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.0.0-20221026201742-946c877fa809 h1:WSwkWIIS4s+E/dPF6HuVZ/hnq1WfXN371eESjREnU8k=
|
||||
github.com/opencontainers/runtime-tools v0.0.0-20221026201742-946c877fa809/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
|
||||
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
|
||||
@ -798,8 +802,8 @@ github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xA
|
||||
github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||
github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||
github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4=
|
||||
github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ=
|
||||
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
@ -930,8 +934,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
@ -1188,8 +1193,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk=
|
||||
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@ -1305,8 +1310,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
@ -1487,8 +1492,8 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH
|
||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 h1:p0kMzw6AG0JEzd7Z+kXqOiLhC6gjUQTbtS2zR0Q3DbI=
|
||||
google.golang.org/genproto v0.0.0-20230131230820-1c016267d619/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
|
||||
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
@ -1517,8 +1522,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ=
|
||||
google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
|
||||
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
|
||||
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@@ -4,21 +4,21 @@ go 1.19

 require (
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1
-	github.com/Microsoft/hcsshim v0.10.0-rc.5
+	github.com/Microsoft/hcsshim v0.10.0-rc.7
 	github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1
-	github.com/containerd/cgroups/v3 v3.0.0
+	github.com/containerd/cgroups/v3 v3.0.1
 	github.com/containerd/containerd v1.7.0-beta.0 // see replace; the actual version of containerd is replaced with the code at the root of this repository
 	github.com/containerd/go-runc v1.0.0
-	github.com/containerd/ttrpc v1.1.1-0.20230127163717-32fab2374638
+	github.com/containerd/ttrpc v1.2.0
 	github.com/containerd/typeurl/v2 v2.1.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
-	github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb
+	github.com/opencontainers/runtime-spec v1.1.0-rc.1
 	github.com/sirupsen/logrus v1.9.0 // indirect
-	github.com/stretchr/testify v1.8.1
+	github.com/stretchr/testify v1.8.2
 	go.opentelemetry.io/otel v1.12.0
 	go.opentelemetry.io/otel/sdk v1.12.0
-	golang.org/x/sys v0.5.0
+	golang.org/x/sys v0.6.0
 )

 require (
@@ -28,7 +28,7 @@ require (
 	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/continuity v0.3.0 // indirect
-	github.com/containerd/fifo v1.0.0 // indirect
+	github.com/containerd/fifo v1.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -42,14 +42,14 @@ require (
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/uuid v1.3.0 // indirect
-	github.com/imdario/mergo v0.3.12 // indirect
-	github.com/klauspost/compress v1.15.11 // indirect
+	github.com/imdario/mergo v0.3.13 // indirect
+	github.com/klauspost/compress v1.16.0 // indirect
 	github.com/moby/locker v1.0.1 // indirect
 	github.com/moby/sys/mountinfo v0.6.2 // indirect
 	github.com/moby/sys/sequential v0.5.0 // indirect
 	github.com/moby/sys/signal v0.7.0 // indirect
 	github.com/opencontainers/runc v1.1.4 // indirect
-	github.com/opencontainers/selinux v1.10.2 // indirect
+	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -60,8 +60,8 @@ require (
 	golang.org/x/sync v0.1.0 // indirect
 	golang.org/x/text v0.7.0 // indirect
 	golang.org/x/tools v0.5.0 // indirect
-	google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 // indirect
-	google.golang.org/grpc v1.52.3 // indirect
+	google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
+	google.golang.org/grpc v1.53.0 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
(File diff suppressed because it is too large.)
@@ -28,7 +28,6 @@ import (

 	"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
 	"github.com/containerd/cgroups/v3"
-	"github.com/containerd/cgroups/v3/cgroup1"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"

@@ -51,7 +50,7 @@ func SwapControllerAvailable() bool {
 	p := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes"
 	if cgroups.Mode() == cgroups.Unified {
 		// memory.swap.max does not exist in the cgroup root, so we check /sys/fs/cgroup/<SELF>/memory.swap.max
-		_, unified, err := cgroup1.ParseCgroupFileUnified("/proc/self/cgroup")
+		_, unified, err := cgroups.ParseCgroupFileUnified("/proc/self/cgroup")
 		if err != nil {
 			err = fmt.Errorf("failed to parse /proc/self/cgroup: %w", err)
 			logrus.WithError(err).Warn(warn)
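The only code change here swaps the cgroup1 sub-package for the helper of the same name in the cgroups/v3 root package. A minimal standalone sketch of the same call (swapMaxPath is a hypothetical helper, not part of the PR) shows how the parsed unified path is turned into the per-cgroup memory.swap.max location:

// Sketch, assuming the containerd/cgroups/v3 module is available.
package main

import (
	"fmt"
	"path/filepath"

	"github.com/containerd/cgroups/v3"
)

func swapMaxPath() (string, error) {
	// Same call as in the hunk above: the unified (cgroup v2) path of the
	// current process, parsed out of /proc/self/cgroup.
	_, unified, err := cgroups.ParseCgroupFileUnified("/proc/self/cgroup")
	if err != nil {
		return "", fmt.Errorf("failed to parse /proc/self/cgroup: %w", err)
	}
	return filepath.Join("/sys/fs/cgroup", unified, "memory.swap.max"), nil
}

func main() {
	if p, err := swapMaxPath(); err == nil {
		fmt.Println(p)
	}
}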
vendor/github.com/Microsoft/hcsshim/ext4/dmverity/dmverity.go (generated, vendored; 2 lines changed)
@@ -99,8 +99,10 @@ func MerkleTree(r io.Reader) ([]byte, error) {
		nextLevel.Write(h)
	}

	if nextLevel.Len()%blockSize != 0 {
		padding := bytes.Repeat([]byte{0}, blockSize-(nextLevel.Len()%blockSize))
		nextLevel.Write(padding)
	}

	layers = append(layers, nextLevel.Bytes())
	currentLevel = bufio.NewReaderSize(nextLevel, MerkleTreeBufioSize)
vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go (generated, vendored; 1 line changed)
@@ -249,6 +249,7 @@ func ConvertAndComputeRootDigest(r io.Reader) (string, error) {
 	defer func() {
 		_ = os.Remove(out.Name())
 	}()
+	defer out.Close()

 	options := []Option{
 		ConvertWhiteout,
vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go (generated, vendored; new file, 216 lines)
@@ -0,0 +1,216 @@
package wclayer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"github.com/Microsoft/hcsshim/internal/longpath"
|
||||
"github.com/Microsoft/hcsshim/internal/oc"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
type baseLayerReader struct {
|
||||
s *trace.Span
|
||||
root string
|
||||
result chan *fileEntry
|
||||
proceed chan bool
|
||||
currentFile *os.File
|
||||
backupReader *winio.BackupFileReader
|
||||
}
|
||||
|
||||
func newBaseLayerReader(root string, s *trace.Span) (r *baseLayerReader) {
|
||||
r = &baseLayerReader{
|
||||
s: s,
|
||||
root: root,
|
||||
result: make(chan *fileEntry),
|
||||
proceed: make(chan bool),
|
||||
}
|
||||
go r.walk()
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *baseLayerReader) walkUntilCancelled() error {
|
||||
root, err := longpath.LongAbs(r.root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.root = root
|
||||
|
||||
err = filepath.Walk(filepath.Join(r.root, filesPath), func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048.
|
||||
// Handle failure from what may be a golang bug in the conversion of
|
||||
// UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat
|
||||
// which is called by filepath.Walk will fail when a filename contains
|
||||
// unicode characters. Skip the recycle bin regardless which is goodness.
|
||||
if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
r.result <- &fileEntry{path, info, nil}
|
||||
if !<-r.proceed {
|
||||
return errorIterationCanceled
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err == errorIterationCanceled {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
utilityVMAbsPath := filepath.Join(r.root, utilityVMPath)
|
||||
utilityVMFilesAbsPath := filepath.Join(r.root, utilityVMFilesPath)
|
||||
|
||||
// Ignore a UtilityVM without Files, that's not _really_ a UtiltyVM
|
||||
if _, err = os.Lstat(utilityVMFilesAbsPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return io.EOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
err = filepath.Walk(utilityVMAbsPath, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if path != utilityVMAbsPath && path != utilityVMFilesAbsPath && !hasPathPrefix(path, utilityVMFilesAbsPath) {
|
||||
if info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
r.result <- &fileEntry{path, info, nil}
|
||||
if !<-r.proceed {
|
||||
return errorIterationCanceled
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err == errorIterationCanceled {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
func (r *baseLayerReader) walk() {
|
||||
defer close(r.result)
|
||||
if !<-r.proceed {
|
||||
return
|
||||
}
|
||||
|
||||
err := r.walkUntilCancelled()
|
||||
if err != nil {
|
||||
for {
|
||||
r.result <- &fileEntry{err: err}
|
||||
if !<-r.proceed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *baseLayerReader) reset() {
|
||||
if r.backupReader != nil {
|
||||
r.backupReader.Close()
|
||||
r.backupReader = nil
|
||||
}
|
||||
if r.currentFile != nil {
|
||||
r.currentFile.Close()
|
||||
r.currentFile = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (r *baseLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) {
|
||||
r.reset()
|
||||
r.proceed <- true
|
||||
fe := <-r.result
|
||||
if fe == nil {
|
||||
err = errors.New("BaseLayerReader closed")
|
||||
return
|
||||
}
|
||||
if fe.err != nil {
|
||||
err = fe.err
|
||||
return
|
||||
}
|
||||
|
||||
path, err = filepath.Rel(r.root, fe.path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if f != nil {
|
||||
f.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
fileInfo, err = winio.GetFileBasicInfo(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
size = fe.fi.Size()
|
||||
r.backupReader = winio.NewBackupFileReader(f, true)
|
||||
|
||||
r.currentFile = f
|
||||
f = nil
|
||||
return
|
||||
}
|
||||
|
||||
func (r *baseLayerReader) LinkInfo() (uint32, *winio.FileIDInfo, error) {
|
||||
fileStandardInfo, err := winio.GetFileStandardInfo(r.currentFile)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
fileIDInfo, err := winio.GetFileID(r.currentFile)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
return fileStandardInfo.NumberOfLinks, fileIDInfo, nil
|
||||
}
|
||||
|
||||
func (r *baseLayerReader) Read(b []byte) (int, error) {
|
||||
if r.backupReader == nil {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return r.backupReader.Read(b)
|
||||
}
|
||||
|
||||
func (r *baseLayerReader) Close() (err error) {
|
||||
defer r.s.End()
|
||||
defer func() {
|
||||
oc.SetSpanStatus(r.s, err)
|
||||
close(r.proceed)
|
||||
}()
|
||||
r.proceed <- false
|
||||
// The r.result channel will be closed once walk() returns
|
||||
<-r.result
|
||||
r.reset()
|
||||
return nil
|
||||
}
|
vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go (generated, vendored; new file, 158 lines)
@@ -0,0 +1,158 @@
package wclayer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/hcserror"
|
||||
"github.com/Microsoft/hcsshim/internal/longpath"
|
||||
"github.com/Microsoft/hcsshim/internal/oc"
|
||||
"github.com/Microsoft/hcsshim/internal/safefile"
|
||||
"github.com/Microsoft/hcsshim/internal/winapi"
|
||||
"github.com/pkg/errors"
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var hiveNames = []string{"DEFAULT", "SAM", "SECURITY", "SOFTWARE", "SYSTEM"}
|
||||
|
||||
// Ensure the given file exists as an ordinary file, and create a minimal hive file if not.
|
||||
func ensureHive(path string, root *os.File) (err error) {
|
||||
_, err = safefile.LstatRelative(path, root)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("accessing %s: %w", path, err)
|
||||
}
|
||||
|
||||
version := windows.RtlGetVersion()
|
||||
if version == nil {
|
||||
return fmt.Errorf("failed to get OS version")
|
||||
}
|
||||
|
||||
var fullPath string
|
||||
fullPath, err = longpath.LongAbs(filepath.Join(root.Name(), path))
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting path: %w", err)
|
||||
}
|
||||
|
||||
var key syscall.Handle
|
||||
err = winapi.ORCreateHive(&key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating hive: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
closeErr := winapi.ORCloseHive(key)
|
||||
if closeErr != nil && err == nil {
|
||||
err = fmt.Errorf("closing hive key: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
err = winapi.ORSaveHive(key, fullPath, version.MajorVersion, version.MinorVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("saving hive: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) {
|
||||
// The base layer registry hives will be copied from here
|
||||
const hiveSourcePath = "Files\\Windows\\System32\\config"
|
||||
if err = safefile.MkdirAllRelative(hiveSourcePath, root); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, hiveName := range hiveNames {
|
||||
hivePath := filepath.Join(hiveSourcePath, hiveName)
|
||||
if err = ensureHive(hivePath, root); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
stat, err := safefile.LstatRelative(utilityVMFilesPath, root)
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !stat.Mode().IsDir() {
|
||||
fullPath := filepath.Join(root.Name(), utilityVMFilesPath)
|
||||
return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String())
|
||||
}
|
||||
|
||||
const bcdRelativePath = "EFI\\Microsoft\\Boot\\BCD"
|
||||
|
||||
// Just check that this exists as a regular file. If it exists but is not a valid registry hive,
|
||||
// ProcessUtilityVMImage will complain:
|
||||
// "The registry could not read in, or write out, or flush, one of the files that contain the system's image of the registry."
|
||||
bcdPath := filepath.Join(utilityVMFilesPath, bcdRelativePath)
|
||||
|
||||
stat, err = safefile.LstatRelative(bcdPath, root)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "UtilityVM must contain '%s'", bcdRelativePath)
|
||||
}
|
||||
|
||||
if !stat.Mode().IsRegular() {
|
||||
fullPath := filepath.Join(root.Name(), bcdPath)
|
||||
return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String())
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func convertToBaseLayer(ctx context.Context, root *os.File) error {
|
||||
hasUtilityVM, err := ensureBaseLayer(root)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ProcessBaseLayer(ctx, root.Name()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !hasUtilityVM {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = safefile.EnsureNotReparsePointRelative(utilityVMPath, root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
utilityVMPath := filepath.Join(root.Name(), utilityVMPath)
|
||||
return ProcessUtilityVMImage(ctx, utilityVMPath)
|
||||
}
|
||||
|
||||
// ConvertToBaseLayer processes a candidate base layer, i.e. a directory
|
||||
// containing the desired file content under Files/, and optionally the
|
||||
// desired file content for a UtilityVM under UtilityVM/Files/
|
||||
func ConvertToBaseLayer(ctx context.Context, path string) (err error) {
|
||||
title := "hcsshim::ConvertToBaseLayer"
|
||||
ctx, span := trace.StartSpan(ctx, title)
|
||||
defer span.End()
|
||||
defer func() { oc.SetSpanStatus(span, err) }()
|
||||
span.AddAttributes(trace.StringAttribute("path", path))
|
||||
|
||||
root, err := safefile.OpenRoot(path)
|
||||
if err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
}
|
||||
defer func() {
|
||||
if err2 := root.Close(); err == nil && err2 != nil {
|
||||
err = hcserror.New(err2, title+" - failed", "")
|
||||
}
|
||||
}()
|
||||
|
||||
if err = convertToBaseLayer(ctx, root); err != nil {
|
||||
return hcserror.New(err, title+" - failed", "")
|
||||
}
|
||||
return nil
|
||||
}
|
vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go (generated, vendored; 7 lines changed)
@@ -45,6 +45,8 @@ func ExportLayer(ctx context.Context, path string, exportFolderPath string, pare
 type LayerReader interface {
 	// Next advances to the next file and returns the name, size, and file info
 	Next() (string, int64, *winio.FileBasicInfo, error)
+	// LinkInfo returns the number of links and the file identifier for the current file.
+	LinkInfo() (uint32, *winio.FileIDInfo, error)
 	// Read reads data from the current file, in the format of a Win32 backup stream, and
 	// returns the number of bytes read.
 	Read(b []byte) (int, error)
@@ -67,6 +69,11 @@ func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string)
 		trace.StringAttribute("path", path),
 		trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", ")))

+	if len(parentLayerPaths) == 0 {
+		// This is a base layer. It gets exported differently.
+		return newBaseLayerReader(path, span), nil
+	}
+
 	exportPath, err := os.MkdirTemp("", "hcs")
 	if err != nil {
 		return nil, err
vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go (generated, vendored; 12 lines changed)
@@ -294,6 +294,18 @@ func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.Fil
 	return
 }

+func (r *legacyLayerReader) LinkInfo() (uint32, *winio.FileIDInfo, error) {
+	fileStandardInfo, err := winio.GetFileStandardInfo(r.currentFile)
+	if err != nil {
+		return 0, nil, err
+	}
+	fileIDInfo, err := winio.GetFileID(r.currentFile)
+	if err != nil {
+		return 0, nil, err
+	}
+	return fileStandardInfo.NumberOfLinks, fileIDInfo, nil
+}
+
 func (r *legacyLayerReader) Read(b []byte) (int, error) {
 	if r.backupReader == nil {
 		if r.currentFile == nil {
vendor/github.com/Microsoft/hcsshim/internal/winapi/ofreg.go (generated, vendored; new file, 5 lines)
@@ -0,0 +1,5 @@
+package winapi
+
+//sys ORCreateHive(key *syscall.Handle) (regerrno error) = offreg.ORCreateHive
+//sys ORSaveHive(key syscall.Handle, file string, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) = offreg.ORSaveHive
+//sys ORCloseHive(key syscall.Handle) (regerrno error) = offreg.ORCloseHive
vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go (generated, vendored; 37 lines changed)
@@ -47,6 +47,7 @@ var (
 	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
 	modnetapi32 = windows.NewLazySystemDLL("netapi32.dll")
 	modntdll = windows.NewLazySystemDLL("ntdll.dll")
+	modoffreg = windows.NewLazySystemDLL("offreg.dll")

 	procLogonUserW = modadvapi32.NewProc("LogonUserW")
 	procBfSetupFilter = modbindfltapi.NewProc("BfSetupFilter")
@@ -82,6 +83,9 @@ var (
 	procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation")
 	procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile")
 	procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError")
+	procORCloseHive = modoffreg.NewProc("ORCloseHive")
+	procORCreateHive = modoffreg.NewProc("ORCreateHive")
+	procORSaveHive = modoffreg.NewProc("ORSaveHive")
 )

 func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) {
@@ -376,3 +380,36 @@ func RtlNtStatusToDosError(status uint32) (winerr error) {
 	}
 	return
 }
+
+func ORCloseHive(key syscall.Handle) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(key), 0, 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func ORCreateHive(key *syscall.Handle) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procORCreateHive.Addr(), 1, uintptr(unsafe.Pointer(key)), 0, 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func ORSaveHive(key syscall.Handle, file string, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) {
+	var _p0 *uint16
+	_p0, regerrno = syscall.UTF16PtrFromString(file)
+	if regerrno != nil {
+		return
+	}
+	return _ORSaveHive(key, _p0, OsMajorVersion, OsMinorVersion)
+}
+
+func _ORSaveHive(key syscall.Handle, file *uint16, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(key), uintptr(unsafe.Pointer(file)), uintptr(OsMajorVersion), uintptr(OsMinorVersion), 0, 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
vendor/github.com/Microsoft/hcsshim/layer.go (generated, vendored; 3 lines changed)
@@ -70,6 +70,9 @@ func ProcessUtilityVMImage(path string) error {
 func UnprepareLayer(info DriverInfo, layerId string) error {
 	return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId))
 }
+func ConvertToBaseLayer(path string) error {
+	return wclayer.ConvertToBaseLayer(context.Background(), path)
+}

 type DriverInfo struct {
 	Flavour int
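The new wrapper mirrors the other package-level helpers in layer.go. A hypothetical caller sketch (prepareBaseLayer and the layer path are illustrative, not taken from the PR):

// Sketch: convert a candidate base layer directory (content under Files\,
// optionally UtilityVM\Files\) through the public hcsshim API added above.
package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func prepareBaseLayer(path string) error {
	// Wraps wclayer.ConvertToBaseLayer with a background context.
	return hcsshim.ConvertToBaseLayer(path)
}

func main() {
	if err := prepareBaseLayer(`C:\layers\base`); err != nil {
		log.Fatal(err)
	}
}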
vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go (generated, vendored; 13 lines changed)
@@ -1,5 +1,3 @@
-//go:build windows
-
 package osversion

 import (
@@ -47,6 +45,15 @@ func Build() uint16 {
 	return Get().Build
 }

-func (osv OSVersion) ToString() string {
+// String returns the OSVersion formatted as a string. It implements the
+// [fmt.Stringer] interface.
+func (osv OSVersion) String() string {
 	return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
 }
+
+// ToString returns the OSVersion formatted as a string.
+//
+// Deprecated: use [OSVersion.String].
+func (osv OSVersion) ToString() string {
+	return osv.String()
+}
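With String in place, OSVersion values format directly through the fmt package; a small assumed usage sketch (not part of the diff):

// Sketch: String makes OSVersion satisfy fmt.Stringer, so values print with
// %v/%s; ToString remains as a deprecated alias.
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/osversion"
)

func main() {
	v := osversion.Get() // Windows-only, per the package's build constraints
	fmt.Printf("running on %s (build %d)\n", v, v.Build)
	_ = v.ToString() // still compiles; deprecated in favor of String
}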
vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go (generated, vendored; 23 lines changed)
@@ -51,6 +51,8 @@ func ExportLayerToTar(ctx context.Context, w io.Writer, path string, parentLayer
 }

 func writeTarFromLayer(ctx context.Context, r wclayer.LayerReader, w io.Writer) error {
+	linkRecords := make(map[[16]byte]string)
+
 	t := tar.NewWriter(w)
 	for {
 		select {
@@ -76,6 +78,27 @@ func writeTarFromLayer(ctx context.Context, r wclayer.LayerReader, w io.Writer)
 				return err
 			}
 		} else {
+			numberOfLinks, fileIDInfo, err := r.LinkInfo()
+			if err != nil {
+				return err
+			}
+			if numberOfLinks > 1 {
+				if linkName, ok := linkRecords[fileIDInfo.FileID]; ok {
+					// We've seen this file before, by another name, so put a hardlink in the tar stream.
+					hdr := backuptar.BasicInfoHeader(name, 0, fileInfo)
+					hdr.Mode = 0644
+					hdr.Typeflag = tar.TypeLink
+					hdr.Linkname = linkName
+					if err := t.WriteHeader(hdr); err != nil {
+						return err
+					}
+					continue
+				}
+
+				// All subsequent names for this file will be hard-linked to this name
+				linkRecords[fileIDInfo.FileID] = filepath.ToSlash(name)
+			}
+
 			err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo)
 			if err != nil {
 				return err
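The added branch deduplicates hard-linked files by file ID, so every later name for the same file becomes a tar hardlink entry instead of a second copy. A simplified, non-Windows illustration of the same idea, keyed on inode numbers (the tarutil package, addFile, and fileKey are hypothetical names):

// Sketch of the hardlink strategy above, using Unix inode numbers instead of
// Windows file IDs. Linux-oriented; relies on syscall.Stat_t being available.
package tarutil

import (
	"archive/tar"
	"os"
	"syscall"
)

type fileKey struct {
	dev uint64
	ino uint64
}

// addFile writes either a regular-file header or, if this inode was already
// emitted under another name, a hardlink header pointing at the first name.
func addFile(tw *tar.Writer, seen map[fileKey]string, name string, fi os.FileInfo) error {
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		return err
	}
	hdr.Name = name

	if st, ok := fi.Sys().(*syscall.Stat_t); ok && st.Nlink > 1 {
		key := fileKey{dev: uint64(st.Dev), ino: uint64(st.Ino)}
		if first, done := seen[key]; done {
			// Seen before by another name: record a link, no file body needed.
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = first
			hdr.Size = 0
			return tw.WriteHeader(hdr)
		}
		seen[key] = name // later names hardlink back to this one
	}
	return tw.WriteHeader(hdr) // caller streams the file body afterwards
}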
vendor/github.com/cespare/xxhash/v2/README.md (generated, vendored; 29 lines changed)
@@ -3,8 +3,7 @@
 [](https://pkg.go.dev/github.com/cespare/xxhash/v2)
 [](https://github.com/cespare/xxhash/actions/workflows/test.yml)

-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
 high-quality hashing algorithm that is much faster than anything in the Go
 standard library.

@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
 func (*Digest) Sum64() uint64
 ```

-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/

 ## Compatibility

@@ -46,18 +48,19 @@ Here are some quick benchmarks comparing the pure-Go and assembly
 implementations of Sum64.

 | input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |

-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:

 ```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
 ```

 ## Projects using this package
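For orientation, a short assumed usage sketch of the package API listed in the README excerpt above (one-shot hashing plus a streaming Digest):

// Sketch, assuming the cespare/xxhash/v2 module is available.
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hash of a string.
	fmt.Printf("%016x\n", xxhash.Sum64String("hello world"))

	// Streaming: feed data incrementally, then read the 64-bit sum.
	d := xxhash.New()
	d.WriteString("hello ")
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}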
vendor/github.com/cespare/xxhash/v2/testall.sh (generated, vendored; new file, 10 lines)
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
vendor/github.com/cespare/xxhash/v2/xxhash.go (generated, vendored; 47 lines changed)
@@ -16,19 +16,11 @@ const (
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
// Store the primes in an array as well.
|
||||
//
|
||||
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||
// contiguous array of the assembly code.
|
||||
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
type Digest struct {
|
||||
@ -50,10 +42,10 @@ func New() *Digest {
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = prime1v + prime2
|
||||
d.v1 = primes[0] + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -prime1v
|
||||
d.v4 = -primes[0]
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
memleft := d.mem[d.n&(len(d.mem)-1):]
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(d.mem[d.n:], b)
|
||||
copy(memleft, b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
copy(d.mem[d.n:], b)
|
||||
c := copy(memleft, b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[32-d.n:]
|
||||
b = b[c:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
|
||||
|
||||
h += d.total
|
||||
|
||||
i, end := 0, d.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(d.mem[i:i+8]))
|
||||
b := d.mem[:d.n&(len(d.mem)-1)]
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
b = b[4:]
|
||||
}
|
||||
for i < end {
|
||||
h ^= uint64(d.mem[i]) * prime5
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
|
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s (generated, vendored; 308 lines changed)
@@ -1,215 +1,209 @@
|
||||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// SI pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// DI prime4v
|
||||
// Registers:
|
||||
#define h AX
|
||||
#define d AX
|
||||
#define p SI // pointer to advance through b
|
||||
#define n DX
|
||||
#define end BX // loop end
|
||||
#define v1 R8
|
||||
#define v2 R9
|
||||
#define v3 R10
|
||||
#define v4 R11
|
||||
#define x R12
|
||||
#define prime1 R13
|
||||
#define prime2 R14
|
||||
#define prime4 DI
|
||||
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
#define round(acc, x) \
|
||||
IMULQ prime2, x \
|
||||
ADDQ x, acc \
|
||||
ROLQ $31, acc \
|
||||
IMULQ prime1, acc
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ DI, acc
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
IMULQ prime2, x \
|
||||
ROLQ $31, x \
|
||||
IMULQ prime1, x
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and x.
|
||||
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
XORQ x, acc \
|
||||
IMULQ prime1, acc \
|
||||
ADDQ prime4, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||
// to process.
|
||||
#define blockLoop() \
|
||||
loop: \
|
||||
MOVQ +0(p), x \
|
||||
round(v1, x) \
|
||||
MOVQ +8(p), x \
|
||||
round(v2, x) \
|
||||
MOVQ +16(p), x \
|
||||
round(v3, x) \
|
||||
MOVQ +24(p), x \
|
||||
round(v4, x) \
|
||||
ADDQ $32, p \
|
||||
CMPQ p, end \
|
||||
JLE loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), DI
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
MOVQ ·primes+24(SB), prime4
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
MOVQ b_base+0(FP), p
|
||||
MOVQ b_len+8(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
SUBQ $32, end
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ DX, $32
|
||||
CMPQ n, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
MOVQ prime1, v1
|
||||
ADDQ prime2, v1
|
||||
MOVQ prime2, v2
|
||||
XORQ v3, v3
|
||||
XORQ v4, v4
|
||||
SUBQ prime1, v4
|
||||
|
||||
// Loop until SI > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
blockLoop()
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
MOVQ v1, h
|
||||
ROLQ $1, h
|
||||
MOVQ v2, x
|
||||
ROLQ $7, x
|
||||
ADDQ x, h
|
||||
MOVQ v3, x
|
||||
ROLQ $12, x
|
||||
ADDQ x, h
|
||||
MOVQ v4, x
|
||||
ROLQ $18, x
|
||||
ADDQ x, h
|
||||
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·prime5v(SB), AX
|
||||
MOVQ ·primes+32(SB), h
|
||||
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
ADDQ n, h
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
ADDQ $24, end
|
||||
CMPQ p, end
|
||||
JG try4
|
||||
|
||||
CMPQ SI, BX
|
||||
JG fourByte
|
||||
loop8:
|
||||
MOVQ (p), x
|
||||
ADDQ $8, p
|
||||
round0(x)
|
||||
XORQ x, h
|
||||
ROLQ $27, h
|
||||
IMULQ prime1, h
|
||||
ADDQ prime4, h
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
CMPQ p, end
|
||||
JLE loop8
|
||||
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ DI, AX
|
||||
try4:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JG try1
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE wordLoop
|
||||
MOVL (p), x
|
||||
ADDQ $4, p
|
||||
IMULQ prime1, x
|
||||
XORQ x, h
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
JG singles
|
||||
ROLQ $23, h
|
||||
IMULQ prime2, h
|
||||
ADDQ ·primes+16(SB), h
|
||||
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
try1:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
loop1:
|
||||
MOVBQZX (p), x
|
||||
ADDQ $1, p
|
||||
IMULQ ·primes+32(SB), x
|
||||
XORQ x, h
|
||||
ROLQ $11, h
|
||||
IMULQ prime1, h
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ SI, BX
|
||||
JL singlesLoop
|
||||
CMPQ p, end
|
||||
JL loop1
|
||||
|
||||
finalize:
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
MOVQ h, x
|
||||
SHRQ $33, x
|
||||
XORQ x, h
|
||||
IMULQ prime2, h
|
||||
MOVQ h, x
|
||||
SHRQ $29, x
|
||||
XORQ x, h
|
||||
IMULQ ·primes+16(SB), h
|
||||
MOVQ h, x
|
||||
SHRQ $32, x
|
||||
XORQ x, h
|
||||
|
||||
MOVQ AX, ret+24(FP)
|
||||
MOVQ h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the d pointer.
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
MOVQ b_base+8(FP), p
|
||||
MOVQ b_len+16(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
SUBQ $32, end
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ d+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
MOVQ s+0(FP), d
|
||||
MOVQ 0(d), v1
|
||||
MOVQ 8(d), v2
|
||||
MOVQ 16(d), v3
|
||||
MOVQ 24(d), v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
blockLoop()
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
MOVQ v1, 0(d)
|
||||
MOVQ v2, 8(d)
|
||||
MOVQ v3, 16(d)
|
||||
MOVQ v4, 24(d)
|
||||
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
// The number of bytes written is p minus the old base pointer.
|
||||
SUBQ b_base+8(FP), p
|
||||
MOVQ p, ret+32(FP)
|
||||
|
||||
RET
|
||||
|
183 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s (generated, vendored, new file)
@ -0,0 +1,183 @@
|
||||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Registers:
|
||||
#define digest R1
|
||||
#define h R2 // return value
|
||||
#define p R3 // input pointer
|
||||
#define n R4 // input length
|
||||
#define nblocks R5 // n / 32
|
||||
#define prime1 R7
|
||||
#define prime2 R8
|
||||
#define prime3 R9
|
||||
#define prime4 R10
|
||||
#define prime5 R11
|
||||
#define v1 R12
|
||||
#define v2 R13
|
||||
#define v3 R14
|
||||
#define v4 R15
|
||||
#define x1 R20
|
||||
#define x2 R21
|
||||
#define x3 R22
|
||||
#define x4 R23
|
||||
|
||||
#define round(acc, x) \
|
||||
MADD prime2, acc, x, acc \
|
||||
ROR $64-31, acc \
|
||||
MUL prime1, acc
|
||||
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
MUL prime2, x \
|
||||
ROR $64-31, x \
|
||||
MUL prime1, x
|
||||
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
EOR x, acc \
|
||||
MADD acc, prime4, prime1, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||
#define blockLoop() \
|
||||
LSR $5, n, nblocks \
|
||||
PCALIGN $16 \
|
||||
loop: \
|
||||
LDP.P 16(p), (x1, x2) \
|
||||
LDP.P 16(p), (x3, x4) \
|
||||
round(v1, x1) \
|
||||
round(v2, x2) \
|
||||
round(v3, x3) \
|
||||
round(v4, x4) \
|
||||
SUB $1, nblocks \
|
||||
CBNZ nblocks, loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
LDP b_base+0(FP), (p, n)
|
||||
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
LDP ·primes+16(SB), (prime3, prime4)
|
||||
MOVD ·primes+32(SB), prime5
|
||||
|
||||
CMP $32, n
|
||||
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||||
BLT afterLoop
|
||||
|
||||
ADD prime1, prime2, v1
|
||||
MOVD prime2, v2
|
||||
MOVD $0, v3
|
||||
NEG prime1, v4
|
||||
|
||||
blockLoop()
|
||||
|
||||
ROR $64-1, v1, x1
|
||||
ROR $64-7, v2, x2
|
||||
ADD x1, x2
|
||||
ROR $64-12, v3, x3
|
||||
ROR $64-18, v4, x4
|
||||
ADD x3, x4
|
||||
ADD x2, x4, h
|
||||
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
afterLoop:
|
||||
ADD n, h
|
||||
|
||||
TBZ $4, n, try8
|
||||
LDP.P 16(p), (x1, x2)
|
||||
|
||||
round0(x1)
|
||||
|
||||
// NOTE: here and below, sequencing the EOR after the ROR (using a
|
||||
// rotated register) is worth a small but measurable speedup for small
|
||||
// inputs.
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
round0(x2)
|
||||
ROR $64-27, h
|
||||
EOR x2 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try8:
|
||||
TBZ $3, n, try4
|
||||
MOVD.P 8(p), x1
|
||||
|
||||
round0(x1)
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try4:
|
||||
TBZ $2, n, try2
|
||||
MOVWU.P 4(p), x2
|
||||
|
||||
MUL prime1, x2
|
||||
ROR $64-23, h
|
||||
EOR x2 @> 64-23, h, h
|
||||
MADD h, prime3, prime2, h
|
||||
|
||||
try2:
|
||||
TBZ $1, n, try1
|
||||
MOVHU.P 2(p), x3
|
||||
AND $255, x3, x1
|
||||
LSR $8, x3, x2
|
||||
|
||||
MUL prime5, x1
|
||||
ROR $64-11, h
|
||||
EOR x1 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
MUL prime5, x2
|
||||
ROR $64-11, h
|
||||
EOR x2 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
try1:
|
||||
TBZ $0, n, finalize
|
||||
MOVBU (p), x4
|
||||
|
||||
MUL prime5, x4
|
||||
ROR $64-11, h
|
||||
EOR x4 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
finalize:
|
||||
EOR h >> 33, h
|
||||
MUL prime2, h
|
||||
EOR h >> 29, h
|
||||
MUL prime3, h
|
||||
EOR h >> 32, h
|
||||
|
||||
MOVD h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
|
||||
// Load state. Assume v[1-4] are stored contiguously.
|
||||
MOVD d+0(FP), digest
|
||||
LDP 0(digest), (v1, v2)
|
||||
LDP 16(digest), (v3, v4)
|
||||
|
||||
LDP b_base+8(FP), (p, n)
|
||||
|
||||
blockLoop()
|
||||
|
||||
// Store updated state.
|
||||
STP (v1, v2), 0(digest)
|
||||
STP (v3, v4), 16(digest)
|
||||
|
||||
BIC $31, n
|
||||
MOVD n, ret+32(FP)
|
||||
RET
|
@ -1,3 +1,5 @@
//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego
22 vendor/github.com/cespare/xxhash/v2/xxhash_other.go (generated, vendored)
@ -1,4 +1,5 @@
// +build !amd64 appengine !gc purego
//go:build (!amd64 && !arm64) || appengine || !gc || purego
// +build !amd64,!arm64 appengine !gc purego

package xxhash

@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64

if n >= 32 {
v1 := prime1v + prime2
v1 := primes[0] + prime2
v2 := prime2
v3 := uint64(0)
v4 := -prime1v
v4 := -primes[0]
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {

h += uint64(n)

i, end := 0, len(b)
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(b[i:i+8:len(b)]))
for ; len(b) >= 8; b = b[8:] {
k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
if len(b) >= 4 {
h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
b = b[4:]
}
for ; i < end; i++ {
h ^= uint64(b[i]) * prime5
for ; len(b) > 0; b = b[1:] {
h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
}
1 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go (generated, vendored)
@ -1,3 +1,4 @@
//go:build appengine
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.
3 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go (generated, vendored)
@ -1,3 +1,4 @@
//go:build !appengine
// +build !appengine

// This file encapsulates usage of unsafe.
@ -11,7 +12,7 @@ import (

// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
40 vendor/github.com/containerd/cgroups/v3/cgroup1/utils.go (generated, vendored)
@ -19,7 +19,6 @@ package cgroup1
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
@ -180,7 +179,7 @@ func parseKV(raw string) (string, uint64, error) {
|
||||
// etc.
|
||||
//
|
||||
// The resulting map does not have an element for cgroup v2 unified hierarchy.
|
||||
// Use ParseCgroupFileUnified to get the unified path.
|
||||
// Use [cgroups.ParseCgroupFileUnified] to get the unified path.
|
||||
func ParseCgroupFile(path string) (map[string]string, error) {
|
||||
x, _, err := ParseCgroupFileUnified(path)
|
||||
return x, err
|
||||
@ -188,41 +187,10 @@ func ParseCgroupFile(path string) (map[string]string, error) {
|
||||
|
||||
// ParseCgroupFileUnified returns legacy subsystem paths as the first value,
|
||||
// and returns the unified path as the second value.
|
||||
//
|
||||
// Deprecated: use [cgroups.ParseCgroupFileUnified] instead .
|
||||
func ParseCgroupFileUnified(path string) (map[string]string, string, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
defer f.Close()
|
||||
return parseCgroupFromReaderUnified(f)
|
||||
}
|
||||
|
||||
func parseCgroupFromReaderUnified(r io.Reader) (map[string]string, string, error) {
|
||||
var (
|
||||
cgroups = make(map[string]string)
|
||||
unified = ""
|
||||
s = bufio.NewScanner(r)
|
||||
)
|
||||
for s.Scan() {
|
||||
var (
|
||||
text = s.Text()
|
||||
parts = strings.SplitN(text, ":", 3)
|
||||
)
|
||||
if len(parts) < 3 {
|
||||
return nil, unified, fmt.Errorf("invalid cgroup entry: %q", text)
|
||||
}
|
||||
for _, subs := range strings.Split(parts[1], ",") {
|
||||
if subs == "" {
|
||||
unified = parts[2]
|
||||
} else {
|
||||
cgroups[subs] = parts[2]
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, unified, err
|
||||
}
|
||||
return cgroups, unified, nil
|
||||
return cgroups.ParseCgroupFileUnified(path)
|
||||
}
|
||||
|
||||
func getCgroupDestination(subsystem string) (string, error) {
|
||||
|
43 vendor/github.com/containerd/cgroups/v3/utils.go (generated, vendored)
@ -19,8 +19,10 @@ package cgroups
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"

"golang.org/x/sys/unix"
@ -105,3 +107,44 @@ func RunningInUserNS() bool {
})
return inUserNS
}

// ParseCgroupFileUnified returns legacy subsystem paths as the first value,
// and returns the unified path as the second value.
func ParseCgroupFileUnified(path string) (map[string]string, string, error) {
f, err := os.Open(path)
if err != nil {
return nil, "", err
}
defer f.Close()
return ParseCgroupFromReaderUnified(f)
}

// ParseCgroupFromReaderUnified returns legacy subsystem paths as the first value,
// and returns the unified path as the second value.
func ParseCgroupFromReaderUnified(r io.Reader) (map[string]string, string, error) {
var (
cgroups = make(map[string]string)
unified = ""
s = bufio.NewScanner(r)
)
for s.Scan() {
var (
text = s.Text()
parts = strings.SplitN(text, ":", 3)
)
if len(parts) < 3 {
return nil, unified, fmt.Errorf("invalid cgroup entry: %q", text)
}
for _, subs := range strings.Split(parts[1], ",") {
if subs == "" {
unified = parts[2]
} else {
cgroups[subs] = parts[2]
}
}
}
if err := s.Err(); err != nil {
return nil, unified, err
}
return cgroups, unified, nil
}
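The two helpers added above move /proc cgroup-file parsing into the root cgroups package; the cgroup1 copy earlier in this diff is deprecated and now delegates to cgroups.ParseCgroupFileUnified. A small usage sketch; the sample file content is made up for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/containerd/cgroups/v3"
)

func main() {
	// Typical hybrid-hierarchy content of /proc/self/cgroup (illustrative only).
	sample := "12:cpu,cpuacct:/user.slice\n" +
		"3:memory:/user.slice\n" +
		"0::/user.slice/user-1000.slice/session-1.scope\n"

	legacy, unified, err := cgroups.ParseCgroupFromReaderUnified(strings.NewReader(sample))
	if err != nil {
		panic(err)
	}
	fmt.Println(legacy["memory"]) // /user.slice
	fmt.Println(unified)          // /user.slice/user-1000.slice/session-1.scope
}
```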
30 vendor/github.com/containerd/fifo/.golangci.yml (generated, vendored)
@ -1,19 +1,35 @@
|
||||
linters:
|
||||
enable:
|
||||
- structcheck
|
||||
- varcheck
|
||||
- staticcheck
|
||||
- unconvert
|
||||
- exportloopref # Checks for pointers to enclosing loop variables
|
||||
- gofmt
|
||||
- goimports
|
||||
- golint
|
||||
- gosec
|
||||
- ineffassign
|
||||
- vet
|
||||
- unused
|
||||
- misspell
|
||||
- nolintlint
|
||||
- revive
|
||||
- staticcheck
|
||||
- tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
|
||||
- unconvert
|
||||
- unused
|
||||
- vet
|
||||
- dupword # Checks for duplicate words in the source code
|
||||
disable:
|
||||
- errcheck
|
||||
|
||||
linters-settings:
|
||||
gosec:
|
||||
# The following issues surfaced when `gosec` linter
|
||||
# was enabled. They are temporarily excluded to unblock
|
||||
# the existing workflow, but still to be addressed by
|
||||
# future works.
|
||||
excludes:
|
||||
- G204
|
||||
- G305
|
||||
- G306
|
||||
- G402
|
||||
- G404
|
||||
|
||||
run:
|
||||
timeout: 3m
|
||||
skip-dirs:
|
||||
|
24 vendor/github.com/containerd/fifo/fifo.go (generated, vendored)
@ -1,4 +1,4 @@
|
||||
// +build !windows
|
||||
//go:build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
@ -20,13 +20,13 @@ package fifo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
@ -48,12 +48,12 @@ var leakCheckWg *sync.WaitGroup
|
||||
func OpenFifoDup2(ctx context.Context, fn string, flag int, perm os.FileMode, fd int) (io.ReadWriteCloser, error) {
|
||||
f, err := openFifo(ctx, fn, flag, perm)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "fifo error")
|
||||
return nil, fmt.Errorf("fifo error: %w", err)
|
||||
}
|
||||
|
||||
if err := unix.Dup2(int(f.file.Fd()), fd); err != nil {
|
||||
_ = f.Close()
|
||||
return nil, errors.Wrap(err, "dup2 error")
|
||||
return nil, fmt.Errorf("dup2 error: %w", err)
|
||||
}
|
||||
|
||||
return f, nil
|
||||
@ -70,14 +70,20 @@ func OpenFifoDup2(ctx context.Context, fn string, flag int, perm os.FileMode, fd
// fifo isn't open. read/write will be connected after the actual fifo is
// open or after fifo is closed.
func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
return openFifo(ctx, fn, flag, perm)
fifo, err := openFifo(ctx, fn, flag, perm)
if fifo == nil {
// Do not return a non-nil ReadWriteCloser((*fifo)(nil)) value
// as that can confuse callers.
return nil, err
}
return fifo, err
}

func openFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (*fifo, error) {
|
||||
if _, err := os.Stat(fn); err != nil {
|
||||
if os.IsNotExist(err) && flag&syscall.O_CREAT != 0 {
|
||||
if err := syscall.Mkfifo(fn, uint32(perm&os.ModePerm)); err != nil && !os.IsExist(err) {
|
||||
return nil, errors.Wrapf(err, "error creating fifo %v", fn)
|
||||
return nil, fmt.Errorf("error creating fifo %v: %w", fn, err)
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
@ -138,7 +144,7 @@ func openFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (*fifo
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
default:
|
||||
err = errors.Errorf("fifo %v was closed before opening", h.Name())
|
||||
err = fmt.Errorf("fifo %v was closed before opening", h.Name())
|
||||
}
|
||||
if file != nil {
|
||||
file.Close()
|
||||
@ -206,6 +212,10 @@ func (f *fifo) Write(b []byte) (int, error) {
|
||||
// before open(2) has returned and fifo was never opened.
|
||||
func (f *fifo) Close() (retErr error) {
|
||||
for {
|
||||
if f == nil {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-f.closed:
|
||||
f.handle.Close()
|
||||
|
16 vendor/github.com/containerd/fifo/handle_linux.go (generated, vendored)
@ -1,4 +1,4 @@
|
||||
// +build linux
|
||||
//go:build linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
@ -23,11 +23,9 @@ import (
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
//nolint:golint
|
||||
//nolint:revive
|
||||
const O_PATH = 010000000
|
||||
|
||||
type handle struct {
|
||||
@ -42,7 +40,7 @@ type handle struct {
|
||||
func getHandle(fn string) (*handle, error) {
|
||||
f, err := os.OpenFile(fn, O_PATH, 0)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to open %v with O_PATH", fn)
|
||||
return nil, fmt.Errorf("failed to open %v with O_PATH: %w", fn, err)
|
||||
}
|
||||
|
||||
var (
|
||||
@ -51,7 +49,7 @@ func getHandle(fn string) (*handle, error) {
|
||||
)
|
||||
if err := syscall.Fstat(int(fd), &stat); err != nil {
|
||||
f.Close()
|
||||
return nil, errors.Wrapf(err, "failed to stat handle %v", fd)
|
||||
return nil, fmt.Errorf("failed to stat handle %v: %w", fd, err)
|
||||
}
|
||||
|
||||
h := &handle{
|
||||
@ -66,7 +64,7 @@ func getHandle(fn string) (*handle, error) {
|
||||
// check /proc just in case
|
||||
if _, err := os.Stat(h.procPath()); err != nil {
|
||||
f.Close()
|
||||
return nil, errors.Wrapf(err, "couldn't stat %v", h.procPath())
|
||||
return nil, fmt.Errorf("couldn't stat %v: %w", h.procPath(), err)
|
||||
}
|
||||
|
||||
return h, nil
|
||||
@ -83,11 +81,11 @@ func (h *handle) Name() string {
|
||||
func (h *handle) Path() (string, error) {
|
||||
var stat syscall.Stat_t
|
||||
if err := syscall.Stat(h.procPath(), &stat); err != nil {
|
||||
return "", errors.Wrapf(err, "path %v could not be statted", h.procPath())
|
||||
return "", fmt.Errorf("path %v could not be statted: %w", h.procPath(), err)
|
||||
}
|
||||
//nolint:unconvert
|
||||
if uint64(stat.Dev) != h.dev || stat.Ino != h.ino {
|
||||
return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
|
||||
return "", fmt.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
|
||||
}
|
||||
return h.procPath(), nil
|
||||
}
|
||||
|
17 vendor/github.com/containerd/fifo/handle_nolinux.go (generated, vendored)
@ -1,4 +1,4 @@
|
||||
// +build !linux,!windows
|
||||
//go:build !linux && !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
@ -19,9 +19,8 @@
|
||||
package fifo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type handle struct {
|
||||
@ -33,13 +32,13 @@ type handle struct {
|
||||
func getHandle(fn string) (*handle, error) {
|
||||
var stat syscall.Stat_t
|
||||
if err := syscall.Stat(fn, &stat); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to stat %v", fn)
|
||||
return nil, fmt.Errorf("failed to stat %v: %w", fn, err)
|
||||
}
|
||||
|
||||
h := &handle{
|
||||
fn: fn,
|
||||
dev: uint64(stat.Dev), //nolint: unconvert
|
||||
ino: uint64(stat.Ino), //nolint: unconvert
|
||||
dev: uint64(stat.Dev), //nolint:unconvert,nolintlint
|
||||
ino: uint64(stat.Ino), //nolint:unconvert,nolintlint
|
||||
}
|
||||
|
||||
return h, nil
|
||||
@ -48,10 +47,10 @@ func getHandle(fn string) (*handle, error) {
|
||||
func (h *handle) Path() (string, error) {
|
||||
var stat syscall.Stat_t
|
||||
if err := syscall.Stat(h.fn, &stat); err != nil {
|
||||
return "", errors.Wrapf(err, "path %v could not be statted", h.fn)
|
||||
return "", fmt.Errorf("path %v could not be statted: %w", h.fn, err)
|
||||
}
|
||||
if uint64(stat.Dev) != h.dev || uint64(stat.Ino) != h.ino { //nolint: unconvert
|
||||
return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn)
|
||||
if uint64(stat.Dev) != h.dev || uint64(stat.Ino) != h.ino { //nolint:unconvert,nolintlint
|
||||
return "", fmt.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn)
|
||||
}
|
||||
return h.fn, nil
|
||||
}
|
||||
|
2 vendor/github.com/containerd/fifo/raw.go (generated, vendored)
@ -1,4 +1,4 @@
// +build !windows
//go:build !windows

/*
Copyright The containerd Authors.
15 vendor/github.com/emicklei/go-restful/v3/CHANGES.md (generated, vendored)
@ -1,10 +1,21 @@
# Change history of go-restful

## [v3.9.0] - 20221-07-21
## [v3.10.1] - 2022-11-19

- fix broken 3.10.0 by using path package for joining paths

## [v3.10.0] - 2022-10-11 - BROKEN

- changed tokenizer to match std route match behavior; do not trimright the path (#511)
- Add MIME_ZIP (#512)
- Add MIME_ZIP and HEADER_ContentDisposition (#513)
- Changed how to get query parameter issue #510

## [v3.9.0] - 2022-07-21

- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci)

## [v3.8.0] - 20221-06-06
## [v3.8.0] - 2022-06-06

- use exact matching of allowed domain entries, issue #489 (#493)
- this changes fixes [security] Authorization Bypass Through User-Controlled Key
2 vendor/github.com/emicklei/go-restful/v3/constants.go (generated, vendored)
@ -7,12 +7,14 @@ package restful
const (
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default

HEADER_Allow = "Allow"
HEADER_Accept = "Accept"
HEADER_Origin = "Origin"
HEADER_ContentType = "Content-Type"
HEADER_ContentDisposition = "Content-Disposition"
HEADER_LastModified = "Last-Modified"
HEADER_AcceptEncoding = "Accept-Encoding"
HEADER_ContentEncoding = "Content-Encoding"
3 vendor/github.com/emicklei/go-restful/v3/request.go (generated, vendored)
@ -31,6 +31,7 @@ func NewRequest(httpRequest *http.Request) *Request {
// a "Unable to unmarshal content of type:" response is returned.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
//
// restful.DefaultRequestContentType(restful.MIME_JSON)
func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime
@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string {

// QueryParameter returns the (first) Query parameter value by its name
func (r *Request) QueryParameter(name string) string {
return r.Request.FormValue(name)
return r.Request.URL.Query().Get(name)
}

// QueryParameters returns the all the query parameters values by name
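QueryParameter now reads r.Request.URL.Query().Get(name) instead of r.Request.FormValue(name). FormValue also parses the body of form-encoded POST/PUT/PATCH requests, and body values take precedence in r.Form, so a posted field could shadow a genuine query parameter; reading the URL query directly avoids that. A plain net/http sketch of the difference (no go-restful types involved):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	body := strings.NewReader("name=from-body")
	req := httptest.NewRequest(http.MethodPost, "/hello?name=from-query", body)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	// FormValue parses the form body of POST/PUT/PATCH requests, and body
	// values take precedence in r.Form, so the posted field shadows the
	// query parameter. URL.Query().Get only ever looks at the URL.
	fmt.Println(req.FormValue("name"))       // from-body
	fmt.Println(req.URL.Query().Get("name")) // from-query
}
```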
3 vendor/github.com/emicklei/go-restful/v3/response.go (generated, vendored)
@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
if DefaultResponseMimeType == MIME_XML {
return entityAccessRegistry.accessorAt(MIME_XML)
}
if DefaultResponseMimeType == MIME_ZIP {
return entityAccessRegistry.accessorAt(MIME_ZIP)
}
// Fallback to whatever the route says it can produce.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
for _, each := range r.routeProduces {
4 vendor/github.com/emicklei/go-restful/v3/route.go (generated, vendored)
@ -164,7 +164,7 @@ func tokenizePath(path string) []string {
if "/" == path {
return nil
}
return strings.Split(strings.Trim(path, "/"), "/")
return strings.Split(strings.TrimLeft(path, "/"), "/")
}

// for debugging
@ -176,3 +176,5 @@ func (r *Route) String() string {
func (r *Route) EnableContentEncoding(enabled bool) {
r.contentEncodingEnabled = &enabled
}

var TrimRightSlashEnabled = false
4 vendor/github.com/emicklei/go-restful/v3/route_builder.go (generated, vendored)
@ -7,6 +7,7 @@ package restful
import (
"fmt"
"os"
"path"
"reflect"
"runtime"
"strings"
@ -46,6 +47,7 @@ type RouteBuilder struct {
// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
//
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
// func Returns500(b *RouteBuilder) {
@ -352,7 +354,7 @@ func (b *RouteBuilder) Build() Route {
}

func concatPath(path1, path2 string) string {
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
return path.Join(path1, path2)
}

var anonymousFuncCount int32
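concatPath now routes through path.Join, which cleans the joined path (collapsing duplicate slashes and dropping a trailing slash) rather than doing trim-and-concatenate; the CHANGES.md entry above records this as the fix for the broken v3.10.0. A quick standalone comparison of the two behaviours:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// oldConcat mirrors the previous trim-based concatenation.
func oldConcat(p1, p2 string) string {
	return strings.TrimRight(p1, "/") + "/" + strings.TrimLeft(p2, "/")
}

func main() {
	fmt.Println(oldConcat("/users/", "/{id}/")) // /users/{id}/
	fmt.Println(path.Join("/users/", "/{id}/")) // /users/{id}
	fmt.Println(oldConcat("/users", ""))        // /users/
	fmt.Println(path.Join("/users", ""))        // /users
}
```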
32 vendor/github.com/imdario/mergo/README.md (generated, vendored)
@ -8,8 +8,7 @@
|
||||
[![Coverage Status][9]][10]
|
||||
[![Sourcegraph][11]][12]
|
||||
[![FOSSA Status][13]][14]
|
||||
|
||||
[![GoCenter Kudos][15]][16]
|
||||
[![Become my sponsor][15]][16]
|
||||
|
||||
[1]: https://travis-ci.org/imdario/mergo.png
|
||||
[2]: https://travis-ci.org/imdario/mergo
|
||||
@ -25,8 +24,8 @@
|
||||
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
|
||||
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
|
||||
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
|
||||
[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
|
||||
[16]: https://search.gocenter.io/github.com/imdario/mergo
|
||||
[15]: https://img.shields.io/github/sponsors/imdario
|
||||
[16]: https://github.com/sponsors/imdario
|
||||
|
||||
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
|
||||
|
||||
@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
|
||||
|
||||
## Status
|
||||
|
||||
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
|
||||
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
|
||||
|
||||
### Important note
|
||||
|
||||
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules.
|
||||
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
|
||||
|
||||
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
|
||||
|
||||
@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works
|
||||
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
|
||||
|
||||
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
|
||||
[](https://beerpay.io/imdario/mergo)
|
||||
[](https://beerpay.io/imdario/mergo)
|
||||
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
|
||||
<a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>
|
||||
|
||||
### Mergo in the wild
|
||||
|
||||
- [cli/cli](https://github.com/cli/cli)
|
||||
- [moby/moby](https://github.com/moby/moby)
|
||||
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
|
||||
- [vmware/dispatch](https://github.com/vmware/dispatch)
|
||||
@ -98,6 +97,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
|
||||
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
|
||||
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
|
||||
- [containerssh/containerssh](https://github.com/containerssh/containerssh)
|
||||
- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
|
||||
- [tjpnz/structbot](https://github.com/tjpnz/structbot)
|
||||
|
||||
## Install
|
||||
|
||||
@ -168,7 +169,7 @@ func main() {
|
||||
|
||||
Note: if test are failing due missing package, please execute:
|
||||
|
||||
go get gopkg.in/yaml.v2
|
||||
go get gopkg.in/yaml.v3
|
||||
|
||||
### Transformers
|
||||
|
||||
@ -218,7 +219,6 @@ func main() {
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Contact me
|
||||
|
||||
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
|
||||
@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don
|
||||
|
||||
Written by [Dario Castañé](http://dario.im).
|
||||
|
||||
## Top Contributors
|
||||
|
||||
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
|
||||
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
|
||||
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
|
||||
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
|
||||
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
|
||||
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
|
||||
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
|
||||
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
|
||||
|
||||
|
||||
## License
|
||||
|
||||
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
|
||||
|
2 vendor/github.com/imdario/mergo/merge.go (generated, vendored)
@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
visited[h] = &visit{addr, typ, seen}
}

if config.Transformers != nil && !isEmptyValue(dst) {
if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src)
return
4 vendor/github.com/imdario/mergo/mergo.go (generated, vendored)
@ -17,7 +17,7 @@ import (
var (
ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
ErrNotSupported = errors.New("only structs and maps are supported")
ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer")
@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
return
}
vDst = reflect.ValueOf(dst).Elem()
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported
return
}
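resolveValues now accepts a slice as the merge destination, and ErrNotSupported's message is updated to match. A hedged sketch of what that allows; the exact merge semantics for non-empty slice destinations depend on options such as mergo.WithAppendSlice and mergo.WithOverride, so the expected output below is an assumption for the simple empty-destination case:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	dst := []string{}
	src := []string{"a", "b"}

	// With a slice destination this no longer fails with ErrNotSupported.
	// Default options fill an empty destination from src; behaviour for
	// non-empty destinations is controlled by the variadic options.
	if err := mergo.Merge(&dst, src); err != nil {
		panic(err)
	}
	fmt.Println(dst) // expected: [a b]
}
```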
2 vendor/github.com/klauspost/compress/.goreleaser.yml (generated, vendored)
@ -3,7 +3,7 @@
before:
hooks:
- ./gen.sh
- go install mvdan.cc/garble@latest
- go install mvdan.cc/garble@v0.9.3

builds:
-
30 vendor/github.com/klauspost/compress/README.md (generated, vendored)
@ -9,7 +9,6 @@ This package provides various compression algorithms.
|
||||
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
|
||||
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
|
||||
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
|
||||
* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
|
||||
|
||||
[](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
|
||||
[](https://github.com/klauspost/compress/actions/workflows/go.yml)
|
||||
@ -17,6 +16,35 @@ This package provides various compression algorithms.
|
||||
|
||||
# changelog
|
||||
|
||||
* Jan 21st, 2023 (v1.15.15)
|
||||
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
|
||||
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
|
||||
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
|
||||
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
|
||||
|
||||
* Jan 3rd, 2023 (v1.15.14)
|
||||
|
||||
* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
|
||||
* zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720
|
||||
* export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722
|
||||
* s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723
|
||||
|
||||
* Dec 11, 2022 (v1.15.13)
|
||||
* zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
|
||||
* zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
|
||||
|
||||
* Oct 26, 2022 (v1.15.12)
|
||||
|
||||
* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
|
||||
* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
|
||||
|
||||
* Sept 26, 2022 (v1.15.11)
|
||||
|
||||
* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
|
||||
* zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
|
||||
* zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
|
||||
* zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
|
||||
|
||||
* Sept 16, 2022 (v1.15.10)
|
||||
|
||||
* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
|
||||
|
31 vendor/github.com/klauspost/compress/fse/compress.go (generated, vendored)
@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error {
|
||||
c1.encodeZero(tt[src[ip-2]])
|
||||
ip -= 2
|
||||
}
|
||||
src = src[:ip]
|
||||
|
||||
// Main compression loop.
|
||||
switch {
|
||||
case !s.zeroBits && s.actualTableLog <= 8:
|
||||
// We can encode 4 symbols without requiring a flush.
|
||||
// We do not need to check if any output is 0 bits.
|
||||
for ip >= 4 {
|
||||
for ; len(src) >= 4; src = src[:len(src)-4] {
|
||||
s.bw.flush32()
|
||||
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
||||
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
|
||||
c2.encode(tt[v0])
|
||||
c1.encode(tt[v1])
|
||||
c2.encode(tt[v2])
|
||||
c1.encode(tt[v3])
|
||||
ip -= 4
|
||||
}
|
||||
case !s.zeroBits:
|
||||
// We do not need to check if any output is 0 bits.
|
||||
for ip >= 4 {
|
||||
for ; len(src) >= 4; src = src[:len(src)-4] {
|
||||
s.bw.flush32()
|
||||
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
||||
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
|
||||
c2.encode(tt[v0])
|
||||
c1.encode(tt[v1])
|
||||
s.bw.flush32()
|
||||
c2.encode(tt[v2])
|
||||
c1.encode(tt[v3])
|
||||
ip -= 4
|
||||
}
|
||||
case s.actualTableLog <= 8:
|
||||
// We can encode 4 symbols without requiring a flush
|
||||
for ip >= 4 {
|
||||
for ; len(src) >= 4; src = src[:len(src)-4] {
|
||||
s.bw.flush32()
|
||||
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
||||
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
|
||||
c2.encodeZero(tt[v0])
|
||||
c1.encodeZero(tt[v1])
|
||||
c2.encodeZero(tt[v2])
|
||||
c1.encodeZero(tt[v3])
|
||||
ip -= 4
|
||||
}
|
||||
default:
|
||||
for ip >= 4 {
|
||||
for ; len(src) >= 4; src = src[:len(src)-4] {
|
||||
s.bw.flush32()
|
||||
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
|
||||
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
|
||||
c2.encodeZero(tt[v0])
|
||||
c1.encodeZero(tt[v1])
|
||||
s.bw.flush32()
|
||||
c2.encodeZero(tt[v2])
|
||||
c1.encodeZero(tt[v3])
|
||||
ip -= 4
|
||||
}
|
||||
}
|
||||
|
||||
@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) {
|
||||
for _, v := range in {
|
||||
s.count[v]++
|
||||
}
|
||||
m := uint32(0)
|
||||
m, symlen := uint32(0), s.symbolLen
|
||||
for i, v := range s.count[:] {
|
||||
if v == 0 {
|
||||
continue
|
||||
}
|
||||
if v > m {
|
||||
m = v
|
||||
}
|
||||
if v > 0 {
|
||||
s.symbolLen = uint16(i) + 1
|
||||
}
|
||||
symlen = uint16(i) + 1
|
||||
}
|
||||
s.symbolLen = symlen
|
||||
return int(m)
|
||||
}
|
||||
|
||||
|
8 vendor/github.com/klauspost/compress/huff0/bitreader.go (generated, vendored)
@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() {
|
||||
|
||||
// 2 bounds checks.
|
||||
v := b.in[b.off-4 : b.off]
|
||||
v = v[:4]
|
||||
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
b.value |= uint64(low) << (b.bitsRead - 32)
|
||||
b.bitsRead -= 32
|
||||
@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() {
|
||||
return
|
||||
}
|
||||
if b.off > 4 {
|
||||
v := b.in[b.off-4:]
|
||||
v = v[:4]
|
||||
v := b.in[b.off-4 : b.off]
|
||||
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
b.value |= uint64(low) << (b.bitsRead - 32)
|
||||
b.bitsRead -= 32
|
||||
@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() {
|
||||
|
||||
// 2 bounds checks.
|
||||
v := b.in[b.off-4 : b.off]
|
||||
v = v[:4]
|
||||
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
|
||||
b.bitsRead -= 32
|
||||
@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() {
|
||||
return
|
||||
}
|
||||
if b.off > 4 {
|
||||
v := b.in[b.off-4:]
|
||||
v = v[:4]
|
||||
v := b.in[b.off-4 : b.off]
|
||||
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
|
||||
b.bitsRead -= 32
|
||||
|
104 vendor/github.com/klauspost/compress/huff0/compress.go (generated, vendored)
@ -365,30 +365,30 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
|
||||
m := uint32(0)
|
||||
if len(s.prevTable) > 0 {
|
||||
for i, v := range s.count[:] {
|
||||
if v == 0 {
|
||||
continue
|
||||
}
|
||||
if v > m {
|
||||
m = v
|
||||
}
|
||||
if v > 0 {
|
||||
s.symbolLen = uint16(i) + 1
|
||||
if i >= len(s.prevTable) {
|
||||
reuse = false
|
||||
} else {
|
||||
if s.prevTable[i].nBits == 0 {
|
||||
} else if s.prevTable[i].nBits == 0 {
|
||||
reuse = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return int(m), reuse
|
||||
}
|
||||
for i, v := range s.count[:] {
|
||||
if v == 0 {
|
||||
continue
|
||||
}
|
||||
if v > m {
|
||||
m = v
|
||||
}
|
||||
if v > 0 {
|
||||
s.symbolLen = uint16(i) + 1
|
||||
}
|
||||
}
|
||||
return int(m), false
|
||||
}
|
||||
|
||||
@ -484,34 +484,35 @@ func (s *Scratch) buildCTable() error {
|
||||
// Different from reference implementation.
|
||||
huffNode0 := s.nodes[0 : huffNodesLen+1]
|
||||
|
||||
for huffNode[nonNullRank].count == 0 {
|
||||
for huffNode[nonNullRank].count() == 0 {
|
||||
nonNullRank--
|
||||
}
|
||||
|
||||
lowS := int16(nonNullRank)
|
||||
nodeRoot := nodeNb + lowS - 1
|
||||
lowN := nodeNb
|
||||
huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
|
||||
huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
|
||||
huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
|
||||
huffNode[lowS].setParent(nodeNb)
|
||||
huffNode[lowS-1].setParent(nodeNb)
|
||||
nodeNb++
|
||||
lowS -= 2
|
||||
for n := nodeNb; n <= nodeRoot; n++ {
|
||||
huffNode[n].count = 1 << 30
|
||||
huffNode[n].setCount(1 << 30)
|
||||
}
|
||||
// fake entry, strong barrier
|
||||
huffNode0[0].count = 1 << 31
|
||||
huffNode0[0].setCount(1 << 31)
|
||||
|
||||
// create parents
|
||||
for nodeNb <= nodeRoot {
|
||||
var n1, n2 int16
|
||||
if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
|
||||
if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
|
||||
n1 = lowS
|
||||
lowS--
|
||||
} else {
|
||||
n1 = lowN
|
||||
lowN++
|
||||
}
|
||||
if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
|
||||
if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
|
||||
n2 = lowS
|
||||
lowS--
|
||||
} else {
|
||||
@ -519,18 +520,19 @@ func (s *Scratch) buildCTable() error {
|
||||
lowN++
|
||||
}
|
||||
|
||||
huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
|
||||
huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
|
||||
huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
|
||||
huffNode0[n1+1].setParent(nodeNb)
|
||||
huffNode0[n2+1].setParent(nodeNb)
|
||||
nodeNb++
|
||||
}
|
||||
|
||||
// distribute weights (unlimited tree height)
|
||||
huffNode[nodeRoot].nbBits = 0
|
||||
huffNode[nodeRoot].setNbBits(0)
|
||||
for n := nodeRoot - 1; n >= startNode; n-- {
|
||||
huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
|
||||
huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
|
||||
}
|
||||
for n := uint16(0); n <= nonNullRank; n++ {
|
||||
huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
|
||||
huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
|
||||
}
|
||||
s.actualTableLog = s.setMaxHeight(int(nonNullRank))
|
||||
maxNbBits := s.actualTableLog
|
||||
@ -542,7 +544,7 @@ func (s *Scratch) buildCTable() error {
|
||||
var nbPerRank [tableLogMax + 1]uint16
|
||||
var valPerRank [16]uint16
|
||||
for _, v := range huffNode[:nonNullRank+1] {
|
||||
nbPerRank[v.nbBits]++
|
||||
nbPerRank[v.nbBits()]++
|
||||
}
|
||||
// determine stating value per rank
|
||||
{
|
||||
@ -557,7 +559,7 @@ func (s *Scratch) buildCTable() error {
|
||||
|
||||
// push nbBits per symbol, symbol order
|
||||
for _, v := range huffNode[:nonNullRank+1] {
|
||||
s.cTable[v.symbol].nBits = v.nbBits
|
||||
s.cTable[v.symbol()].nBits = v.nbBits()
|
||||
}
|
||||
|
||||
// assign value within rank, symbol order
|
||||
@ -603,12 +605,12 @@ func (s *Scratch) huffSort() {
|
||||
pos := rank[r].current
|
||||
rank[r].current++
|
||||
prev := nodes[(pos-1)&huffNodesMask]
|
||||
for pos > rank[r].base && c > prev.count {
|
||||
for pos > rank[r].base && c > prev.count() {
|
||||
nodes[pos&huffNodesMask] = prev
|
||||
pos--
|
||||
prev = nodes[(pos-1)&huffNodesMask]
|
||||
}
|
||||
nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
|
||||
nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
|
||||
}
|
||||
}
|
||||
|
||||
@ -617,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
|
||||
huffNode := s.nodes[1 : huffNodesLen+1]
|
||||
//huffNode = huffNode[: huffNodesLen]
|
||||
|
||||
largestBits := huffNode[lastNonNull].nbBits
|
||||
largestBits := huffNode[lastNonNull].nbBits()
|
||||
|
||||
// early exit : no elt > maxNbBits
|
||||
if largestBits <= maxNbBits {
|
||||
@ -627,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
|
||||
baseCost := int(1) << (largestBits - maxNbBits)
|
||||
n := uint32(lastNonNull)
|
||||
|
||||
for huffNode[n].nbBits > maxNbBits {
|
||||
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
|
||||
huffNode[n].nbBits = maxNbBits
|
||||
for huffNode[n].nbBits() > maxNbBits {
|
||||
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
|
||||
huffNode[n].setNbBits(maxNbBits)
|
||||
n--
|
||||
}
|
||||
// n stops at huffNode[n].nbBits <= maxNbBits
|
||||
|
||||
for huffNode[n].nbBits == maxNbBits {
|
||||
for huffNode[n].nbBits() == maxNbBits {
|
||||
n--
|
||||
}
|
||||
// n end at index of smallest symbol using < maxNbBits
|
||||
@ -655,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
{
currentNbBits := maxNbBits
for pos := int(n); pos >= 0; pos-- {
if huffNode[pos].nbBits >= currentNbBits {
if huffNode[pos].nbBits() >= currentNbBits {
continue
}
currentNbBits = huffNode[pos].nbBits // < maxNbBits
currentNbBits = huffNode[pos].nbBits() // < maxNbBits
rankLast[maxNbBits-currentNbBits] = uint32(pos)
}
}
@ -675,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
if lowPos == noSymbol {
break
}
highTotal := huffNode[highPos].count
lowTotal := 2 * huffNode[lowPos].count
highTotal := huffNode[highPos].count()
lowTotal := 2 * huffNode[lowPos].count()
if highTotal <= lowTotal {
break
}
@ -692,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
// this rank is no longer empty
rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
}
huffNode[rankLast[nBitsToDecrease]].nbBits++
huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
huffNode[rankLast[nBitsToDecrease]].nbBits())
if rankLast[nBitsToDecrease] == 0 {
/* special case, reached largest symbol */
rankLast[nBitsToDecrease] = noSymbol
} else {
rankLast[nBitsToDecrease]--
if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
}
}
@ -706,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {

for totalCost < 0 { /* Sometimes, cost correction overshoot */
if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
for huffNode[n].nbBits == maxNbBits {
for huffNode[n].nbBits() == maxNbBits {
n--
}
huffNode[n+1].nbBits--
huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
rankLast[1] = n + 1
totalCost++
continue
}
huffNode[rankLast[1]+1].nbBits--
huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
rankLast[1]++
totalCost++
}
@ -722,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
return maxNbBits
}

type nodeElt struct {
count uint32
parent uint16
symbol byte
nbBits uint8
// A nodeElt is the fields
//
// count uint32
// parent uint16
// symbol byte
// nbBits uint8
//
// in some order, all squashed into an integer so that the compiler
// always loads and stores entire nodeElts instead of separate fields.
type nodeElt uint64

func makeNodeElt(count uint32, symbol byte) nodeElt {
return nodeElt(count) | nodeElt(symbol)<<48
}

func (e *nodeElt) count() uint32 { return uint32(*e) }
func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
func (e *nodeElt) symbol() byte { return byte(*e >> 48) }
func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) }

func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) }
func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }
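The hunk above packs nodeElt's four fields into one uint64 so the compiler moves whole nodes with single loads and stores: bits 0-31 hold count, 32-47 parent, 48-55 symbol, 56-63 nbBits. As a quick illustration, here is a standalone copy of that layout trimmed to the accessors used above; it is a sketch for reading the diff, not code from the package:

package main

import "fmt"

// nodeElt mirrors the packed representation introduced above.
type nodeElt uint64

func makeNodeElt(count uint32, symbol byte) nodeElt {
	return nodeElt(count) | nodeElt(symbol)<<48
}

func (e *nodeElt) count() uint32 { return uint32(*e) }
func (e *nodeElt) symbol() byte  { return byte(*e >> 48) }
func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) }

func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }

func main() {
	n := makeNodeElt(1234, 'a')
	n.setNbBits(11)
	fmt.Println(n.count(), n.symbol(), n.nbBits()) // 1234 97 11
}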
2 vendor/github.com/klauspost/compress/huff0/decompress.go generated vendored
@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
b, err := fse.Decompress(in[:iSize], s.fse)
s.fse.Out = nil
if err != nil {
return s, nil, err
return s, nil, fmt.Errorf("fse decompress returned: %w", err)
}
if len(b) > 255 {
return s, nil, errors.New("corrupt input: output table too large")
576 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s generated vendored
@ -4,360 +4,349 @@
|
||||
|
||||
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
|
||||
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
|
||||
XORQ DX, DX
|
||||
|
||||
// Preload values
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVBQZX 8(AX), DI
|
||||
MOVQ 16(AX), SI
|
||||
MOVQ 48(AX), BX
|
||||
MOVQ 24(AX), R9
|
||||
MOVQ 32(AX), R10
|
||||
MOVQ (AX), R11
|
||||
MOVQ 16(AX), BX
|
||||
MOVQ 48(AX), SI
|
||||
MOVQ 24(AX), R8
|
||||
MOVQ 32(AX), R9
|
||||
MOVQ (AX), R10
|
||||
|
||||
// Main loop
|
||||
main_loop:
|
||||
MOVQ SI, R8
|
||||
CMPQ R8, BX
|
||||
XORL DX, DX
|
||||
CMPQ BX, SI
|
||||
SETGE DL
|
||||
|
||||
// br0.fillFast32()
|
||||
MOVQ 32(R11), R12
|
||||
MOVBQZX 40(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
MOVQ 32(R10), R11
|
||||
MOVBQZX 40(R10), R12
|
||||
CMPQ R12, $0x20
|
||||
JBE skip_fill0
|
||||
MOVQ 24(R11), AX
|
||||
SUBQ $0x20, R13
|
||||
MOVQ 24(R10), AX
|
||||
SUBQ $0x20, R12
|
||||
SUBQ $0x04, AX
|
||||
MOVQ (R11), R14
|
||||
MOVQ (R10), R13
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (AX)(R14*1), R14
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ AX, 24(R11)
|
||||
ORQ R14, R12
|
||||
MOVL (AX)(R13*1), R13
|
||||
MOVQ R12, CX
|
||||
SHLQ CL, R13
|
||||
MOVQ AX, 24(R10)
|
||||
ORQ R13, R11
|
||||
|
||||
// exhausted = exhausted || (br0.off < 4)
|
||||
// exhausted += (br0.off < 4)
|
||||
CMPQ AX, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
ADCB $+0, DL
|
||||
|
||||
skip_fill0:
|
||||
// val0 := br0.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br0.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val1 := br0.peekTopBits(peekBits)
|
||||
MOVQ DI, CX
|
||||
MOVQ R12, R14
|
||||
SHRQ CL, R14
|
||||
MOVQ R11, R13
|
||||
SHRQ CL, R13
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br0.advance(uint8(v1.entry))
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// these two writes get coalesced
|
||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
MOVW AX, (R8)
|
||||
MOVW AX, (BX)
|
||||
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 32(R11)
|
||||
MOVB R13, 40(R11)
|
||||
ADDQ R9, R8
|
||||
MOVQ R11, 32(R10)
|
||||
MOVB R12, 40(R10)
|
||||
|
||||
// br1.fillFast32()
|
||||
MOVQ 80(R11), R12
|
||||
MOVBQZX 88(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
MOVQ 80(R10), R11
|
||||
MOVBQZX 88(R10), R12
|
||||
CMPQ R12, $0x20
|
||||
JBE skip_fill1
|
||||
MOVQ 72(R11), AX
|
||||
SUBQ $0x20, R13
|
||||
MOVQ 72(R10), AX
|
||||
SUBQ $0x20, R12
|
||||
SUBQ $0x04, AX
|
||||
MOVQ 48(R11), R14
|
||||
MOVQ 48(R10), R13
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (AX)(R14*1), R14
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ AX, 72(R11)
|
||||
ORQ R14, R12
|
||||
MOVL (AX)(R13*1), R13
|
||||
MOVQ R12, CX
|
||||
SHLQ CL, R13
|
||||
MOVQ AX, 72(R10)
|
||||
ORQ R13, R11
|
||||
|
||||
// exhausted = exhausted || (br1.off < 4)
|
||||
// exhausted += (br1.off < 4)
|
||||
CMPQ AX, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
ADCB $+0, DL
|
||||
|
||||
skip_fill1:
|
||||
// val0 := br1.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br1.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val1 := br1.peekTopBits(peekBits)
|
||||
MOVQ DI, CX
|
||||
MOVQ R12, R14
|
||||
SHRQ CL, R14
|
||||
MOVQ R11, R13
|
||||
SHRQ CL, R13
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br1.advance(uint8(v1.entry))
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// these two writes get coalesced
|
||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
MOVW AX, (R8)
|
||||
MOVW AX, (BX)(R8*1)
|
||||
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 80(R11)
|
||||
MOVB R13, 88(R11)
|
||||
ADDQ R9, R8
|
||||
MOVQ R11, 80(R10)
|
||||
MOVB R12, 88(R10)
|
||||
|
||||
// br2.fillFast32()
|
||||
MOVQ 128(R11), R12
|
||||
MOVBQZX 136(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
MOVQ 128(R10), R11
|
||||
MOVBQZX 136(R10), R12
|
||||
CMPQ R12, $0x20
|
||||
JBE skip_fill2
|
||||
MOVQ 120(R11), AX
|
||||
SUBQ $0x20, R13
|
||||
MOVQ 120(R10), AX
|
||||
SUBQ $0x20, R12
|
||||
SUBQ $0x04, AX
|
||||
MOVQ 96(R11), R14
|
||||
MOVQ 96(R10), R13
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (AX)(R14*1), R14
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ AX, 120(R11)
|
||||
ORQ R14, R12
|
||||
MOVL (AX)(R13*1), R13
|
||||
MOVQ R12, CX
|
||||
SHLQ CL, R13
|
||||
MOVQ AX, 120(R10)
|
||||
ORQ R13, R11
|
||||
|
||||
// exhausted = exhausted || (br2.off < 4)
|
||||
// exhausted += (br2.off < 4)
|
||||
CMPQ AX, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
ADCB $+0, DL
|
||||
|
||||
skip_fill2:
|
||||
// val0 := br2.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br2.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val1 := br2.peekTopBits(peekBits)
|
||||
MOVQ DI, CX
|
||||
MOVQ R12, R14
|
||||
SHRQ CL, R14
|
||||
MOVQ R11, R13
|
||||
SHRQ CL, R13
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br2.advance(uint8(v1.entry))
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// these two writes get coalesced
|
||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
MOVW AX, (R8)
|
||||
MOVW AX, (BX)(R8*2)
|
||||
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 128(R11)
|
||||
MOVB R13, 136(R11)
|
||||
ADDQ R9, R8
|
||||
MOVQ R11, 128(R10)
|
||||
MOVB R12, 136(R10)
|
||||
|
||||
// br3.fillFast32()
|
||||
MOVQ 176(R11), R12
|
||||
MOVBQZX 184(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
MOVQ 176(R10), R11
|
||||
MOVBQZX 184(R10), R12
|
||||
CMPQ R12, $0x20
|
||||
JBE skip_fill3
|
||||
MOVQ 168(R11), AX
|
||||
SUBQ $0x20, R13
|
||||
MOVQ 168(R10), AX
|
||||
SUBQ $0x20, R12
|
||||
SUBQ $0x04, AX
|
||||
MOVQ 144(R11), R14
|
||||
MOVQ 144(R10), R13
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (AX)(R14*1), R14
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ AX, 168(R11)
|
||||
ORQ R14, R12
|
||||
MOVL (AX)(R13*1), R13
|
||||
MOVQ R12, CX
|
||||
SHLQ CL, R13
|
||||
MOVQ AX, 168(R10)
|
||||
ORQ R13, R11
|
||||
|
||||
// exhausted = exhausted || (br3.off < 4)
|
||||
// exhausted += (br3.off < 4)
|
||||
CMPQ AX, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
ADCB $+0, DL
|
||||
|
||||
skip_fill3:
|
||||
// val0 := br3.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br3.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val1 := br3.peekTopBits(peekBits)
|
||||
MOVQ DI, CX
|
||||
MOVQ R12, R14
|
||||
SHRQ CL, R14
|
||||
MOVQ R11, R13
|
||||
SHRQ CL, R13
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br3.advance(uint8(v1.entry))
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// these two writes get coalesced
|
||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
MOVW AX, (R8)
|
||||
LEAQ (R8)(R8*2), CX
|
||||
MOVW AX, (BX)(CX*1)
|
||||
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 176(R11)
|
||||
MOVB R13, 184(R11)
|
||||
ADDQ $0x02, SI
|
||||
MOVQ R11, 176(R10)
|
||||
MOVB R12, 184(R10)
|
||||
ADDQ $0x02, BX
|
||||
TESTB DL, DL
|
||||
JZ main_loop
|
||||
MOVQ ctx+0(FP), AX
|
||||
SUBQ 16(AX), SI
|
||||
SHLQ $0x02, SI
|
||||
MOVQ SI, 40(AX)
|
||||
SUBQ 16(AX), BX
|
||||
SHLQ $0x02, BX
|
||||
MOVQ BX, 40(AX)
|
||||
RET
|
||||
|
||||
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
|
||||
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
|
||||
XORQ DX, DX
|
||||
|
||||
// Preload values
|
||||
MOVQ ctx+0(FP), CX
|
||||
MOVBQZX 8(CX), DI
|
||||
MOVQ 16(CX), BX
|
||||
MOVQ 48(CX), SI
|
||||
MOVQ 24(CX), R9
|
||||
MOVQ 32(CX), R10
|
||||
MOVQ (CX), R11
|
||||
MOVQ 24(CX), R8
|
||||
MOVQ 32(CX), R9
|
||||
MOVQ (CX), R10
|
||||
|
||||
// Main loop
|
||||
main_loop:
|
||||
MOVQ BX, R8
|
||||
CMPQ R8, SI
|
||||
XORL DX, DX
|
||||
CMPQ BX, SI
|
||||
SETGE DL
|
||||
|
||||
// br0.fillFast32()
|
||||
MOVQ 32(R11), R12
|
||||
MOVBQZX 40(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
MOVQ 32(R10), R11
|
||||
MOVBQZX 40(R10), R12
|
||||
CMPQ R12, $0x20
|
||||
JBE skip_fill0
|
||||
MOVQ 24(R11), R14
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, R14
|
||||
MOVQ (R11), R15
|
||||
MOVQ 24(R10), R13
|
||||
SUBQ $0x20, R12
|
||||
SUBQ $0x04, R13
|
||||
MOVQ (R10), R14
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (R14)(R15*1), R15
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R15
|
||||
MOVQ R14, 24(R11)
|
||||
ORQ R15, R12
|
||||
MOVL (R13)(R14*1), R14
|
||||
MOVQ R12, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ R13, 24(R10)
|
||||
ORQ R14, R11
|
||||
|
||||
// exhausted = exhausted || (br0.off < 4)
|
||||
CMPQ R14, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
// exhausted += (br0.off < 4)
|
||||
CMPQ R13, $0x04
|
||||
ADCB $+0, DL
|
||||
|
||||
skip_fill0:
|
||||
// val0 := br0.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br0.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val1 := br0.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v1 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br0.advance(uint8(v1.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
BSWAPL AX
|
||||
|
||||
// val2 := br0.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br0.advance(uint8(v2.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val3 := br0.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v3 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br0.advance(uint8(v3.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
BSWAPL AX
|
||||
|
||||
// these four writes get coalesced
|
||||
@ -365,88 +354,86 @@ skip_fill0:
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||
MOVL AX, (R8)
|
||||
MOVL AX, (BX)
|
||||
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 32(R11)
|
||||
MOVB R13, 40(R11)
|
||||
ADDQ R9, R8
|
||||
MOVQ R11, 32(R10)
|
||||
MOVB R12, 40(R10)
|
||||
|
||||
// br1.fillFast32()
|
||||
MOVQ 80(R11), R12
|
||||
MOVBQZX 88(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
MOVQ 80(R10), R11
|
||||
MOVBQZX 88(R10), R12
|
||||
CMPQ R12, $0x20
|
||||
JBE skip_fill1
|
||||
MOVQ 72(R11), R14
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, R14
|
||||
MOVQ 48(R11), R15
|
||||
MOVQ 72(R10), R13
|
||||
SUBQ $0x20, R12
|
||||
SUBQ $0x04, R13
|
||||
MOVQ 48(R10), R14
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (R14)(R15*1), R15
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R15
|
||||
MOVQ R14, 72(R11)
|
||||
ORQ R15, R12
|
||||
MOVL (R13)(R14*1), R14
|
||||
MOVQ R12, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ R13, 72(R10)
|
||||
ORQ R14, R11
|
||||
|
||||
// exhausted = exhausted || (br1.off < 4)
|
||||
CMPQ R14, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
// exhausted += (br1.off < 4)
|
||||
CMPQ R13, $0x04
|
||||
ADCB $+0, DL
|
||||
|
||||
skip_fill1:
|
||||
// val0 := br1.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br1.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val1 := br1.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v1 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br1.advance(uint8(v1.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
BSWAPL AX
|
||||
|
||||
// val2 := br1.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br1.advance(uint8(v2.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val3 := br1.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v3 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br1.advance(uint8(v3.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
BSWAPL AX
|
||||
|
||||
// these four writes get coalesced
|
||||
@ -454,88 +441,86 @@ skip_fill1:
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||
MOVL AX, (R8)
|
||||
MOVL AX, (BX)(R8*1)
|
||||
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 80(R11)
|
||||
MOVB R13, 88(R11)
|
||||
ADDQ R9, R8
|
||||
MOVQ R11, 80(R10)
|
||||
MOVB R12, 88(R10)
|
||||
|
||||
// br2.fillFast32()
|
||||
MOVQ 128(R11), R12
|
||||
MOVBQZX 136(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
MOVQ 128(R10), R11
|
||||
MOVBQZX 136(R10), R12
|
||||
CMPQ R12, $0x20
|
||||
JBE skip_fill2
|
||||
MOVQ 120(R11), R14
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, R14
|
||||
MOVQ 96(R11), R15
|
||||
MOVQ 120(R10), R13
|
||||
SUBQ $0x20, R12
|
||||
SUBQ $0x04, R13
|
||||
MOVQ 96(R10), R14
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (R14)(R15*1), R15
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R15
|
||||
MOVQ R14, 120(R11)
|
||||
ORQ R15, R12
|
||||
MOVL (R13)(R14*1), R14
|
||||
MOVQ R12, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ R13, 120(R10)
|
||||
ORQ R14, R11
|
||||
|
||||
// exhausted = exhausted || (br2.off < 4)
|
||||
CMPQ R14, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
// exhausted += (br2.off < 4)
|
||||
CMPQ R13, $0x04
|
||||
ADCB $+0, DL
|
||||
|
||||
skip_fill2:
|
||||
// val0 := br2.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br2.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val1 := br2.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v1 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br2.advance(uint8(v1.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
BSWAPL AX
|
||||
|
||||
// val2 := br2.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br2.advance(uint8(v2.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val3 := br2.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v3 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br2.advance(uint8(v3.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
BSWAPL AX
|
||||
|
||||
// these four writes get coalesced
|
||||
@ -543,88 +528,86 @@ skip_fill2:
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||
MOVL AX, (R8)
|
||||
MOVL AX, (BX)(R8*2)
|
||||
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 128(R11)
|
||||
MOVB R13, 136(R11)
|
||||
ADDQ R9, R8
|
||||
MOVQ R11, 128(R10)
|
||||
MOVB R12, 136(R10)
|
||||
|
||||
// br3.fillFast32()
|
||||
MOVQ 176(R11), R12
|
||||
MOVBQZX 184(R11), R13
|
||||
CMPQ R13, $0x20
|
||||
MOVQ 176(R10), R11
|
||||
MOVBQZX 184(R10), R12
|
||||
CMPQ R12, $0x20
|
||||
JBE skip_fill3
|
||||
MOVQ 168(R11), R14
|
||||
SUBQ $0x20, R13
|
||||
SUBQ $0x04, R14
|
||||
MOVQ 144(R11), R15
|
||||
MOVQ 168(R10), R13
|
||||
SUBQ $0x20, R12
|
||||
SUBQ $0x04, R13
|
||||
MOVQ 144(R10), R14
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVL (R14)(R15*1), R15
|
||||
MOVQ R13, CX
|
||||
SHLQ CL, R15
|
||||
MOVQ R14, 168(R11)
|
||||
ORQ R15, R12
|
||||
MOVL (R13)(R14*1), R14
|
||||
MOVQ R12, CX
|
||||
SHLQ CL, R14
|
||||
MOVQ R13, 168(R10)
|
||||
ORQ R14, R11
|
||||
|
||||
// exhausted = exhausted || (br3.off < 4)
|
||||
CMPQ R14, $0x04
|
||||
SETLT AL
|
||||
ORB AL, DL
|
||||
// exhausted += (br3.off < 4)
|
||||
CMPQ R13, $0x04
|
||||
ADCB $+0, DL
|
||||
|
||||
skip_fill3:
|
||||
// val0 := br3.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br3.advance(uint8(v0.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val1 := br3.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v1 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br3.advance(uint8(v1.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
BSWAPL AX
|
||||
|
||||
// val2 := br3.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br3.advance(uint8(v2.entry)
|
||||
MOVB CH, AH
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
|
||||
// val3 := br3.peekTopBits(peekBits)
|
||||
MOVQ R12, R14
|
||||
MOVQ R11, R13
|
||||
MOVQ DI, CX
|
||||
SHRQ CL, R14
|
||||
SHRQ CL, R13
|
||||
|
||||
// v3 := table[val0&mask]
|
||||
MOVW (R10)(R14*2), CX
|
||||
MOVW (R9)(R13*2), CX
|
||||
|
||||
// br3.advance(uint8(v3.entry)
|
||||
MOVB CH, AL
|
||||
SHLQ CL, R12
|
||||
ADDB CL, R13
|
||||
SHLQ CL, R11
|
||||
ADDB CL, R12
|
||||
BSWAPL AX
|
||||
|
||||
// these four writes get coalesced
|
||||
@ -632,11 +615,12 @@ skip_fill3:
|
||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||
MOVL AX, (R8)
|
||||
LEAQ (R8)(R8*2), CX
|
||||
MOVL AX, (BX)(CX*1)
|
||||
|
||||
// update the bitreader structure
|
||||
MOVQ R12, 176(R11)
|
||||
MOVB R13, 184(R11)
|
||||
MOVQ R11, 176(R10)
|
||||
MOVB R12, 184(R10)
|
||||
ADDQ $0x04, BX
|
||||
TESTB DL, DL
|
||||
JZ main_loop
|
||||
@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8
|
||||
MOVQ 16(CX), DX
|
||||
MOVQ 24(CX), BX
|
||||
CMPQ BX, $0x04
|
||||
JB error_max_decoded_size_exeeded
|
||||
JB error_max_decoded_size_exceeded
|
||||
LEAQ (DX)(BX*1), BX
|
||||
MOVQ (CX), SI
|
||||
MOVQ (SI), R8
|
||||
@ -667,7 +651,7 @@ main_loop:
|
||||
// Check if we have room for 4 bytes in the output buffer
|
||||
LEAQ 4(DX), CX
|
||||
CMPQ CX, BX
|
||||
JGE error_max_decoded_size_exeeded
|
||||
JGE error_max_decoded_size_exceeded
|
||||
|
||||
// Decode 4 values
|
||||
CMPQ R11, $0x20
|
||||
@ -744,7 +728,7 @@ loop_condition:
|
||||
RET
|
||||
|
||||
// Report error
|
||||
error_max_decoded_size_exeeded:
|
||||
error_max_decoded_size_exceeded:
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVQ $-1, CX
|
||||
MOVQ CX, 40(AX)
|
||||
@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
|
||||
MOVQ 16(CX), DX
|
||||
MOVQ 24(CX), BX
|
||||
CMPQ BX, $0x04
|
||||
JB error_max_decoded_size_exeeded
|
||||
JB error_max_decoded_size_exceeded
|
||||
LEAQ (DX)(BX*1), BX
|
||||
MOVQ (CX), SI
|
||||
MOVQ (SI), R8
|
||||
@ -772,7 +756,7 @@ main_loop:
|
||||
// Check if we have room for 4 bytes in the output buffer
|
||||
LEAQ 4(DX), CX
|
||||
CMPQ CX, BX
|
||||
JGE error_max_decoded_size_exeeded
|
||||
JGE error_max_decoded_size_exceeded
|
||||
|
||||
// Decode 4 values
|
||||
CMPQ R11, $0x20
|
||||
@ -839,7 +823,7 @@ loop_condition:
|
||||
RET
|
||||
|
||||
// Report error
|
||||
error_max_decoded_size_exeeded:
|
||||
error_max_decoded_size_exceeded:
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVQ $-1, CX
|
||||
MOVQ CX, 40(AX)
|
||||
|
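Throughout the rewritten 4x main loops above, the exhaustion check changes from SETLT/ORB (materialize a boolean and OR it into DL) to CMPQ/ADCB (add the compare's carry into DL), so DL becomes a count of bit readers that ran dry; the loop still only tests whether it is non-zero. The Go-level logic is roughly the following, shown purely as an illustration of the semantics, not code from the package:

// anyExhausted mirrors the "exhausted += (br.off < 4)" accumulation:
// a counter instead of a boolean, checked against zero once per iteration.
func anyExhausted(offs []int) bool {
	exhausted := 0
	for _, off := range offs {
		if off < 4 {
			exhausted++
		}
	}
	return exhausted > 0
}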
22 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go generated vendored
@ -103,6 +103,28 @@ func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}

// EncodeBlockInto exposes encodeBlock but checks dst size.
func EncodeBlockInto(dst, src []byte) (d int) {
if MaxEncodedLen(len(src)) > len(dst) {
return 0
}

// encodeBlock breaks on too big blocks, so split.
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return d
}

// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
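EncodeBlockInto above rejects undersized destinations via MaxEncodedLen and then feeds encodeBlock one maxBlockSize chunk at a time. The head/tail split it relies on is a small Go idiom worth calling out; the following self-contained sketch reproduces just that loop shape with a placeholder chunk size and handler (names not from the package):

package main

import "fmt"

const maxChunk = 4 // stand-in for maxBlockSize, tiny so the output is visible

// processChunks walks src in maxChunk-sized pieces using the same
// "p, src = p[:maxChunk], p[maxChunk:]" split as the vendored code.
func processChunks(src []byte, handle func([]byte)) {
	for len(src) > 0 {
		p := src
		src = nil
		if len(p) > maxChunk {
			p, src = p[:maxChunk], p[maxChunk:]
		}
		handle(p)
	}
}

func main() {
	processChunks([]byte("abcdefghij"), func(p []byte) { fmt.Printf("%q\n", p) })
	// "abcd" "efgh" "ij"
}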
16 vendor/github.com/klauspost/compress/zstd/blockdec.go generated vendored
@ -82,8 +82,9 @@ type blockDec struct {
|
||||
|
||||
err error
|
||||
|
||||
// Check against this crc
|
||||
checkCRC []byte
|
||||
// Check against this crc, if hasCRC is true.
|
||||
checkCRC uint32
|
||||
hasCRC bool
|
||||
|
||||
// Frame to use for singlethreaded decoding.
|
||||
// Should not be used by the decoder itself since parent may be another frame.
|
||||
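The first blockdec.go hunk above replaces the byte-slice checkCRC with a uint32 plus a hasCRC flag, matching the decoder side, which now compares uint32(crc.Sum64()) directly instead of serializing the checksum to four bytes. A hedged sketch of that comparison, assuming an xxhash64 implementation with the same API as github.com/cespare/xxhash/v2 (the vendored code uses an internal copy) and a placeholder error value:

import (
	"errors"
	"fmt"

	"github.com/cespare/xxhash/v2"
)

var errCRCMismatch = errors.New("crc mismatch") // placeholder for the package's ErrCRCMismatch

// checkFrameCRC compares the low 32 bits of the xxhash64 of the decoded
// payload against the checksum carried by the frame, as in the hunks above.
func checkFrameCRC(decoded []byte, hasCRC bool, checkCRC uint32) error {
	if !hasCRC {
		return nil
	}
	if got := uint32(xxhash.Sum64(decoded)); got != checkCRC {
		return fmt.Errorf("%w: got %08x want %08x", errCRCMismatch, got, checkCRC)
	}
	return nil
}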
@ -191,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
|
||||
}
|
||||
|
||||
// Read block data.
|
||||
if cap(b.dataStorage) < cSize {
|
||||
if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
|
||||
// byteBuf doesn't need a destination buffer.
|
||||
if b.lowMem || cSize > maxCompressedBlockSize {
|
||||
b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
|
||||
} else {
|
||||
b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
|
||||
}
|
||||
}
|
||||
if cap(b.dst) <= maxSize {
|
||||
b.dst = make([]byte, 0, maxSize+1)
|
||||
}
|
||||
b.data, err = br.readBig(cSize, b.dataStorage)
|
||||
if err != nil {
|
||||
if debugDecoder {
|
||||
@ -209,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
|
||||
}
|
||||
return err
|
||||
}
|
||||
if cap(b.dst) <= maxSize {
|
||||
b.dst = make([]byte, 0, maxSize+1)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -232,7 +234,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
|
||||
if b.lowMem {
|
||||
b.dst = make([]byte, b.RLESize)
|
||||
} else {
|
||||
b.dst = make([]byte, maxBlockSize)
|
||||
b.dst = make([]byte, maxCompressedBlockSize)
|
||||
}
|
||||
}
|
||||
b.dst = b.dst[:b.RLESize]
|
||||
|
9 vendor/github.com/klauspost/compress/zstd/decodeheader.go generated vendored
@ -4,7 +4,6 @@
|
||||
package zstd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error {
|
||||
}
|
||||
h.HeaderSize += 4
|
||||
b, in := in[:4], in[4:]
|
||||
if !bytes.Equal(b, frameMagic) {
|
||||
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
|
||||
if string(b) != frameMagic {
|
||||
if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
|
||||
return ErrMagicMismatch
|
||||
}
|
||||
if len(in) < 4 {
|
||||
@ -153,7 +152,7 @@ func (h *Header) Decode(in []byte) error {
|
||||
}
|
||||
b, in = in[:size], in[size:]
|
||||
h.HeaderSize += int(size)
|
||||
switch size {
|
||||
switch len(b) {
|
||||
case 1:
|
||||
h.DictionaryID = uint32(b[0])
|
||||
case 2:
|
||||
@ -183,7 +182,7 @@ func (h *Header) Decode(in []byte) error {
|
||||
}
|
||||
b, in = in[:fcsSize], in[fcsSize:]
|
||||
h.HeaderSize += int(fcsSize)
|
||||
switch fcsSize {
|
||||
switch len(b) {
|
||||
case 1:
|
||||
h.FrameContentSize = uint64(b[0])
|
||||
case 2:
|
||||
|
93 vendor/github.com/klauspost/compress/zstd/decoder.go generated vendored
@ -5,7 +5,6 @@
|
||||
package zstd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
@ -41,8 +40,7 @@ type Decoder struct {
|
||||
frame *frameDec
|
||||
|
||||
// Custom dictionaries.
|
||||
// Always uses copies.
|
||||
dicts map[uint32]dict
|
||||
dicts map[uint32]*dict
|
||||
|
||||
// streamWg is the waitgroup for all streams
|
||||
streamWg sync.WaitGroup
|
||||
@ -104,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
|
||||
}
|
||||
|
||||
// Transfer option dicts.
|
||||
d.dicts = make(map[uint32]dict, len(d.o.dicts))
|
||||
d.dicts = make(map[uint32]*dict, len(d.o.dicts))
|
||||
for _, dc := range d.o.dicts {
|
||||
d.dicts[dc.id] = dc
|
||||
}
|
||||
@ -342,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
||||
}
|
||||
return dst, err
|
||||
}
|
||||
if frame.DictionaryID != nil {
|
||||
dict, ok := d.dicts[*frame.DictionaryID]
|
||||
if !ok {
|
||||
return nil, ErrUnknownDictionary
|
||||
}
|
||||
if debugDecoder {
|
||||
println("setting dict", frame.DictionaryID)
|
||||
}
|
||||
frame.history.setDict(&dict)
|
||||
if err = d.setDict(frame); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if frame.WindowSize > d.o.maxWindowSize {
|
||||
if debugDecoder {
|
||||
@ -459,7 +450,11 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
|
||||
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
|
||||
}
|
||||
|
||||
if !d.o.ignoreChecksum && len(next.b) > 0 {
|
||||
if d.o.ignoreChecksum {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(next.b) > 0 {
|
||||
n, err := d.current.crc.Write(next.b)
|
||||
if err == nil {
|
||||
if n != len(next.b) {
|
||||
@ -467,18 +462,16 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
|
||||
}
|
||||
}
|
||||
}
|
||||
if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
|
||||
got := d.current.crc.Sum64()
|
||||
var tmp [4]byte
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
|
||||
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
|
||||
if next.err == nil && next.d != nil && next.d.hasCRC {
|
||||
got := uint32(d.current.crc.Sum64())
|
||||
if got != next.d.checkCRC {
|
||||
if debugDecoder {
|
||||
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
|
||||
printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
|
||||
}
|
||||
d.current.err = ErrCRCMismatch
|
||||
} else {
|
||||
if debugDecoder {
|
||||
println("CRC ok", tmp[:])
|
||||
printf("CRC ok %08x\n", got)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -494,18 +487,12 @@ func (d *Decoder) nextBlockSync() (ok bool) {
|
||||
if !d.syncStream.inFrame {
|
||||
d.frame.history.reset()
|
||||
d.current.err = d.frame.reset(&d.syncStream.br)
|
||||
if d.current.err == nil {
|
||||
d.current.err = d.setDict(d.frame)
|
||||
}
|
||||
if d.current.err != nil {
|
||||
return false
|
||||
}
|
||||
if d.frame.DictionaryID != nil {
|
||||
dict, ok := d.dicts[*d.frame.DictionaryID]
|
||||
if !ok {
|
||||
d.current.err = ErrUnknownDictionary
|
||||
return false
|
||||
} else {
|
||||
d.frame.history.setDict(&dict)
|
||||
}
|
||||
}
|
||||
if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
|
||||
d.current.err = ErrDecoderSizeExceeded
|
||||
return false
|
||||
@ -770,7 +757,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
|
||||
if block.lowMem {
|
||||
block.dst = make([]byte, block.RLESize)
|
||||
} else {
|
||||
block.dst = make([]byte, maxBlockSize)
|
||||
block.dst = make([]byte, maxCompressedBlockSize)
|
||||
}
|
||||
}
|
||||
block.dst = block.dst[:block.RLESize]
|
||||
@ -864,13 +851,8 @@ decodeStream:
|
||||
if debugDecoder && err != nil {
|
||||
println("Frame decoder returned", err)
|
||||
}
|
||||
if err == nil && frame.DictionaryID != nil {
|
||||
dict, ok := d.dicts[*frame.DictionaryID]
|
||||
if !ok {
|
||||
err = ErrUnknownDictionary
|
||||
} else {
|
||||
frame.history.setDict(&dict)
|
||||
}
|
||||
if err == nil {
|
||||
err = d.setDict(frame)
|
||||
}
|
||||
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
|
||||
if debugDecoder {
|
||||
@ -918,18 +900,22 @@ decodeStream:
|
||||
println("next block returned error:", err)
|
||||
}
|
||||
dec.err = err
|
||||
dec.checkCRC = nil
|
||||
dec.hasCRC = false
|
||||
if dec.Last && frame.HasCheckSum && err == nil {
|
||||
crc, err := frame.rawInput.readSmall(4)
|
||||
if err != nil {
|
||||
if len(crc) < 4 {
|
||||
if err == nil {
|
||||
err = io.ErrUnexpectedEOF
|
||||
|
||||
}
|
||||
println("CRC missing?", err)
|
||||
dec.err = err
|
||||
}
|
||||
var tmp [4]byte
|
||||
copy(tmp[:], crc)
|
||||
dec.checkCRC = tmp[:]
|
||||
} else {
|
||||
dec.checkCRC = binary.LittleEndian.Uint32(crc)
|
||||
dec.hasCRC = true
|
||||
if debugDecoder {
|
||||
println("found crc to check:", dec.checkCRC)
|
||||
printf("found crc to check: %08x\n", dec.checkCRC)
|
||||
}
|
||||
}
|
||||
}
|
||||
err = dec.err
|
||||
@ -948,3 +934,20 @@ decodeStream:
|
||||
hist.reset()
|
||||
d.frame.history.b = frameHistCache
|
||||
}
|
||||
|
||||
func (d *Decoder) setDict(frame *frameDec) (err error) {
|
||||
dict, ok := d.dicts[frame.DictionaryID]
|
||||
if ok {
|
||||
if debugDecoder {
|
||||
println("setting dict", frame.DictionaryID)
|
||||
}
|
||||
frame.history.setDict(dict)
|
||||
} else if frame.DictionaryID != 0 {
|
||||
// A zero or missing dictionary id is ambiguous:
|
||||
// either dictionary zero, or no dictionary. In particular,
|
||||
// zstd --patch-from uses this id for the source file,
|
||||
// so only return an error if the dictionary id is not zero.
|
||||
err = ErrUnknownDictionary
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
26 vendor/github.com/klauspost/compress/zstd/decoder_options.go generated vendored
@ -6,6 +6,8 @@ package zstd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/bits"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
@ -18,7 +20,7 @@ type decoderOptions struct {
|
||||
concurrent int
|
||||
maxDecodedSize uint64
|
||||
maxWindowSize uint64
|
||||
dicts []dict
|
||||
dicts []*dict
|
||||
ignoreChecksum bool
|
||||
limitToCap bool
|
||||
decodeBufsBelow int
|
||||
@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption {
|
||||
}
|
||||
|
||||
// WithDecoderDicts allows to register one or more dictionaries for the decoder.
|
||||
// If several dictionaries with the same ID is provided the last one will be used.
|
||||
//
|
||||
// Each slice in dict must be in the [dictionary format] produced by
|
||||
// "zstd --train" from the Zstandard reference implementation.
|
||||
//
|
||||
// If several dictionaries with the same ID are provided, the last one will be used.
|
||||
//
|
||||
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
|
||||
func WithDecoderDicts(dicts ...[]byte) DOption {
|
||||
return func(o *decoderOptions) error {
|
||||
for _, b := range dicts {
|
||||
@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
if err != nil {
return err
}
o.dicts = append(o.dicts, *d)
o.dicts = append(o.dicts, d)
}
return nil
}
}

// WithEncoderDictRaw registers a dictionary that may be used by the decoder.
// The slice content can be arbitrary data.
func WithDecoderDictRaw(id uint32, content []byte) DOption {
return func(o *decoderOptions) error {
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
}
o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
return nil
}
}

// WithDecoderMaxWindow allows to set a maximum window size for decodes.
// This allows rejecting packets that will cause big memory usage.
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
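With dictionaries now held as *dict and the new WithDecoderDictRaw option above, a decoder can register both trained and raw dictionaries, and each frame's dictionary ID selects which one is applied. A hedged usage sketch; trainedDict, rawContent and compressed are placeholders, not data from this PR:

// assumes: import "github.com/klauspost/compress/zstd"
dec, err := zstd.NewReader(nil,
	zstd.WithDecoderDicts(trainedDict),      // "zstd --train" format, ID taken from the dictionary header
	zstd.WithDecoderDictRaw(42, rawContent), // arbitrary bytes registered under ID 42
)
if err != nil {
	return err
}
defer dec.Close()
out, err := dec.DecodeAll(compressed, nil) // uses the dictionary matching the frame's ID, if any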
51 vendor/github.com/klauspost/compress/zstd/dict.go generated vendored
@ -1,7 +1,6 @@
|
||||
package zstd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
@ -20,7 +19,10 @@ type dict struct {
|
||||
content []byte
|
||||
}
|
||||
|
||||
var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
|
||||
const dictMagic = "\x37\xa4\x30\xec"
|
||||
|
||||
// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
|
||||
const dictMaxLength = 1 << 31
|
||||
|
||||
// ID returns the dictionary id or 0 if d is nil.
|
||||
func (d *dict) ID() uint32 {
|
||||
@ -30,14 +32,38 @@ func (d *dict) ID() uint32 {
|
||||
return d.id
|
||||
}
|
||||
|
||||
// DictContentSize returns the dictionary content size or 0 if d is nil.
|
||||
func (d *dict) DictContentSize() int {
|
||||
// ContentSize returns the dictionary content size or 0 if d is nil.
|
||||
func (d *dict) ContentSize() int {
|
||||
if d == nil {
|
||||
return 0
|
||||
}
|
||||
return len(d.content)
|
||||
}
|
||||
|
||||
// Content returns the dictionary content.
|
||||
func (d *dict) Content() []byte {
|
||||
if d == nil {
|
||||
return nil
|
||||
}
|
||||
return d.content
|
||||
}
|
||||
|
||||
// Offsets returns the initial offsets.
|
||||
func (d *dict) Offsets() [3]int {
|
||||
if d == nil {
|
||||
return [3]int{}
|
||||
}
|
||||
return d.offsets
|
||||
}
|
||||
|
||||
// LitEncoder returns the literal encoder.
|
||||
func (d *dict) LitEncoder() *huff0.Scratch {
|
||||
if d == nil {
|
||||
return nil
|
||||
}
|
||||
return d.litEnc
|
||||
}
|
||||
|
||||
// Load a dictionary as described in
|
||||
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
|
||||
func loadDict(b []byte) (*dict, error) {
|
||||
@ -50,7 +76,7 @@ func loadDict(b []byte) (*dict, error) {
|
||||
ofDec: sequenceDec{fse: &fseDecoder{}},
|
||||
mlDec: sequenceDec{fse: &fseDecoder{}},
|
||||
}
|
||||
if !bytes.Equal(b[:4], dictMagic[:]) {
|
||||
if string(b[:4]) != dictMagic {
|
||||
return nil, ErrMagicMismatch
|
||||
}
|
||||
d.id = binary.LittleEndian.Uint32(b[4:8])
|
||||
@ -62,7 +88,7 @@ func loadDict(b []byte) (*dict, error) {
|
||||
var err error
|
||||
d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("loading literal table: %w", err)
|
||||
}
|
||||
d.litEnc.Reuse = huff0.ReusePolicyMust
|
||||
|
||||
@ -120,3 +146,16 @@ func loadDict(b []byte) (*dict, error) {

return &d, nil
}

// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
func InspectDictionary(b []byte) (interface {
ID() uint32
ContentSize() int
Content() []byte
Offsets() [3]int
LitEncoder() *huff0.Scratch
}, error) {
initPredefined()
d, err := loadDict(b)
return d, err
}
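InspectDictionary above exposes the unexported dict type through an anonymous interface so callers can examine a dictionary without the package exporting the type. A hedged usage sketch; dictBytes is a placeholder:

// assumes: import ("fmt"; "github.com/klauspost/compress/zstd")
info, err := zstd.InspectDictionary(dictBytes)
if err != nil {
	return fmt.Errorf("not a valid zstd dictionary: %w", err)
}
fmt.Printf("id=%d content=%d bytes offsets=%v\n", info.ID(), info.ContentSize(), info.Offsets())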
28 vendor/github.com/klauspost/compress/zstd/enc_base.go generated vendored
@ -16,6 +16,7 @@ type fastBase struct {
|
||||
cur int32
|
||||
// maximum offset. Should be at least 2x block size.
|
||||
maxMatchOff int32
|
||||
bufferReset int32
|
||||
hist []byte
|
||||
crc *xxhash.Digest
|
||||
tmp [8]byte
|
||||
@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc {
|
||||
}
|
||||
|
||||
func (e *fastBase) addBlock(src []byte) int32 {
|
||||
if debugAsserts && e.cur > bufferReset {
|
||||
panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
|
||||
if debugAsserts && e.cur > e.bufferReset {
|
||||
panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
|
||||
}
|
||||
// check if we have space already
|
||||
if len(e.hist)+len(src) > cap(e.hist) {
|
||||
@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
|
||||
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
|
||||
}
|
||||
}
|
||||
a := src[s:]
|
||||
b := src[t:]
|
||||
b = b[:len(a)]
|
||||
end := int32((len(a) >> 3) << 3)
|
||||
for i := int32(0); i < end; i += 8 {
|
||||
if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
|
||||
return i + int32(bits.TrailingZeros64(diff)>>3)
|
||||
}
|
||||
}
|
||||
|
||||
a = a[end:]
|
||||
b = b[end:]
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return int32(i) + end
|
||||
}
|
||||
}
|
||||
return int32(len(a)) + end
|
||||
return int32(matchLen(src[s:], src[t:]))
|
||||
}
|
||||
|
||||
// Reset the encoding table.
|
||||
@ -165,13 +149,13 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
|
||||
if singleBlock {
|
||||
e.lowMem = true
|
||||
}
|
||||
e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
|
||||
e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
|
||||
e.lowMem = low
|
||||
}
|
||||
|
||||
// We offset current position so everything will be out of reach.
|
||||
// If above reset line, history will be purged.
|
||||
if e.cur < bufferReset {
|
||||
if e.cur < e.bufferReset {
|
||||
e.cur += e.maxMatchOff + int32(len(e.hist))
|
||||
}
|
||||
e.hist = e.hist[:0]
|
||||
|
63 vendor/github.com/klauspost/compress/zstd/enc_best.go generated vendored
@ -85,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = prevEntry{}
|
||||
}
|
||||
for i := range e.longTable[:] {
|
||||
e.longTable[i] = prevEntry{}
|
||||
}
|
||||
e.table = [bestShortTableSize]prevEntry{}
|
||||
e.longTable = [bestLongTableSize]prevEntry{}
|
||||
e.cur = e.maxMatchOff
|
||||
break
|
||||
}
|
||||
@ -193,8 +189,8 @@ encodeLoop:
|
||||
panic("offset0 was 0")
|
||||
}
|
||||
|
||||
bestOf := func(a, b match) match {
|
||||
if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
|
||||
bestOf := func(a, b *match) *match {
|
||||
if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
@ -220,22 +216,26 @@ encodeLoop:
|
||||
return m
|
||||
}
|
||||
|
||||
best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
|
||||
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
|
||||
best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
|
||||
m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
|
||||
m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
|
||||
m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
|
||||
m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
|
||||
best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
|
||||
|
||||
if canRepeat && best.length < goodEnough {
|
||||
cv32 := uint32(cv >> 8)
|
||||
spp := s + 1
|
||||
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
|
||||
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
|
||||
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
|
||||
m1 := matchAt(spp-offset1, spp, cv32, 1)
|
||||
m2 := matchAt(spp-offset2, spp, cv32, 2)
|
||||
m3 := matchAt(spp-offset3, spp, cv32, 3)
|
||||
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
|
||||
if best.length > 0 {
|
||||
cv32 = uint32(cv >> 24)
|
||||
spp += 2
|
||||
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
|
||||
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
|
||||
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
|
||||
m1 := matchAt(spp-offset1, spp, cv32, 1)
|
||||
m2 := matchAt(spp-offset2, spp, cv32, 2)
|
||||
m3 := matchAt(spp-offset3, spp, cv32, 3)
|
||||
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
|
||||
}
|
||||
}
|
||||
// Load next and check...
|
||||
@ -262,26 +262,33 @@ encodeLoop:
|
||||
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
|
||||
|
||||
// Short at s+1
|
||||
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
|
||||
m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
|
||||
// Long at s+1, s+2
|
||||
best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
|
||||
best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
|
||||
best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
|
||||
best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
|
||||
m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
|
||||
m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
|
||||
m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
|
||||
m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
|
||||
best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
|
||||
if false {
|
||||
// Short at s+3.
|
||||
// Too often worse...
|
||||
best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
|
||||
m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
|
||||
best = bestOf(best, &m)
|
||||
}
|
||||
// See if we can find a better match by checking where the current best ends.
|
||||
// Use that offset to see if we can find a better full match.
|
||||
if sAt := best.s + best.length; sAt < sLimit {
|
||||
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
|
||||
candidateEnd := e.longTable[nextHashL]
|
||||
if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
|
||||
bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
|
||||
if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
|
||||
bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
|
||||
// Start check at a fixed offset to allow for a few mismatches.
|
||||
// For this compression level 2 yields the best results.
|
||||
const skipBeginning = 2
|
||||
if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
|
||||
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
|
||||
bestEnd := bestOf(best, &m)
|
||||
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
|
||||
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
|
||||
bestEnd = bestOf(bestEnd, &m)
|
||||
}
|
||||
best = bestEnd
|
||||
}
|
||||
|
12 vendor/github.com/klauspost/compress/zstd/enc_better.go generated vendored
@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
for i := range e.longTable[:] {
|
||||
e.longTable[i] = prevEntry{}
|
||||
}
|
||||
e.table = [betterShortTableSize]tableEntry{}
|
||||
e.longTable = [betterLongTableSize]prevEntry{}
|
||||
e.cur = e.maxMatchOff
|
||||
break
|
||||
}
|
||||
@ -587,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
|
16 vendor/github.com/klauspost/compress/zstd/enc_dfast.go generated vendored
@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
for i := range e.longTable[:] {
|
||||
e.longTable[i] = tableEntry{}
|
||||
}
|
||||
e.table = [dFastShortTableSize]tableEntry{}
|
||||
e.longTable = [dFastLongTableSize]tableEntry{}
|
||||
e.cur = e.maxMatchOff
|
||||
break
|
||||
}
|
||||
@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
if e.cur >= bufferReset {
|
||||
if e.cur >= e.bufferReset {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
@ -685,7 +681,7 @@ encodeLoop:
|
||||
}
|
||||
|
||||
// We do not store history, so we must offset e.cur to avoid false matches for next user.
|
||||
if e.cur < bufferReset {
|
||||
if e.cur < e.bufferReset {
|
||||
e.cur += int32(len(src))
|
||||
}
|
||||
}
|
||||
@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
|
14 vendor/github.com/klauspost/compress/zstd/enc_fast.go generated vendored
@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
@ -304,13 +304,13 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
)
|
||||
if debugEncoder {
|
||||
if len(src) > maxBlockSize {
|
||||
if len(src) > maxCompressedBlockSize {
|
||||
panic("src too big")
|
||||
}
|
||||
}
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
if e.cur >= bufferReset {
|
||||
if e.cur >= e.bufferReset {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
@ -538,7 +538,7 @@ encodeLoop:
|
||||
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
|
||||
}
|
||||
// We do not store history, so we must offset e.cur to avoid false matches for next user.
|
||||
if e.cur < bufferReset {
|
||||
if e.cur < e.bufferReset {
|
||||
e.cur += int32(len(src))
|
||||
}
|
||||
}
|
||||
@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
|
||||
return
|
||||
}
|
||||
// Protect against e.cur wraparound.
|
||||
for e.cur >= bufferReset {
|
||||
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||
if len(e.hist) == 0 {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
e.table = [tableSize]tableEntry{}
|
||||
e.cur = e.maxMatchOff
|
||||
break
|
||||
}
|
||||
|
35 vendor/github.com/klauspost/compress/zstd/encoder.go generated vendored
@ -8,6 +8,7 @@ import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
rdebug "runtime/debug"
|
||||
"sync"
|
||||
|
||||
@ -639,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
return dst
}

// MaxEncodedSize returns the expected maximum
// size of an encoded block or stream.
func (e *Encoder) MaxEncodedSize(size int) int {
frameHeader := 4 + 2 // magic + frame header & window descriptor
if e.o.dict != nil {
frameHeader += 4
}
// Frame content size:
if size < 256 {
frameHeader++
} else if size < 65536+256 {
frameHeader += 2
} else if size < math.MaxInt32 {
frameHeader += 4
} else {
frameHeader += 8
}
// Final crc
if e.o.crc {
frameHeader += 4
}

// Max overhead is 3 bytes/block.
// There cannot be 0 blocks.
blocks := (size + e.o.blockSize) / e.o.blockSize

// Combine, add padding.
maxSz := frameHeader + 3*blocks + size
if e.o.pad > 1 {
maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
}
return maxSz
}
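MaxEncodedSize above adds up the frame header, the optional dictionary ID and checksum, a worst case of 3 bytes per block, and any skippable-frame padding, so it can be used to pre-size a destination for EncodeAll. A hedged usage sketch; src is a placeholder:

// assumes: import "github.com/klauspost/compress/zstd"
enc, err := zstd.NewWriter(nil) // nil writer is fine when only EncodeAll is used
if err != nil {
	return err
}
defer enc.Close()
dst := make([]byte, 0, enc.MaxEncodedSize(len(src))) // one allocation covers the worst case
dst = enc.EncodeAll(src, dst)                        // appends into dst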
36 vendor/github.com/klauspost/compress/zstd/encoder_options.go generated vendored
@ -3,6 +3,8 @@ package zstd
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/bits"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
@ -47,22 +49,22 @@ func (o encoderOptions) encoder() encoder {
|
||||
switch o.level {
|
||||
case SpeedFastest:
|
||||
if o.dict != nil {
|
||||
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
|
||||
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
|
||||
}
|
||||
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
|
||||
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
|
||||
|
||||
case SpeedDefault:
|
||||
if o.dict != nil {
|
||||
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
|
||||
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
|
||||
}
|
||||
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
|
||||
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
|
||||
case SpeedBetterCompression:
|
||||
if o.dict != nil {
|
||||
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
|
||||
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
|
||||
}
|
||||
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
|
||||
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
|
||||
case SpeedBestCompression:
|
||||
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
|
||||
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
|
||||
}
|
||||
panic("unknown compression level")
|
||||
}
|
||||
@ -304,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption {
}

// WithEncoderDict allows to register a dictionary that will be used for the encode.
//
// The slice dict must be in the [dictionary format] produced by
// "zstd --train" from the Zstandard reference implementation.
//
// The encoder *may* choose to use no dictionary instead for certain payloads.
//
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithEncoderDict(dict []byte) EOption {
return func(o *encoderOptions) error {
d, err := loadDict(dict)
@ -315,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption {
return nil
}
}

// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
//
// The slice content may contain arbitrary data. It will be used as an initial
// history.
func WithEncoderDictRaw(id uint32, content []byte) EOption {
return func(o *encoderOptions) error {
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
}
o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
return nil
}
}
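For orientation, here is a minimal usage sketch of the two dictionary options above, written against the public `github.com/klauspost/compress/zstd` API. The payload, the raw history bytes, and the dictionary ID 42 are made-up placeholders; a trained dictionary for `WithEncoderDict` would normally come from `zstd --train`.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Placeholder data: in practice the raw history would share content
	// with the payloads being compressed.
	payload := []byte("small payload that resembles the shared history")
	rawHistory := bytes.Repeat([]byte("small payload "), 32)

	// WithEncoderDictRaw seeds the encoder with arbitrary bytes as initial
	// history, tagged with a caller-chosen dictionary ID (42 is arbitrary).
	enc, err := zstd.NewWriter(nil,
		zstd.WithEncoderDictRaw(42, rawHistory),
		zstd.WithEncoderLevel(zstd.SpeedFastest),
	)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	compressed := enc.EncodeAll(payload, nil)
	fmt.Printf("compressed %d bytes down to %d\n", len(payload), len(compressed))
}
```

Frames produced this way can only be decoded if the same raw content is registered on the decoder side; the same release adds a matching decoder-side option for that.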
56 vendor/github.com/klauspost/compress/zstd/framedec.go (generated, vendored)
@ -5,7 +5,7 @@
|
||||
package zstd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
@ -29,7 +29,7 @@ type frameDec struct {
|
||||
|
||||
FrameContentSize uint64
|
||||
|
||||
DictionaryID *uint32
|
||||
DictionaryID uint32
|
||||
HasCheckSum bool
|
||||
SingleSegment bool
|
||||
}
|
||||
@ -43,9 +43,9 @@ const (
|
||||
MaxWindowSize = 1 << 29
|
||||
)
|
||||
|
||||
var (
|
||||
frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
|
||||
skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
|
||||
const (
|
||||
frameMagic = "\x28\xb5\x2f\xfd"
|
||||
skippableFrameMagic = "\x2a\x4d\x18"
|
||||
)
|
||||
|
||||
func newFrameDec(o decoderOptions) *frameDec {
|
||||
@ -89,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error {
|
||||
copy(signature[1:], b)
|
||||
}
|
||||
|
||||
if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
|
||||
if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
|
||||
if debugDecoder {
|
||||
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
|
||||
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
|
||||
}
|
||||
// Break if not skippable frame.
|
||||
break
|
||||
@ -114,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !bytes.Equal(signature[:], frameMagic) {
|
||||
if string(signature[:]) != frameMagic {
|
||||
if debugDecoder {
|
||||
println("Got magic numbers: ", signature, "want:", frameMagic)
|
||||
println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
|
||||
}
|
||||
return ErrMagicMismatch
|
||||
}
|
||||
@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
||||
|
||||
// Read Dictionary_ID
|
||||
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
|
||||
d.DictionaryID = nil
|
||||
d.DictionaryID = 0
|
||||
if size := fhd & 3; size != 0 {
|
||||
if size == 3 {
|
||||
size = 4
|
||||
@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
||||
return err
|
||||
}
|
||||
var id uint32
|
||||
switch size {
|
||||
switch len(b) {
|
||||
case 1:
|
||||
id = uint32(b[0])
|
||||
case 2:
|
||||
@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
||||
if debugDecoder {
|
||||
println("Dict size", size, "ID:", id)
|
||||
}
|
||||
if id > 0 {
|
||||
// ID 0 means "sorry, no dictionary anyway".
|
||||
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
|
||||
d.DictionaryID = &id
|
||||
}
|
||||
d.DictionaryID = id
|
||||
}
|
||||
|
||||
// Read Frame_Content_Size
|
||||
@ -204,7 +200,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
||||
println("Reading Frame content", err)
|
||||
return err
|
||||
}
|
||||
switch fcsSize {
|
||||
switch len(b) {
|
||||
case 1:
|
||||
d.FrameContentSize = uint64(b[0])
|
||||
case 2:
|
||||
@ -261,12 +257,17 @@ func (d *frameDec) reset(br byteBuffer) error {
|
||||
}
|
||||
d.history.windowSize = int(d.WindowSize)
|
||||
if !d.o.lowMem || d.history.windowSize < maxBlockSize {
|
||||
// Alloc 2x window size if not low-mem, or very small window size.
|
||||
// Alloc 2x window size if not low-mem, or window size below 2MB.
|
||||
d.history.allocFrameBuffer = d.history.windowSize * 2
|
||||
} else {
|
||||
// Alloc with one additional block
|
||||
if d.o.lowMem {
|
||||
// Alloc with 1MB extra.
|
||||
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
|
||||
} else {
|
||||
// Alloc with 2MB extra.
|
||||
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
|
||||
}
|
||||
}
|
||||
|
||||
if debugDecoder {
|
||||
println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
|
||||
@ -300,7 +301,7 @@ func (d *frameDec) checkCRC() error {
|
||||
}
|
||||
|
||||
// We can overwrite upper tmp now
|
||||
want, err := d.rawInput.readSmall(4)
|
||||
buf, err := d.rawInput.readSmall(4)
|
||||
if err != nil {
|
||||
println("CRC missing?", err)
|
||||
return err
|
||||
@ -310,22 +311,17 @@ func (d *frameDec) checkCRC() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var tmp [4]byte
|
||||
got := d.crc.Sum64()
|
||||
// Flip to match file order.
|
||||
tmp[0] = byte(got >> 0)
|
||||
tmp[1] = byte(got >> 8)
|
||||
tmp[2] = byte(got >> 16)
|
||||
tmp[3] = byte(got >> 24)
|
||||
want := binary.LittleEndian.Uint32(buf[:4])
|
||||
got := uint32(d.crc.Sum64())
|
||||
|
||||
if !bytes.Equal(tmp[:], want) {
|
||||
if got != want {
|
||||
if debugDecoder {
|
||||
println("CRC Check Failed:", tmp[:], "!=", want)
|
||||
printf("CRC check failed: got %08x, want %08x\n", got, want)
|
||||
}
|
||||
return ErrCRCMismatch
|
||||
}
|
||||
if debugDecoder {
|
||||
println("CRC ok", tmp[:])
|
||||
printf("CRC ok %08x\n", got)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
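For reference, the rewritten checkCRC boils down to comparing the four little-endian checksum bytes at the end of the frame against the low 32 bits of the running xxhash64. A standalone sketch of that check, using the public `github.com/cespare/xxhash/v2` and `github.com/klauspost/compress/zstd` packages and made-up data:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cespare/xxhash/v2"
	"github.com/klauspost/compress/zstd"
)

func main() {
	payload := []byte("made-up frame contents") // placeholder data

	// Encode one frame with the content checksum enabled (the default).
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderCRC(true))
	if err != nil {
		panic(err)
	}
	frame := enc.EncodeAll(payload, nil)
	enc.Close()

	// The checksum is the low 32 bits of XXH64 over the original content,
	// stored little-endian in the last 4 bytes of the frame.
	want := binary.LittleEndian.Uint32(frame[len(frame)-4:])
	got := uint32(xxhash.Sum64(payload))

	if got != want {
		fmt.Printf("CRC check failed: got %08x, want %08x\n", got, want)
		return
	}
	fmt.Printf("CRC ok %08x\n", got)
}
```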
|
47 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md (generated, vendored)
@ -2,12 +2,7 @@
|
||||
|
||||
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
|
||||
|
||||
|
||||
[](https://godoc.org/github.com/cespare/xxhash)
|
||||
[](https://travis-ci.org/cespare/xxhash)
|
||||
|
||||
xxhash is a Go implementation of the 64-bit
|
||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
||||
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
|
||||
high-quality hashing algorithm that is much faster than anything in the Go
|
||||
standard library.
|
||||
|
||||
@ -28,8 +23,23 @@ func (*Digest) WriteString(string) (int, error)
|
||||
func (*Digest) Sum64() uint64
|
||||
```
|
||||
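For context, a minimal usage sketch of this API through the public `github.com/cespare/xxhash/v2` module (the copy vendored here is internal to the zstd package and not importable); the input strings are made up:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, xxhash")))

	// Streaming hashing with a Digest.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("xxhash")
	fmt.Printf("%016x\n", d.Sum64())
}
```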
|
||||
This implementation provides a fast pure-Go implementation and an even faster
|
||||
assembly implementation for amd64.
|
||||
The package is written with optimized pure Go and also contains even faster
|
||||
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
|
||||
opts into using the Go code even on those architectures.
|
||||
|
||||
[xxHash]: http://cyan4973.github.io/xxHash/
|
||||
|
||||
## Compatibility
|
||||
|
||||
This package is in a module and the latest code is in version 2 of the module.
|
||||
You need a version of Go with at least "minimal module compatibility" to use
|
||||
github.com/cespare/xxhash/v2:
|
||||
|
||||
* 1.9.7+ for Go 1.9
|
||||
* 1.10.3+ for Go 1.10
|
||||
* Go 1.11 or later
|
||||
|
||||
I recommend using the latest release of Go.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
@ -37,22 +47,25 @@ Here are some quick benchmarks comparing the pure-Go and assembly
|
||||
implementations of Sum64.
|
||||
|
||||
| input size | purego | asm |
|
||||
| --- | --- | --- |
|
||||
| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
||||
| ---------- | --------- | --------- |
|
||||
| 4 B | 1.3 GB/s | 1.2 GB/s |
|
||||
| 16 B | 2.9 GB/s | 3.5 GB/s |
|
||||
| 100 B | 6.9 GB/s | 8.1 GB/s |
|
||||
| 4 KB | 11.7 GB/s | 16.7 GB/s |
|
||||
| 10 MB | 12.0 GB/s | 17.3 GB/s |
|
||||
|
||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
||||
the following commands under Go 1.11.2:
|
||||
These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
|
||||
CPU using the following commands under Go 1.19.2:
|
||||
|
||||
```
|
||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
```
|
||||
|
||||
## Projects using this package
|
||||
|
||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
- [FreeCache](https://github.com/coocood/freecache)
|
||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
||||
|
47 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go (generated, vendored)
@ -18,19 +18,11 @@ const (
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
// Store the primes in an array as well.
|
||||
//
|
||||
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||
// contiguous array of the assembly code.
|
||||
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
type Digest struct {
|
||||
@ -52,10 +44,10 @@ func New() *Digest {
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = prime1v + prime2
|
||||
d.v1 = primes[0] + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -prime1v
|
||||
d.v4 = -primes[0]
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
memleft := d.mem[d.n&(len(d.mem)-1):]
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(d.mem[d.n:], b)
|
||||
copy(memleft, b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
copy(d.mem[d.n:], b)
|
||||
c := copy(memleft, b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[32-d.n:]
|
||||
b = b[c:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 {
|
||||
|
||||
h += d.total
|
||||
|
||||
i, end := 0, d.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(d.mem[i:i+8]))
|
||||
b := d.mem[:d.n&(len(d.mem)-1)]
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
b = b[4:]
|
||||
}
|
||||
for i < end {
|
||||
h ^= uint64(d.mem[i]) * prime5
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
|
308 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s (generated, vendored)
@ -1,3 +1,4 @@
|
||||
//go:build !appengine && gc && !purego && !noasm
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
@ -5,212 +6,205 @@
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// SI pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// DI prime4v
|
||||
// Registers:
|
||||
#define h AX
|
||||
#define d AX
|
||||
#define p SI // pointer to advance through b
|
||||
#define n DX
|
||||
#define end BX // loop end
|
||||
#define v1 R8
|
||||
#define v2 R9
|
||||
#define v3 R10
|
||||
#define v4 R11
|
||||
#define x R12
|
||||
#define prime1 R13
|
||||
#define prime2 R14
|
||||
#define prime4 DI
|
||||
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
#define round(acc, x) \
|
||||
IMULQ prime2, x \
|
||||
ADDQ x, acc \
|
||||
ROLQ $31, acc \
|
||||
IMULQ prime1, acc
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ DI, acc
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
IMULQ prime2, x \
|
||||
ROLQ $31, x \
|
||||
IMULQ prime1, x
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and x.
|
||||
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
XORQ x, acc \
|
||||
IMULQ prime1, acc \
|
||||
ADDQ prime4, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||
// to process.
|
||||
#define blockLoop() \
|
||||
loop: \
|
||||
MOVQ +0(p), x \
|
||||
round(v1, x) \
|
||||
MOVQ +8(p), x \
|
||||
round(v2, x) \
|
||||
MOVQ +16(p), x \
|
||||
round(v3, x) \
|
||||
MOVQ +24(p), x \
|
||||
round(v4, x) \
|
||||
ADDQ $32, p \
|
||||
CMPQ p, end \
|
||||
JLE loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), DI
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
MOVQ ·primes+24(SB), prime4
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
MOVQ b_base+0(FP), p
|
||||
MOVQ b_len+8(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
SUBQ $32, end
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ DX, $32
|
||||
CMPQ n, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
MOVQ prime1, v1
|
||||
ADDQ prime2, v1
|
||||
MOVQ prime2, v2
|
||||
XORQ v3, v3
|
||||
XORQ v4, v4
|
||||
SUBQ prime1, v4
|
||||
|
||||
// Loop until SI > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
blockLoop()
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
MOVQ v1, h
|
||||
ROLQ $1, h
|
||||
MOVQ v2, x
|
||||
ROLQ $7, x
|
||||
ADDQ x, h
|
||||
MOVQ v3, x
|
||||
ROLQ $12, x
|
||||
ADDQ x, h
|
||||
MOVQ v4, x
|
||||
ROLQ $18, x
|
||||
ADDQ x, h
|
||||
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·prime5v(SB), AX
|
||||
MOVQ ·primes+32(SB), h
|
||||
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
ADDQ n, h
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
ADDQ $24, end
|
||||
CMPQ p, end
|
||||
JG try4
|
||||
|
||||
CMPQ SI, BX
|
||||
JG fourByte
|
||||
loop8:
|
||||
MOVQ (p), x
|
||||
ADDQ $8, p
|
||||
round0(x)
|
||||
XORQ x, h
|
||||
ROLQ $27, h
|
||||
IMULQ prime1, h
|
||||
ADDQ prime4, h
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
CMPQ p, end
|
||||
JLE loop8
|
||||
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ DI, AX
|
||||
try4:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JG try1
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE wordLoop
|
||||
MOVL (p), x
|
||||
ADDQ $4, p
|
||||
IMULQ prime1, x
|
||||
XORQ x, h
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
JG singles
|
||||
ROLQ $23, h
|
||||
IMULQ prime2, h
|
||||
ADDQ ·primes+16(SB), h
|
||||
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
try1:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
loop1:
|
||||
MOVBQZX (p), x
|
||||
ADDQ $1, p
|
||||
IMULQ ·primes+32(SB), x
|
||||
XORQ x, h
|
||||
ROLQ $11, h
|
||||
IMULQ prime1, h
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ SI, BX
|
||||
JL singlesLoop
|
||||
CMPQ p, end
|
||||
JL loop1
|
||||
|
||||
finalize:
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
MOVQ h, x
|
||||
SHRQ $33, x
|
||||
XORQ x, h
|
||||
IMULQ prime2, h
|
||||
MOVQ h, x
|
||||
SHRQ $29, x
|
||||
XORQ x, h
|
||||
IMULQ ·primes+16(SB), h
|
||||
MOVQ h, x
|
||||
SHRQ $32, x
|
||||
XORQ x, h
|
||||
|
||||
MOVQ AX, ret+24(FP)
|
||||
MOVQ h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the d pointer.
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
MOVQ b_base+8(FP), p
|
||||
MOVQ b_len+16(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
SUBQ $32, end
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ d+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
MOVQ s+0(FP), d
|
||||
MOVQ 0(d), v1
|
||||
MOVQ 8(d), v2
|
||||
MOVQ 16(d), v3
|
||||
MOVQ 24(d), v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
blockLoop()
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
MOVQ v1, 0(d)
|
||||
MOVQ v2, 8(d)
|
||||
MOVQ v3, 16(d)
|
||||
MOVQ v4, 24(d)
|
||||
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
// The number of bytes written is p minus the old base pointer.
|
||||
SUBQ b_base+8(FP), p
|
||||
MOVQ p, ret+32(FP)
|
||||
|
||||
RET
|
||||
|
122 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s (generated, vendored)
@ -1,13 +1,17 @@
|
||||
// +build gc,!purego,!noasm
|
||||
//go:build !appengine && gc && !purego && !noasm
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation.
|
||||
// Registers:
|
||||
#define digest R1
|
||||
#define h R2 // Return value.
|
||||
#define p R3 // Input pointer.
|
||||
#define len R4
|
||||
#define nblocks R5 // len / 32.
|
||||
#define h R2 // return value
|
||||
#define p R3 // input pointer
|
||||
#define n R4 // input length
|
||||
#define nblocks R5 // n / 32
|
||||
#define prime1 R7
|
||||
#define prime2 R8
|
||||
#define prime3 R9
|
||||
@ -25,60 +29,52 @@
|
||||
#define round(acc, x) \
|
||||
MADD prime2, acc, x, acc \
|
||||
ROR $64-31, acc \
|
||||
MUL prime1, acc \
|
||||
MUL prime1, acc
|
||||
|
||||
// x = round(0, x).
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
MUL prime2, x \
|
||||
ROR $64-31, x \
|
||||
MUL prime1, x \
|
||||
MUL prime1, x
|
||||
|
||||
#define mergeRound(x) \
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
EOR x, h \
|
||||
MADD h, prime4, prime1, h \
|
||||
EOR x, acc \
|
||||
MADD acc, prime4, prime1, acc
|
||||
|
||||
// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
|
||||
#define blocksLoop() \
|
||||
LSR $5, len, nblocks \
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||
#define blockLoop() \
|
||||
LSR $5, n, nblocks \
|
||||
PCALIGN $16 \
|
||||
loop: \
|
||||
LDP.P 32(p), (x1, x2) \
|
||||
LDP.P 16(p), (x1, x2) \
|
||||
LDP.P 16(p), (x3, x4) \
|
||||
round(v1, x1) \
|
||||
LDP -16(p), (x3, x4) \
|
||||
round(v2, x2) \
|
||||
SUB $1, nblocks \
|
||||
round(v3, x3) \
|
||||
round(v4, x4) \
|
||||
CBNZ nblocks, loop \
|
||||
|
||||
// The primes are repeated here to ensure that they're stored
|
||||
// in a contiguous array, so we can load them with LDP.
|
||||
DATA primes<> +0(SB)/8, $11400714785074694791
|
||||
DATA primes<> +8(SB)/8, $14029467366897019727
|
||||
DATA primes<>+16(SB)/8, $1609587929392839161
|
||||
DATA primes<>+24(SB)/8, $9650029242287828579
|
||||
DATA primes<>+32(SB)/8, $2870177450012600261
|
||||
GLOBL primes<>(SB), NOPTR+RODATA, $40
|
||||
SUB $1, nblocks \
|
||||
CBNZ nblocks, loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
|
||||
LDP b_base+0(FP), (p, len)
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
LDP b_base+0(FP), (p, n)
|
||||
|
||||
LDP primes<> +0(SB), (prime1, prime2)
|
||||
LDP primes<>+16(SB), (prime3, prime4)
|
||||
MOVD primes<>+32(SB), prime5
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
LDP ·primes+16(SB), (prime3, prime4)
|
||||
MOVD ·primes+32(SB), prime5
|
||||
|
||||
CMP $32, len
|
||||
CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
|
||||
BLO afterLoop
|
||||
CMP $32, n
|
||||
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||||
BLT afterLoop
|
||||
|
||||
ADD prime1, prime2, v1
|
||||
MOVD prime2, v2
|
||||
MOVD $0, v3
|
||||
NEG prime1, v4
|
||||
|
||||
blocksLoop()
|
||||
blockLoop()
|
||||
|
||||
ROR $64-1, v1, x1
|
||||
ROR $64-7, v2, x2
|
||||
@ -88,71 +84,75 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
|
||||
ADD x3, x4
|
||||
ADD x2, x4, h
|
||||
|
||||
mergeRound(v1)
|
||||
mergeRound(v2)
|
||||
mergeRound(v3)
|
||||
mergeRound(v4)
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
afterLoop:
|
||||
ADD len, h
|
||||
ADD n, h
|
||||
|
||||
TBZ $4, len, try8
|
||||
TBZ $4, n, try8
|
||||
LDP.P 16(p), (x1, x2)
|
||||
|
||||
round0(x1)
|
||||
|
||||
// NOTE: here and below, sequencing the EOR after the ROR (using a
|
||||
// rotated register) is worth a small but measurable speedup for small
|
||||
// inputs.
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
round0(x2)
|
||||
ROR $64-27, h
|
||||
EOR x2 @> 64-27, h
|
||||
EOR x2 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try8:
|
||||
TBZ $3, len, try4
|
||||
TBZ $3, n, try4
|
||||
MOVD.P 8(p), x1
|
||||
|
||||
round0(x1)
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try4:
|
||||
TBZ $2, len, try2
|
||||
TBZ $2, n, try2
|
||||
MOVWU.P 4(p), x2
|
||||
|
||||
MUL prime1, x2
|
||||
ROR $64-23, h
|
||||
EOR x2 @> 64-23, h
|
||||
EOR x2 @> 64-23, h, h
|
||||
MADD h, prime3, prime2, h
|
||||
|
||||
try2:
|
||||
TBZ $1, len, try1
|
||||
TBZ $1, n, try1
|
||||
MOVHU.P 2(p), x3
|
||||
AND $255, x3, x1
|
||||
LSR $8, x3, x2
|
||||
|
||||
MUL prime5, x1
|
||||
ROR $64-11, h
|
||||
EOR x1 @> 64-11, h
|
||||
EOR x1 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
MUL prime5, x2
|
||||
ROR $64-11, h
|
||||
EOR x2 @> 64-11, h
|
||||
EOR x2 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
try1:
|
||||
TBZ $0, len, end
|
||||
TBZ $0, n, finalize
|
||||
MOVBU (p), x4
|
||||
|
||||
MUL prime5, x4
|
||||
ROR $64-11, h
|
||||
EOR x4 @> 64-11, h
|
||||
EOR x4 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
end:
|
||||
finalize:
|
||||
EOR h >> 33, h
|
||||
MUL prime2, h
|
||||
EOR h >> 29, h
|
||||
@ -163,24 +163,22 @@ end:
|
||||
RET
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
//
|
||||
// Assumes len(b) >= 32.
|
||||
TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
|
||||
LDP primes<>(SB), (prime1, prime2)
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
|
||||
// Load state. Assume v[1-4] are stored contiguously.
|
||||
MOVD d+0(FP), digest
|
||||
LDP 0(digest), (v1, v2)
|
||||
LDP 16(digest), (v3, v4)
|
||||
|
||||
LDP b_base+8(FP), (p, len)
|
||||
LDP b_base+8(FP), (p, n)
|
||||
|
||||
blocksLoop()
|
||||
blockLoop()
|
||||
|
||||
// Store updated state.
|
||||
STP (v1, v2), 0(digest)
|
||||
STP (v3, v4), 16(digest)
|
||||
|
||||
BIC $31, len
|
||||
MOVD len, ret+32(FP)
|
||||
BIC $31, n
|
||||
MOVD n, ret+32(FP)
|
||||
RET
|
||||
|
2 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go (generated, vendored)
@ -13,4 +13,4 @@ package xxhash
|
||||
func Sum64(b []byte) uint64
|
||||
|
||||
//go:noescape
|
||||
func writeBlocks(d *Digest, b []byte) int
|
||||
func writeBlocks(s *Digest, b []byte) int
|
||||
|
19 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go (generated, vendored)
@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 {
|
||||
var h uint64
|
||||
|
||||
if n >= 32 {
|
||||
v1 := prime1v + prime2
|
||||
v1 := primes[0] + prime2
|
||||
v2 := prime2
|
||||
v3 := uint64(0)
|
||||
v4 := -prime1v
|
||||
v4 := -primes[0]
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 {
|
||||
|
||||
h += uint64(n)
|
||||
|
||||
i, end := 0, len(b)
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
b = b[4:]
|
||||
}
|
||||
for ; i < end; i++ {
|
||||
h ^= uint64(b[i]) * prime5
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
|
28 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s (generated, vendored)
@ -320,10 +320,6 @@ error_not_enough_literals:
|
||||
MOVQ $0x00000004, ret+24(FP)
|
||||
RET
|
||||
|
||||
// Return with not enough output space error
|
||||
MOVQ $0x00000005, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||
// Requires: CMOV
|
||||
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
|
||||
@ -617,10 +613,6 @@ error_not_enough_literals:
|
||||
MOVQ $0x00000004, ret+24(FP)
|
||||
RET
|
||||
|
||||
// Return with not enough output space error
|
||||
MOVQ $0x00000005, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||
// Requires: BMI, BMI2, CMOV
|
||||
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
|
||||
@ -897,10 +889,6 @@ error_not_enough_literals:
|
||||
MOVQ $0x00000004, ret+24(FP)
|
||||
RET
|
||||
|
||||
// Return with not enough output space error
|
||||
MOVQ $0x00000005, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||
// Requires: BMI, BMI2, CMOV
|
||||
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
|
||||
@ -1152,10 +1140,6 @@ error_not_enough_literals:
|
||||
MOVQ $0x00000004, ret+24(FP)
|
||||
RET
|
||||
|
||||
// Return with not enough output space error
|
||||
MOVQ $0x00000005, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
|
||||
// Requires: SSE
|
||||
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
|
||||
@ -1389,8 +1373,7 @@ loop_finished:
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVQ DX, 24(AX)
|
||||
MOVQ DI, 104(AX)
|
||||
MOVQ 80(AX), CX
|
||||
SUBQ CX, SI
|
||||
SUBQ 80(AX), SI
|
||||
MOVQ SI, 112(AX)
|
||||
RET
|
||||
|
||||
@ -1402,8 +1385,7 @@ error_match_off_too_big:
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVQ DX, 24(AX)
|
||||
MOVQ DI, 104(AX)
|
||||
MOVQ 80(AX), CX
|
||||
SUBQ CX, SI
|
||||
SUBQ 80(AX), SI
|
||||
MOVQ SI, 112(AX)
|
||||
RET
|
||||
|
||||
@ -1747,8 +1729,7 @@ loop_finished:
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVQ DX, 24(AX)
|
||||
MOVQ DI, 104(AX)
|
||||
MOVQ 80(AX), CX
|
||||
SUBQ CX, SI
|
||||
SUBQ 80(AX), SI
|
||||
MOVQ SI, 112(AX)
|
||||
RET
|
||||
|
||||
@ -1760,8 +1741,7 @@ error_match_off_too_big:
|
||||
MOVQ ctx+0(FP), AX
|
||||
MOVQ DX, 24(AX)
|
||||
MOVQ DI, 104(AX)
|
||||
MOVQ 80(AX), CX
|
||||
SUBQ CX, SI
|
||||
SUBQ 80(AX), SI
|
||||
MOVQ SI, 112(AX)
|
||||
RET
|
||||
|
||||
|
31 vendor/github.com/klauspost/compress/zstd/zstd.go (generated, vendored)
@ -36,9 +36,6 @@ const forcePreDef = false
|
||||
// zstdMinMatch is the minimum zstd match length.
|
||||
const zstdMinMatch = 3
|
||||
|
||||
// Reset the buffer offset when reaching this.
|
||||
const bufferReset = math.MaxInt32 - MaxWindowSize
|
||||
|
||||
// fcsUnknown is used for unknown frame content size.
|
||||
const fcsUnknown = math.MaxUint64
|
||||
|
||||
@ -75,7 +72,6 @@ var (
|
||||
ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
|
||||
|
||||
// ErrUnknownDictionary is returned if the dictionary ID is unknown.
|
||||
// For the time being dictionaries are not supported.
|
||||
ErrUnknownDictionary = errors.New("unknown dictionary")
|
||||
|
||||
// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
|
||||
@ -110,26 +106,25 @@ func printf(format string, a ...interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
// matchLen returns the maximum length.
|
||||
// matchLen returns the maximum common prefix length of a and b.
|
||||
// a must be the shortest of the two.
|
||||
// The function also returns whether all bytes matched.
|
||||
func matchLen(a, b []byte) int {
|
||||
b = b[:len(a)]
|
||||
for i := 0; i < len(a)-7; i += 8 {
|
||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
||||
return i + (bits.TrailingZeros64(diff) >> 3)
|
||||
func matchLen(a, b []byte) (n int) {
|
||||
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
|
||||
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
|
||||
if diff != 0 {
|
||||
return n + bits.TrailingZeros64(diff)>>3
|
||||
}
|
||||
n += 8
|
||||
}
|
||||
|
||||
checked := (len(a) >> 3) << 3
|
||||
a = a[checked:]
|
||||
b = b[checked:]
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return i + checked
|
||||
break
|
||||
}
|
||||
n++
|
||||
}
|
||||
return len(a) + checked
|
||||
return n
|
||||
|
||||
}
|
||||
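The rewritten matchLen above walks the inputs eight bytes at a time and uses the trailing-zero count of the XOR to locate the first differing byte. A standalone sketch of the same technique, independent of the package internals and with a made-up helper name:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// commonPrefixLen mirrors the idea used by matchLen above: compare 8 bytes
// at a time, then fall back to a byte-by-byte tail comparison.
func commonPrefixLen(a, b []byte) (n int) {
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			// In little-endian order the matching leading bytes are the
			// low-order bytes, so trailing zero bits / 8 counts them.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(commonPrefixLen([]byte("container"), []byte("containerd"))) // 9
}
```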
|
||||
func load3232(b []byte, i int32) uint32 {
|
||||
@ -140,10 +135,6 @@ func load6432(b []byte, i int32) uint64 {
|
||||
return binary.LittleEndian.Uint64(b[i:])
|
||||
}
|
||||
|
||||
func load64(b []byte, i int) uint64 {
|
||||
return binary.LittleEndian.Uint64(b[i:])
|
||||
}
|
||||
|
||||
type byter interface {
|
||||
Bytes() []byte
|
||||
Len() int
|
||||
|
11 vendor/github.com/opencontainers/runtime-spec/specs-go/config.go (generated, vendored)
@ -319,6 +319,10 @@ type LinuxMemory struct {
|
||||
DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"`
|
||||
// Enables hierarchical memory accounting
|
||||
UseHierarchy *bool `json:"useHierarchy,omitempty"`
|
||||
// CheckBeforeUpdate enables checking if a new memory limit is lower
|
||||
// than the current usage during update, and if so, rejecting the new
|
||||
// limit.
|
||||
CheckBeforeUpdate *bool `json:"checkBeforeUpdate,omitempty"`
|
||||
}
|
||||
|
||||
// LinuxCPU for Linux cgroup 'cpu' resource management
|
||||
@ -327,6 +331,9 @@ type LinuxCPU struct {
|
||||
Shares *uint64 `json:"shares,omitempty"`
|
||||
// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
|
||||
Quota *int64 `json:"quota,omitempty"`
|
||||
// CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a
|
||||
// given period.
|
||||
Burst *uint64 `json:"burst,omitempty"`
|
||||
// CPU period to be used for hardcapping (in usecs).
|
||||
Period *uint64 `json:"period,omitempty"`
|
||||
// How much time realtime scheduling may use (in usecs).
|
||||
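For illustration, the LinuxMemory.CheckBeforeUpdate and LinuxCPU.Burst fields added above would be populated roughly as follows; this is a hand-written sketch and all values are arbitrary:

```go
package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	quota := int64(50000)
	period := uint64(100000)
	burst := uint64(20000) // extra accumulated CPU time allowed per period
	check := true          // reject a new memory limit below current usage
	limit := int64(256 << 20)

	res := specs.LinuxResources{
		CPU: &specs.LinuxCPU{
			Quota:  &quota,
			Period: &period,
			Burst:  &burst,
		},
		Memory: &specs.LinuxMemory{
			Limit:             &limit,
			CheckBeforeUpdate: &check,
		},
	}

	out, _ := json.MarshalIndent(res, "", "  ")
	fmt.Println(string(out)) // emits the "burst" and "checkBeforeUpdate" JSON keys
}
```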
@ -645,6 +652,10 @@ const (
|
||||
// LinuxSeccompFlagSpecAllow can be used to disable Speculative Store
|
||||
// Bypass mitigation. (since Linux 4.17)
|
||||
LinuxSeccompFlagSpecAllow LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_SPEC_ALLOW"
|
||||
|
||||
// LinuxSeccompFlagWaitKillableRecv can be used to switch to the wait
|
||||
// killable semantics. (since Linux 5.19)
|
||||
LinuxSeccompFlagWaitKillableRecv LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV"
|
||||
)
|
||||
|
||||
// Additional architectures permitted to be used for system calls
|
||||
|
6 vendor/github.com/opencontainers/runtime-spec/specs-go/version.go (generated, vendored)
@ -6,12 +6,12 @@ const (
|
||||
// VersionMajor is for an API incompatible changes
|
||||
VersionMajor = 1
|
||||
// VersionMinor is for functionality in a backwards-compatible manner
|
||||
VersionMinor = 0
|
||||
VersionMinor = 1
|
||||
// VersionPatch is for backwards-compatible bug fixes
|
||||
VersionPatch = 2
|
||||
VersionPatch = 0
|
||||
|
||||
// VersionDev indicates development branch. Releases will be empty string.
|
||||
VersionDev = "-dev"
|
||||
VersionDev = "-rc.1"
|
||||
)
|
||||
|
||||
// Version is the specification version that the package types support.
|
||||
|
22 vendor/github.com/opencontainers/selinux/go-selinux/label/label.go (generated, vendored)
@ -78,6 +78,9 @@ func ReleaseLabel(label string) error {
|
||||
// Deprecated: use selinux.DupSecOpt
|
||||
var DupSecOpt = selinux.DupSecOpt
|
||||
|
||||
// FormatMountLabel returns a string to be used by the mount command. Using
|
||||
// the SELinux `context` mount option. Changing labels of files on mount
|
||||
// points with this option can never be changed.
|
||||
// FormatMountLabel returns a string to be used by the mount command.
|
||||
// The format of this string will be used to alter the labeling of the mountpoint.
|
||||
// The string returned is suitable to be used as the options field of the mount command.
|
||||
@ -85,12 +88,27 @@ var DupSecOpt = selinux.DupSecOpt
|
||||
// the first parameter. Second parameter is the label that you wish to apply
|
||||
// to all content in the mount point.
|
||||
func FormatMountLabel(src, mountLabel string) string {
|
||||
return FormatMountLabelByType(src, mountLabel, "context")
|
||||
}
|
||||
|
||||
// FormatMountLabelByType returns a string to be used by the mount command.
|
||||
// Allow caller to specify the mount options. For example using the SELinux
|
||||
// `fscontext` mount option would allow certain container processes to change
|
||||
// labels of files created on the mount points, where as `context` option does
|
||||
// not.
|
||||
// FormatMountLabelByType returns a string to be used by the mount command.
|
||||
// The format of this string will be used to alter the labeling of the mountpoint.
|
||||
// The string returned is suitable to be used as the options field of the mount command.
|
||||
// If you need to have additional mount point options, you can pass them in as
|
||||
// the first parameter. Second parameter is the label that you wish to apply
|
||||
// to all content in the mount point.
|
||||
func FormatMountLabelByType(src, mountLabel, contextType string) string {
|
||||
if mountLabel != "" {
|
||||
switch src {
|
||||
case "":
|
||||
src = fmt.Sprintf("context=%q", mountLabel)
|
||||
src = fmt.Sprintf("%s=%q", contextType, mountLabel)
|
||||
default:
|
||||
src = fmt.Sprintf("%s,context=%q", src, mountLabel)
|
||||
src = fmt.Sprintf("%s,%s=%q", src, contextType, mountLabel)
|
||||
}
|
||||
}
|
||||
return src
|
||||
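As a usage illustration for the reworked doc comments above; the mount label value is a made-up example:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
	mountLabel := "system_u:object_r:container_file_t:s0:c81,c84" // example label

	// Default "context" option: labels on the mount point cannot be changed later.
	fmt.Println(label.FormatMountLabel("defaults", mountLabel))
	// defaults,context="system_u:object_r:container_file_t:s0:c81,c84"

	// "fscontext" variant: allows certain container processes to relabel files later.
	fmt.Println(label.FormatMountLabelByType("defaults", mountLabel, "fscontext"))
	// defaults,fscontext="system_u:object_r:container_file_t:s0:c81,c84"
}
```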
|
34 vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go (generated, vendored)
@ -1,34 +0,0 @@
|
||||
//go:build linux && go1.16
|
||||
// +build linux,go1.16
|
||||
|
||||
package selinux
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/fs"
|
||||
"os"
|
||||
|
||||
"github.com/opencontainers/selinux/pkg/pwalkdir"
|
||||
)
|
||||
|
||||
func rchcon(fpath, label string) error {
|
||||
fastMode := false
|
||||
// If the current label matches the new label, assume
|
||||
// other labels are correct.
|
||||
if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
|
||||
fastMode = true
|
||||
}
|
||||
return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error {
|
||||
if fastMode {
|
||||
if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
e := lSetFileLabel(p, label)
|
||||
// Walk a file tree can race with removal, so ignore ENOENT.
|
||||
if errors.Is(e, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return e
|
||||
})
|
||||
}
|
22 vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go (generated, vendored)
@ -1,22 +0,0 @@
|
||||
//go:build linux && !go1.16
|
||||
// +build linux,!go1.16
|
||||
|
||||
package selinux
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/opencontainers/selinux/pkg/pwalk"
|
||||
)
|
||||
|
||||
func rchcon(fpath, label string) error {
|
||||
return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error {
|
||||
e := lSetFileLabel(p, label)
|
||||
// Walk a file tree can race with removal, so ignore ENOENT.
|
||||
if errors.Is(e, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return e
|
||||
})
|
||||
}
|
30 vendor/github.com/opencontainers/selinux/go-selinux/selinux.go (generated, vendored)
@ -23,8 +23,13 @@ var (
|
||||
// ErrEmptyPath is returned when an empty path has been specified.
|
||||
ErrEmptyPath = errors.New("empty path")
|
||||
|
||||
// ErrInvalidLabel is returned when an invalid label is specified.
|
||||
ErrInvalidLabel = errors.New("invalid Label")
|
||||
|
||||
// InvalidLabel is returned when an invalid label is specified.
|
||||
InvalidLabel = errors.New("Invalid Label")
|
||||
//
|
||||
// Deprecated: use [ErrInvalidLabel].
|
||||
InvalidLabel = ErrInvalidLabel
|
||||
|
||||
// ErrIncomparable is returned two levels are not comparable
|
||||
ErrIncomparable = errors.New("incomparable levels")
|
||||
@ -144,7 +149,7 @@ func CalculateGlbLub(sourceRange, targetRange string) (string, error) {
|
||||
// of the program is finished to guarantee another goroutine does not migrate to the current
|
||||
// thread before execution is complete.
|
||||
func SetExecLabel(label string) error {
|
||||
return setExecLabel(label)
|
||||
return writeCon(attrPath("exec"), label)
|
||||
}
|
||||
|
||||
// SetTaskLabel sets the SELinux label for the current thread, or an error.
|
||||
@ -152,21 +157,21 @@ func SetExecLabel(label string) error {
|
||||
// be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() to guarantee
|
||||
// the current thread does not run in a new mislabeled thread.
|
||||
func SetTaskLabel(label string) error {
|
||||
return setTaskLabel(label)
|
||||
return writeCon(attrPath("current"), label)
|
||||
}
|
||||
|
||||
// SetSocketLabel takes a process label and tells the kernel to assign the
|
||||
// label to the next socket that gets created. Calls to SetSocketLabel
|
||||
// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until
|
||||
// the the socket is created to guarantee another goroutine does not migrate
|
||||
// the socket is created to guarantee another goroutine does not migrate
|
||||
// to the current thread before execution is complete.
|
||||
func SetSocketLabel(label string) error {
|
||||
return setSocketLabel(label)
|
||||
return writeCon(attrPath("sockcreate"), label)
|
||||
}
|
||||
|
||||
// SocketLabel retrieves the current socket label setting
|
||||
func SocketLabel() (string, error) {
|
||||
return socketLabel()
|
||||
return readCon(attrPath("sockcreate"))
|
||||
}
|
||||
|
||||
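The doc comments above insist on LockOSThread around the per-thread setters. A hedged sketch of that pattern with SetSocketLabel; the helper name, address, and label are placeholders:

```go
package main

import (
	"net"
	"runtime"

	"github.com/opencontainers/selinux/go-selinux"
)

// listenWithLabel creates a listener whose socket carries the given SELinux
// label, keeping the goroutine pinned to its OS thread as the docs require.
func listenWithLabel(addr, socketLabel string) (net.Listener, error) {
	if !selinux.GetEnabled() {
		// Nothing to label on hosts without SELinux.
		return net.Listen("tcp", addr)
	}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := selinux.SetSocketLabel(socketLabel); err != nil {
		return nil, err
	}
	// Reset the per-thread setting once the socket has been created.
	defer selinux.SetSocketLabel("")

	return net.Listen("tcp", addr)
}

func main() {
	l, err := listenWithLabel("127.0.0.1:0", "system_u:system_r:container_t:s0") // placeholder label
	if err != nil {
		panic(err)
	}
	defer l.Close()
}
```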
// PeerLabel retrieves the label of the client on the other side of a socket
|
||||
@ -185,7 +190,7 @@ func SetKeyLabel(label string) error {
|
||||
|
||||
// KeyLabel retrieves the current kernel keyring label setting
|
||||
func KeyLabel() (string, error) {
|
||||
return keyLabel()
|
||||
return readCon("/proc/self/attr/keycreate")
|
||||
}
|
||||
|
||||
// Get returns the Context as a string
|
||||
@ -208,6 +213,11 @@ func ReserveLabel(label string) {
|
||||
reserveLabel(label)
|
||||
}
|
||||
|
||||
// MLSEnabled checks if MLS is enabled.
|
||||
func MLSEnabled() bool {
|
||||
return isMLSEnabled()
|
||||
}
|
||||
|
||||
// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
|
||||
func EnforceMode() int {
|
||||
return enforceMode()
|
||||
@ -220,7 +230,7 @@ func SetEnforceMode(mode int) error {
|
||||
}
|
||||
|
||||
// DefaultEnforceMode returns the systems default SELinux mode Enforcing,
|
||||
// Permissive or Disabled. Note this is is just the default at boot time.
|
||||
// Permissive or Disabled. Note this is just the default at boot time.
|
||||
// EnforceMode tells you the systems current mode.
|
||||
func DefaultEnforceMode() int {
|
||||
return defaultEnforceMode()
|
||||
@ -266,7 +276,7 @@ func CopyLevel(src, dest string) (string, error) {
|
||||
return copyLevel(src, dest)
|
||||
}
|
||||
|
||||
// Chcon changes the fpath file object to the SELinux label label.
|
||||
// Chcon changes the fpath file object to the SELinux label.
|
||||
// If fpath is a directory and recurse is true, then Chcon walks the
|
||||
// directory tree setting the label.
|
||||
//
|
||||
@ -284,7 +294,7 @@ func DupSecOpt(src string) ([]string, error) {
|
||||
// DisableSecOpt returns a security opt that can be used to disable SELinux
|
||||
// labeling support for future container processes.
|
||||
func DisableSecOpt() []string {
|
||||
return disableSecOpt()
|
||||
return []string{"disable"}
|
||||
}
|
||||
|
||||
// GetDefaultContextWithLevel gets a single context for the specified SELinux user
|
||||
|
183 vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go (generated, vendored)
@ -8,16 +8,16 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"io/fs"
|
||||
"math/big"
|
||||
"os"
|
||||
"os/user"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/opencontainers/selinux/pkg/pwalkdir"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
@ -35,17 +35,17 @@ const (
|
||||
)
|
||||
|
||||
type selinuxState struct {
|
||||
mcsList map[string]bool
|
||||
selinuxfs string
|
||||
selinuxfsOnce sync.Once
|
||||
enabledSet bool
|
||||
enabled bool
|
||||
selinuxfsOnce sync.Once
|
||||
selinuxfs string
|
||||
mcsList map[string]bool
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
type level struct {
|
||||
sens uint
|
||||
cats *big.Int
|
||||
sens uint
|
||||
}
|
||||
|
||||
type mlsRange struct {
|
||||
@ -54,10 +54,10 @@ type mlsRange struct {
|
||||
}
|
||||
|
||||
type defaultSECtx struct {
|
||||
user, level, scon string
|
||||
userRdr, defaultRdr io.Reader
|
||||
|
||||
userRdr io.Reader
|
||||
verifier func(string) error
|
||||
defaultRdr io.Reader
|
||||
user, level, scon string
|
||||
}
|
||||
|
||||
type levelItem byte
|
||||
@ -155,7 +155,7 @@ func findSELinuxfs() string {
|
||||
}
|
||||
|
||||
// check if selinuxfs is available before going the slow path
|
||||
fs, err := ioutil.ReadFile("/proc/filesystems")
|
||||
fs, err := os.ReadFile("/proc/filesystems")
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
@ -292,7 +292,7 @@ func readCon(fpath string) (string, error) {
|
||||
}
|
||||
|
||||
func readConFd(in *os.File) (string, error) {
|
||||
data, err := ioutil.ReadAll(in)
|
||||
data, err := io.ReadAll(in)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -305,7 +305,7 @@ func classIndex(class string) (int, error) {
|
||||
permpath := fmt.Sprintf("class/%s/index", class)
|
||||
indexpath := filepath.Join(getSelinuxMountPoint(), permpath)
|
||||
|
||||
indexB, err := ioutil.ReadFile(indexpath)
|
||||
indexB, err := os.ReadFile(indexpath)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
@ -391,21 +391,19 @@ func lFileLabel(fpath string) (string, error) {
|
||||
return string(label), nil
|
||||
}
|
||||
|
||||
// setFSCreateLabel tells kernel the label to create all file system objects
|
||||
// created by this task. Setting label="" to return to default.
|
||||
func setFSCreateLabel(label string) error {
|
||||
return writeAttr("fscreate", label)
|
||||
return writeCon(attrPath("fscreate"), label)
|
||||
}
|
||||
|
||||
// fsCreateLabel returns the default label the kernel which the kernel is using
|
||||
// for file system objects created by this task. "" indicates default.
|
||||
func fsCreateLabel() (string, error) {
|
||||
return readAttr("fscreate")
|
||||
return readCon(attrPath("fscreate"))
|
||||
}
|
||||
|
||||
// currentLabel returns the SELinux label of the current process thread, or an error.
|
||||
func currentLabel() (string, error) {
|
||||
return readAttr("current")
|
||||
return readCon(attrPath("current"))
|
||||
}
|
||||
|
||||
// pidLabel returns the SELinux label of the given pid, or an error.
|
||||
@ -416,7 +414,7 @@ func pidLabel(pid int) (string, error) {
|
||||
// ExecLabel returns the SELinux label that the kernel will use for any programs
|
||||
// that are executed by the current process thread, or an error.
|
||||
func execLabel() (string, error) {
|
||||
return readAttr("exec")
|
||||
return readCon(attrPath("exec"))
|
||||
}
|
||||
|
||||
func writeCon(fpath, val string) error {
|
||||
@ -462,18 +460,10 @@ func attrPath(attr string) string {
|
||||
})
|
||||
|
||||
if haveThreadSelf {
|
||||
return path.Join(threadSelfPrefix, attr)
|
||||
return filepath.Join(threadSelfPrefix, attr)
|
||||
}
|
||||
|
||||
return path.Join("/proc/self/task/", strconv.Itoa(unix.Gettid()), "/attr/", attr)
|
||||
}
|
||||
|
||||
func readAttr(attr string) (string, error) {
|
||||
return readCon(attrPath(attr))
|
||||
}
|
||||
|
||||
func writeAttr(attr, val string) error {
|
||||
return writeCon(attrPath(attr), val)
|
||||
return filepath.Join("/proc/self/task", strconv.Itoa(unix.Gettid()), "attr", attr)
|
||||
}
|
||||
|
||||
// canonicalizeContext takes a context string and writes it to the kernel
|
||||
@ -560,30 +550,30 @@ func (l *level) parseLevel(levelStr string) error {
|
||||
|
||||
// rangeStrToMLSRange marshals a string representation of a range.
|
||||
func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) {
|
||||
mlsRange := &mlsRange{}
|
||||
levelSlice := strings.SplitN(rangeStr, "-", 2)
|
||||
r := &mlsRange{}
|
||||
l := strings.SplitN(rangeStr, "-", 2)
|
||||
|
||||
switch len(levelSlice) {
|
||||
switch len(l) {
|
||||
// rangeStr that has a low and a high level, e.g. s4:c0.c1023-s6:c0.c1023
|
||||
case 2:
|
||||
mlsRange.high = &level{}
|
||||
if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse high level %q: %w", levelSlice[1], err)
|
||||
r.high = &level{}
|
||||
if err := r.high.parseLevel(l[1]); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse high level %q: %w", l[1], err)
|
||||
}
|
||||
fallthrough
|
||||
// rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023
|
||||
case 1:
|
||||
mlsRange.low = &level{}
|
||||
if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse low level %q: %w", levelSlice[0], err)
|
||||
r.low = &level{}
|
||||
if err := r.low.parseLevel(l[0]); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse low level %q: %w", l[0], err)
|
||||
}
|
||||
}
|
||||
|
||||
if mlsRange.high == nil {
|
||||
mlsRange.high = mlsRange.low
|
||||
if r.high == nil {
|
||||
r.high = r.low
|
||||
}
|
||||
|
||||
return mlsRange, nil
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// bitsetToStr takes a category bitset and returns it in the
|
||||
@ -617,17 +607,17 @@ func bitsetToStr(c *big.Int) string {
|
||||
return str
|
||||
}
|
||||
|
||||
func (l1 *level) equal(l2 *level) bool {
|
||||
if l2 == nil || l1 == nil {
|
||||
return l1 == l2
|
||||
func (l *level) equal(l2 *level) bool {
|
||||
if l2 == nil || l == nil {
|
||||
return l == l2
|
||||
}
|
||||
if l1.sens != l2.sens {
|
||||
if l2.sens != l.sens {
|
||||
return false
|
||||
}
|
||||
if l2.cats == nil || l1.cats == nil {
|
||||
return l2.cats == l1.cats
|
||||
if l2.cats == nil || l.cats == nil {
|
||||
return l2.cats == l.cats
|
||||
}
|
||||
return l1.cats.Cmp(l2.cats) == 0
|
||||
return l.cats.Cmp(l2.cats) == 0
|
||||
}
|
||||
|
||||
// String returns an mlsRange as a string.
|
||||
@ -721,36 +711,13 @@ func readWriteCon(fpath string, val string) (string, error) {
|
||||
return readConFd(f)
|
||||
}
|
||||
|
||||
// setExecLabel sets the SELinux label that the kernel will use for any programs
|
||||
// that are executed by the current process thread, or an error.
|
||||
func setExecLabel(label string) error {
|
||||
return writeAttr("exec", label)
|
||||
}
|
||||
|
||||
// setTaskLabel sets the SELinux label for the current thread, or an error.
|
||||
// This requires the dyntransition permission.
|
||||
func setTaskLabel(label string) error {
|
||||
return writeAttr("current", label)
|
||||
}
|
||||
|
||||
// setSocketLabel takes a process label and tells the kernel to assign the
|
||||
// label to the next socket that gets created
|
||||
func setSocketLabel(label string) error {
|
||||
return writeAttr("sockcreate", label)
|
||||
}
|
||||
|
||||
// socketLabel retrieves the current socket label setting
|
||||
func socketLabel() (string, error) {
|
||||
return readAttr("sockcreate")
|
||||
}
|
||||
|
||||
// peerLabel retrieves the label of the client on the other side of a socket
|
||||
func peerLabel(fd uintptr) (string, error) {
|
||||
label, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC)
|
||||
l, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC)
|
||||
if err != nil {
|
||||
return "", &os.PathError{Op: "getsockopt", Path: "fd " + strconv.Itoa(int(fd)), Err: err}
|
||||
}
|
||||
return label, nil
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// setKeyLabel takes a process label and tells the kernel to assign the
|
||||
@ -766,15 +733,10 @@ func setKeyLabel(label string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// keyLabel retrieves the current kernel keyring label setting
|
||||
func keyLabel() (string, error) {
|
||||
return readCon("/proc/self/attr/keycreate")
|
||||
}
|
||||
|
||||
// get returns the Context as a string
|
||||
func (c Context) get() string {
|
||||
if level := c["level"]; level != "" {
|
||||
return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + level
|
||||
if l := c["level"]; l != "" {
|
||||
return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + l
|
||||
}
|
||||
return c["user"] + ":" + c["role"] + ":" + c["type"]
|
||||
}
|
||||
@ -786,7 +748,7 @@ func newContext(label string) (Context, error) {
|
||||
if len(label) != 0 {
|
||||
con := strings.SplitN(label, ":", 4)
|
||||
if len(con) < 3 {
|
||||
return c, InvalidLabel
|
||||
return c, ErrInvalidLabel
|
||||
}
|
||||
c["user"] = con[0]
|
||||
c["role"] = con[1]
|
||||
@ -816,14 +778,23 @@ func reserveLabel(label string) {
|
||||
}
|
||||
|
||||
func selinuxEnforcePath() string {
|
||||
return path.Join(getSelinuxMountPoint(), "enforce")
|
||||
return filepath.Join(getSelinuxMountPoint(), "enforce")
|
||||
}
|
||||
|
||||
// isMLSEnabled checks if MLS is enabled.
|
||||
func isMLSEnabled() bool {
|
||||
enabledB, err := os.ReadFile(filepath.Join(getSelinuxMountPoint(), "mls"))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(enabledB, []byte{'1'})
|
||||
}
|
||||
|
||||
// enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
|
||||
func enforceMode() int {
|
||||
var enforce int
|
||||
|
||||
enforceB, err := ioutil.ReadFile(selinuxEnforcePath())
|
||||
enforceB, err := os.ReadFile(selinuxEnforcePath())
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
@ -837,11 +808,12 @@ func enforceMode() int {
|
||||
// setEnforceMode sets the current SELinux mode Enforcing, Permissive.
|
||||
// Disabled is not valid, since this needs to be set at boot time.
|
||||
func setEnforceMode(mode int) error {
|
||||
return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644)
|
||||
//nolint:gosec // ignore G306: permissions to be 0600 or less.
|
||||
return os.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644)
|
||||
}
|
||||
|
||||
// defaultEnforceMode returns the systems default SELinux mode Enforcing,
|
||||
// Permissive or Disabled. Note this is is just the default at boot time.
|
||||
// Permissive or Disabled. Note this is just the default at boot time.
|
||||
// EnforceMode tells you the systems current mode.
|
||||
func defaultEnforceMode() int {
|
||||
switch readConfig(selinuxTag) {
|
||||
@ -941,7 +913,7 @@ func openContextFile() (*os.File, error) {
|
||||
if f, err := os.Open(contextFile); err == nil {
|
||||
return f, nil
|
||||
}
|
||||
return os.Open(filepath.Join(policyRoot(), "/contexts/lxc_contexts"))
|
||||
return os.Open(filepath.Join(policyRoot(), "contexts", "lxc_contexts"))
|
||||
}
|
||||
|
||||
func loadLabels() {
|
||||
@ -1044,7 +1016,8 @@ func addMcs(processLabel, fileLabel string) (string, string) {
|
||||
|
||||
// securityCheckContext validates that the SELinux label is understood by the kernel
|
||||
func securityCheckContext(val string) error {
|
||||
return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644)
|
||||
//nolint:gosec // ignore G306: permissions to be 0600 or less.
|
||||
return os.WriteFile(filepath.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644)
|
||||
}
|
||||
|
||||
// copyLevel returns a label with the MLS/MCS level from src label replaced on
|
||||
@ -1073,7 +1046,7 @@ func copyLevel(src, dest string) (string, error) {
|
||||
return tcon.Get(), nil
|
||||
}
|
||||
|
||||
// chcon changes the fpath file object to the SELinux label label.
|
||||
// chcon changes the fpath file object to the SELinux label.
|
||||
// If fpath is a directory and recurse is true, then chcon walks the
|
||||
// directory tree setting the label.
|
||||
func chcon(fpath string, label string, recurse bool) error {
|
||||
@ -1084,7 +1057,7 @@ func chcon(fpath string, label string, recurse bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
exclude_paths := map[string]bool{
|
||||
excludePaths := map[string]bool{
|
||||
"/": true,
|
||||
"/bin": true,
|
||||
"/boot": true,
|
||||
@ -1112,19 +1085,19 @@ func chcon(fpath string, label string, recurse bool) error {
|
||||
}
|
||||
|
||||
if home := os.Getenv("HOME"); home != "" {
|
||||
exclude_paths[home] = true
|
||||
excludePaths[home] = true
|
||||
}
|
||||
|
||||
if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" {
|
||||
if usr, err := user.Lookup(sudoUser); err == nil {
|
||||
exclude_paths[usr.HomeDir] = true
|
||||
excludePaths[usr.HomeDir] = true
|
||||
}
|
||||
}
|
||||
|
||||
if fpath != "/" {
|
||||
fpath = strings.TrimSuffix(fpath, "/")
|
||||
}
|
||||
if exclude_paths[fpath] {
|
||||
if excludePaths[fpath] {
|
||||
return fmt.Errorf("SELinux relabeling of %s is not allowed", fpath)
|
||||
}
|
||||
|
||||
@ -1152,6 +1125,28 @@ func chcon(fpath string, label string, recurse bool) error {
|
||||
return rchcon(fpath, label)
|
||||
}
|
||||
|
||||
func rchcon(fpath, label string) error { //revive:disable:cognitive-complexity
|
||||
fastMode := false
|
||||
// If the current label matches the new label, assume
|
||||
// other labels are correct.
|
||||
if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
|
||||
fastMode = true
|
||||
}
|
||||
return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error {
|
||||
if fastMode {
|
||||
if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
err := lSetFileLabel(p, label)
|
||||
// Walk a file tree can race with removal, so ignore ENOENT.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// dupSecOpt takes an SELinux process label and returns security options that
|
||||
// can be used to set the SELinux Type and Level for future container processes.
|
||||
func dupSecOpt(src string) ([]string, error) {
|
||||
@ -1180,12 +1175,6 @@ func dupSecOpt(src string) ([]string, error) {
|
||||
return dup, nil
|
||||
}
|
||||
|
||||
// disableSecOpt returns a security opt that can be used to disable SELinux
|
||||
// labeling support for future container processes.
|
||||
func disableSecOpt() []string {
|
||||
return []string{"disable"}
|
||||
}
|
||||
|
||||
// findUserInContext scans the reader for a valid SELinux context
|
||||
// match that is verified with the verifier. Invalid contexts are
|
||||
// skipped. It returns a matched context or an empty string if no
|
||||
|
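The relabeling path above is easier to follow from the caller's side. A minimal sketch, assuming the exported wrappers in the go-selinux package (GetEnabled, EnforceMode, Chcon) keep the signatures this vendored copy ships; the directory and label are hypothetical:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/opencontainers/selinux/go-selinux"
    )

    func main() {
    	if !selinux.GetEnabled() {
    		fmt.Println("SELinux is not enabled; nothing to relabel")
    		return
    	}
    	// EnforceMode reports Enforcing (1), Permissive (0) or Disabled (-1).
    	fmt.Println("current mode:", selinux.EnforceMode())

    	// Recursively relabel a hypothetical data directory; this ends up in
    	// chcon -> rchcon above, where the fast-mode check skips entries whose
    	// label already matches the target.
    	if err := selinux.Chcon("/var/lib/myapp", "system_u:object_r:container_file_t:s0", true); err != nil {
    		log.Fatal(err)
    	}
    }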
44 vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go generated vendored
@@ -3,9 +3,20 @@

package selinux

func setDisabled() {
func attrPath(string) string {
return ""
}

func readCon(fpath string) (string, error) {
return "", nil
}

func writeCon(string, string) error {
return nil
}

func setDisabled() {}

func getEnabled() bool {
return false
}
@@ -62,22 +73,6 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) {
return "", nil
}

func setExecLabel(label string) error {
return nil
}

func setTaskLabel(label string) error {
return nil
}

func setSocketLabel(label string) error {
return nil
}

func socketLabel() (string, error) {
return "", nil
}

func peerLabel(fd uintptr) (string, error) {
return "", nil
}
@@ -86,17 +81,12 @@ func setKeyLabel(label string) error {
return nil
}

func keyLabel() (string, error) {
return "", nil
}

func (c Context) get() string {
return ""
}

func newContext(label string) (Context, error) {
c := make(Context)
return c, nil
return Context{}, nil
}

func clearLabels() {
@@ -105,6 +95,10 @@ func clearLabels() {
func reserveLabel(label string) {
}

func isMLSEnabled() bool {
return false
}

func enforceMode() int {
return Disabled
}
@@ -152,10 +146,6 @@ func dupSecOpt(src string) ([]string, error) {
return nil, nil
}

func disableSecOpt() []string {
return []string{"disable"}
}

func getDefaultContextWithLevel(user, level, scon string) (string, error) {
return "", nil
}
48 vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md generated vendored
@@ -1,48 +0,0 @@
## pwalk: parallel implementation of filepath.Walk

This is a wrapper for [filepath.Walk](https://pkg.go.dev/path/filepath?tab=doc#Walk)
which may speed it up by calling multiple callback functions (WalkFunc) in parallel,
utilizing goroutines.

By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks.
This can be changed by using WalkN function which has the additional
parameter, specifying the number of goroutines (concurrency).

### pwalk vs pwalkdir

This package is deprecated in favor of
[pwalkdir](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir),
which is faster, but requires at least Go 1.16.

### Caveats

Please note the following limitations of this code:

* Unlike filepath.Walk, the order of calls is non-deterministic;

* Only primitive error handling is supported:

* filepath.SkipDir is not supported;

* no errors are ever passed to WalkFunc;

* once any error is returned from any WalkFunc instance, no more new calls
to WalkFunc are made, and the error is returned to the caller of Walk;

* if more than one walkFunc instance will return an error, only one
of such errors will be propagated and returned by Walk, others
will be silently discarded.

### Documentation

For the official documentation, see
https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk?tab=doc

### Benchmarks

For a WalkFunc that consists solely of the return statement, this
implementation is about 10% slower than the standard library's
filepath.Walk.

Otherwise (if a WalkFunc is doing something) this is usually faster,
except when the WalkN(..., 1) is used.
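With pwalk removed, callers move to pwalkdir, whose WalkN signature is shown in the pwalkdir hunk further down. A minimal sketch of the equivalent usage; the root path and worker count are arbitrary, and note that callbacks run concurrently, so shared state needs synchronization:

    package main

    import (
    	"fmt"
    	"io/fs"
    	"log"
    	"sync/atomic"

    	"github.com/opencontainers/selinux/pkg/pwalkdir"
    )

    func main() {
    	var files int64
    	// Visit entries under "." on 4 goroutines; the order of visits is
    	// non-deterministic, so only do order-independent work here.
    	err := pwalkdir.WalkN(".", func(path string, d fs.DirEntry, err error) error {
    		if err != nil {
    			return err
    		}
    		if !d.IsDir() {
    			atomic.AddInt64(&files, 1)
    		}
    		return nil
    	}, 4)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("files visited:", files)
    }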
115 vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go generated vendored
@@ -1,115 +0,0 @@
package pwalk

import (
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
)

type WalkFunc = filepath.WalkFunc

// Walk is a wrapper for filepath.Walk which can call multiple walkFn
// in parallel, allowing to handle each item concurrently. A maximum of
// twice the runtime.NumCPU() walkFn will be called at any one time.
// If you want to change the maximum, use WalkN instead.
//
// The order of calls is non-deterministic.
//
// Note that this implementation only supports primitive error handling:
//
// - no errors are ever passed to walkFn;
//
// - once a walkFn returns any error, all further processing stops
// and the error is returned to the caller of Walk;
//
// - filepath.SkipDir is not supported;
//
// - if more than one walkFn instance will return an error, only one
// of such errors will be propagated and returned by Walk, others
// will be silently discarded.
func Walk(root string, walkFn WalkFunc) error {
return WalkN(root, walkFn, runtime.NumCPU()*2)
}

// WalkN is a wrapper for filepath.Walk which can call multiple walkFn
// in parallel, allowing to handle each item concurrently. A maximum of
// num walkFn will be called at any one time.
//
// Please see Walk documentation for caveats of using this function.
func WalkN(root string, walkFn WalkFunc, num int) error {
// make sure limit is sensible
if num < 1 {
return fmt.Errorf("walk(%q): num must be > 0", root)
}

files := make(chan *walkArgs, 2*num)
errCh := make(chan error, 1) // get the first error, ignore others

// Start walking a tree asap
var (
err error
wg sync.WaitGroup

rootLen = len(root)
rootEntry *walkArgs
)
wg.Add(1)
go func() {
err = filepath.Walk(root, func(p string, info os.FileInfo, err error) error {
if err != nil {
close(files)
return err
}
if len(p) == rootLen {
// Root entry is processed separately below.
rootEntry = &walkArgs{path: p, info: &info}
return nil
}
// add a file to the queue unless a callback sent an error
select {
case e := <-errCh:
close(files)
return e
default:
files <- &walkArgs{path: p, info: &info}
return nil
}
})
if err == nil {
close(files)
}
wg.Done()
}()

wg.Add(num)
for i := 0; i < num; i++ {
go func() {
for file := range files {
if e := walkFn(file.path, *file.info, nil); e != nil {
select {
case errCh <- e: // sent ok
default: // buffer full
}
}
}
wg.Done()
}()
}

wg.Wait()

if err == nil {
err = walkFn(rootEntry.path, *rootEntry.info, nil)
}

return err
}

// walkArgs holds the arguments that were passed to the Walk or WalkN
// functions.
type walkArgs struct {
path string
info *os.FileInfo
}
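The deleted implementation above leans on a small idiom worth keeping in mind: a 1-buffered error channel plus a select with a default case, so only the first worker error is retained and later errors are dropped without blocking. A stripped-down sketch of that pattern, with illustrative names that are not from the vendored code:

    package main

    import (
    	"errors"
    	"fmt"
    	"sync"
    )

    // firstError runs work on every job concurrently and reports only the
    // first error that any worker produced.
    func firstError(jobs []int, work func(int) error) error {
    	errCh := make(chan error, 1) // holds at most the first error
    	var wg sync.WaitGroup
    	for _, j := range jobs {
    		wg.Add(1)
    		go func(j int) {
    			defer wg.Done()
    			if err := work(j); err != nil {
    				select {
    				case errCh <- err: // first error wins
    				default: // buffer already full: discard
    				}
    			}
    		}(j)
    	}
    	wg.Wait()
    	select {
    	case err := <-errCh:
    		return err
    	default:
    		return nil
    	}
    }

    func main() {
    	err := firstError([]int{1, 2, 3}, func(n int) error {
    		if n == 2 {
    			return errors.New("job 2 failed")
    		}
    		return nil
    	})
    	fmt.Println("result:", err)
    }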
2 vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go generated vendored
@@ -111,6 +111,6 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
// walkArgs holds the arguments that were passed to the Walk or WalkN
// functions.
type walkArgs struct {
path string
entry fs.DirEntry
path string
}
78 vendor/github.com/stretchr/testify/assert/assertions.go generated vendored
@@ -8,7 +8,6 @@ import (
"fmt"
"math"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
@@ -141,12 +140,11 @@ func CallerInfo() []string {
}

parts := strings.Split(file, "/")
file = parts[len(parts)-1]
if len(parts) > 1 {
filename := parts[len(parts)-1]
dir := parts[len(parts)-2]
if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
path, _ := filepath.Abs(file)
callers = append(callers, fmt.Sprintf("%s:%d", path, line))
if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
callers = append(callers, fmt.Sprintf("%s:%d", file, line))
}
}

@@ -530,7 +528,7 @@ func isNil(object interface{}) bool {
[]reflect.Kind{
reflect.Chan, reflect.Func,
reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice},
reflect.Ptr, reflect.Slice, reflect.UnsafePointer},
kind)

if isNilableKind && value.IsNil() {
@@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true // we consider nil to be equal to the nil set
}

defer func() {
if e := recover(); e != nil {
ok = false
}
}()

listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()

if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}

subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}

subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
listValue := reflect.ValueOf(list)
subsetKeys := subsetValue.MapKeys()
subsetMap := reflect.ValueOf(subset)
actualMap := reflect.ValueOf(list)

for i := 0; i < len(subsetKeys); i++ {
subsetKey := subsetKeys[i]
subsetElement := subsetValue.MapIndex(subsetKey).Interface()
listElement := listValue.MapIndex(subsetKey).Interface()
for _, k := range subsetMap.MapKeys() {
ev := subsetMap.MapIndex(k)
av := actualMap.MapIndex(k)

if !ObjectsAreEqual(subsetElement, listElement) {
return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
if !av.IsValid() {
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
}
if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
}
}

return true
}

for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
subsetList := reflect.ValueOf(subset)
for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...)
}
if !found {
return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...)
}
}

@@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
}

defer func() {
if e := recover(); e != nil {
ok = false
}
}()

listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()

if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}

subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}

subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
listValue := reflect.ValueOf(list)
subsetKeys := subsetValue.MapKeys()
subsetMap := reflect.ValueOf(subset)
actualMap := reflect.ValueOf(list)

for i := 0; i < len(subsetKeys); i++ {
subsetKey := subsetKeys[i]
subsetElement := subsetValue.MapIndex(subsetKey).Interface()
listElement := listValue.MapIndex(subsetKey).Interface()
for _, k := range subsetMap.MapKeys() {
ev := subsetMap.MapIndex(k)
av := actualMap.MapIndex(k)

if !ObjectsAreEqual(subsetElement, listElement) {
if !av.IsValid() {
return true
}
if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
return true
}
}
@@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
}

for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
subsetList := reflect.ValueOf(subset)
for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
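The reworked Subset/NotSubset above now compares maps key by key via MapIndex and treats a missing key as "not contained" instead of recovering from a panic. A short sketch of how that reads from a test, assuming the assert package's documented Subset/NotSubset helpers; the values are made up:

    package example

    import (
    	"testing"

    	"github.com/stretchr/testify/assert"
    )

    func TestSubset(t *testing.T) {
    	// Slices: every element of the subset must appear in the list.
    	assert.Subset(t, []int{1, 2, 3}, []int{1, 3})

    	// Maps (the MapIndex branch above): each key in the subset must exist
    	// in the list with an equal value.
    	assert.Subset(t,
    		map[string]string{"a": "x", "b": "y"},
    		map[string]string{"a": "x"})

    	// A key that is absent from the list now fails Subset, and therefore
    	// satisfies NotSubset.
    	assert.NotSubset(t,
    		map[string]string{"a": "x"},
    		map[string]string{"c": "z"})
    }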
15 vendor/golang.org/x/sys/cpu/hwcap_linux.go generated vendored
@@ -24,6 +24,21 @@ var hwCap uint
var hwCap2 uint

func readHWCAP() error {
// For Go 1.21+, get auxv from the Go runtime.
if a := getAuxv(); len(a) > 0 {
for len(a) >= 2 {
tag, val := a[0], uint(a[1])
a = a[2:]
switch tag {
case _AT_HWCAP:
hwCap = val
case _AT_HWCAP2:
hwCap2 = val
}
}
return nil
}

buf, err := ioutil.ReadFile(procAuxv)
if err != nil {
// e.g. on android /proc/self/auxv is not accessible, so silently
16 vendor/golang.org/x/sys/cpu/runtime_auxv.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init)
// on platforms that use auxv.
var getAuxvFn func() []uintptr

func getAuxv() []uintptr {
if getAuxvFn == nil {
return nil
}
return getAuxvFn()
}
19 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go generated vendored Normal file
@@ -0,0 +1,19 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.21
// +build go1.21

package cpu

import (
_ "unsafe" // for linkname
)

//go:linkname runtime_getAuxv runtime.getAuxv
func runtime_getAuxv() []uintptr

func init() {
getAuxvFn = runtime_getAuxv
}
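The three hunks above wire HWCAP detection to the Go runtime's auxv on Go 1.21+, keeping /proc/self/auxv as the fallback. Nothing changes for consumers; a minimal sketch, assuming the exported feature flags of golang.org/x/sys/cpu:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/cpu"
    )

    func main() {
    	// On linux/arm64 these flags are populated from HWCAP/HWCAP2, which
    	// readHWCAP above now obtains from the runtime when it can.
    	fmt.Println("AES:  ", cpu.ARM64.HasAES)
    	fmt.Println("SHA2: ", cpu.ARM64.HasSHA2)
    	fmt.Println("ASIMD:", cpu.ARM64.HasASIMD)
    }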
2 vendor/golang.org/x/sys/execabs/execabs.go generated vendored
@@ -63,7 +63,7 @@ func LookPath(file string) (string, error) {
}

func fixCmd(name string, cmd *exec.Cmd) {
if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) {
if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) {
// exec.Command was called with a bare binary name and
// exec.LookPath returned a path which is not absolute.
// Set cmd.lookPathErr and clear cmd.Path so that it
6 vendor/golang.org/x/sys/execabs/execabs_go118.go generated vendored
@@ -7,6 +7,12 @@

package execabs

import "os/exec"

func isGo119ErrDot(err error) bool {
return false
}

func isGo119ErrFieldSet(cmd *exec.Cmd) bool {
return false
}
4
vendor/golang.org/x/sys/execabs/execabs_go119.go
generated
vendored
4
vendor/golang.org/x/sys/execabs/execabs_go119.go
generated
vendored
@ -15,3 +15,7 @@ import (
|
||||
func isGo119ErrDot(err error) bool {
|
||||
return errors.Is(err, exec.ErrDot)
|
||||
}
|
||||
|
||||
func isGo119ErrFieldSet(cmd *exec.Cmd) bool {
|
||||
return cmd.Err != nil
|
||||
}
|
||||
|
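The execabs change above only matters on Go 1.19+, where exec.Command records an ErrDot failure in cmd.Err; fixCmd now leaves such commands alone instead of clobbering the path again. A minimal usage sketch, assuming the package's exported LookPath and Command wrappers; the binary name is arbitrary:

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/sys/execabs"
    )

    func main() {
    	// LookPath behaves like exec.LookPath but refuses to resolve a bare
    	// name to a binary in the current directory.
    	path, err := execabs.LookPath("git")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("resolved:", path)

    	// Command wraps exec.Command with the same protection that fixCmd applies.
    	out, err := execabs.Command("git", "--version").Output()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("%s", out)
    }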
17 vendor/golang.org/x/sys/unix/ioctl.go generated vendored
@@ -8,7 +8,6 @@
package unix

import (
"runtime"
"unsafe"
)

@@ -27,7 +26,7 @@ func IoctlSetInt(fd int, req uint, value int) error {
// passing the integer value directly.
func IoctlSetPointerInt(fd int, req uint, value int) error {
v := int32(value)
return ioctl(fd, req, uintptr(unsafe.Pointer(&v)))
return ioctlPtr(fd, req, unsafe.Pointer(&v))
}

// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
@@ -36,9 +35,7 @@ func IoctlSetPointerInt(fd int, req uint, value int) error {
func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
// TODO: if we get the chance, remove the req parameter and
// hardcode TIOCSWINSZ.
err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
runtime.KeepAlive(value)
return err
return ioctlPtr(fd, req, unsafe.Pointer(value))
}

// IoctlSetTermios performs an ioctl on fd with a *Termios.
@@ -46,9 +43,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
// The req value will usually be TCSETA or TIOCSETA.
func IoctlSetTermios(fd int, req uint, value *Termios) error {
// TODO: if we get the chance, remove the req parameter.
err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
runtime.KeepAlive(value)
return err
return ioctlPtr(fd, req, unsafe.Pointer(value))
}

// IoctlGetInt performs an ioctl operation which gets an integer value
@@ -58,18 +53,18 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error {
// for those, IoctlRetInt should be used instead of this function.
func IoctlGetInt(fd int, req uint) (int, error) {
var value int
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return value, err
}

func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
var value Winsize
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
}

func IoctlGetTermios(fd int, req uint) (*Termios, error) {
var value Termios
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
}
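The switch from ioctl to ioctlPtr above keeps the exported wrappers' behavior the same while passing pointer arguments as unsafe.Pointer, so the runtime keeps them alive without the explicit runtime.KeepAlive calls. A minimal caller-side sketch, assuming unix.IoctlGetWinsize and the TIOCGWINSZ request on a Linux terminal:

    package main

    import (
    	"fmt"
    	"log"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// Terminal size via the wrapper that now routes through ioctlPtr.
    	ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
    	if err != nil {
    		log.Fatal(err) // fails with ENOTTY when stdout is not a terminal
    	}
    	fmt.Printf("terminal: %d rows x %d cols\n", ws.Row, ws.Col)
    }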
8 vendor/golang.org/x/sys/unix/ioctl_zos.go generated vendored
@@ -27,9 +27,7 @@ func IoctlSetInt(fd int, req uint, value int) error {
func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
// TODO: if we get the chance, remove the req parameter and
// hardcode TIOCSWINSZ.
err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
runtime.KeepAlive(value)
return err
return ioctlPtr(fd, req, unsafe.Pointer(value))
}

// IoctlSetTermios performs an ioctl on fd with a *Termios.
@@ -51,13 +49,13 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error {
// for those, IoctlRetInt should be used instead of this function.
func IoctlGetInt(fd int, req uint) (int, error) {
var value int
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return value, err
}

func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
var value Winsize
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
}
6 vendor/golang.org/x/sys/unix/ptrace_darwin.go generated vendored
@@ -7,6 +7,12 @@

package unix

import "unsafe"

func ptrace(request int, pid int, addr uintptr, data uintptr) error {
return ptrace1(request, pid, addr, data)
}

func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error {
return ptrace1Ptr(request, pid, addr, data)
}
6 vendor/golang.org/x/sys/unix/ptrace_ios.go generated vendored
@@ -7,6 +7,12 @@

package unix

import "unsafe"

func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
return ENOTSUP
}

func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
return ENOTSUP
}
5 vendor/golang.org/x/sys/unix/syscall_aix.go generated vendored
@@ -292,9 +292,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
break
}
}

bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
sa.Name = string(bytes)
sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
return sa, nil

case AF_INET:
@@ -411,6 +409,7 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 }
func (w WaitStatus) TrapCause() int { return -1 }

//sys ioctl(fd int, req uint, arg uintptr) (err error)
//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = ioctl

// fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX
// There is no way to create a custom fcntl and to keep //sys fcntl easily,
3 vendor/golang.org/x/sys/unix/syscall_bsd.go generated vendored
@@ -245,8 +245,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
break
}
}
bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
sa.Name = string(bytes)
sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
return sa, nil

case AF_INET:
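The AIX and BSD sockaddr hunks above replace the old fixed-size array cast with unsafe.Slice (Go 1.17+), which builds a byte slice directly from a pointer and a length. A tiny self-contained sketch of the same conversion on a plain C-style buffer; the data here is made up:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	// A NUL-padded, C-style name buffer, as it would sit inside a raw sockaddr.
    	raw := [8]byte{'s', 'o', 'c', 'k', 0, 0, 0, 0}
    	n := 4 // number of meaningful bytes

    	// Old pattern: cast to a fixed-size array, then slice it.
    	oldStyle := string((*[8]byte)(unsafe.Pointer(&raw[0]))[0:n])

    	// New pattern: unsafe.Slice yields the same bytes without the array cast.
    	newStyle := string(unsafe.Slice((*byte)(unsafe.Pointer(&raw[0])), n))

    	fmt.Println(oldStyle == newStyle, newStyle)
    }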
12 vendor/golang.org/x/sys/unix/syscall_darwin.go generated vendored
@@ -14,7 +14,6 @@ package unix

import (
"fmt"
"runtime"
"syscall"
"unsafe"
)
@@ -376,11 +375,10 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) {
func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) }

//sys ioctl(fd int, req uint, arg uintptr) (err error)
//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL

func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error {
err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo)))
runtime.KeepAlive(ctlInfo)
return err
return ioctlPtr(fd, CTLIOCGINFO, unsafe.Pointer(ctlInfo))
}

// IfreqMTU is struct ifreq used to get or set a network device's MTU.
@@ -394,16 +392,14 @@ type IfreqMTU struct {
func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) {
var ifreq IfreqMTU
copy(ifreq.Name[:], ifname)
err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq)))
err := ioctlPtr(fd, SIOCGIFMTU, unsafe.Pointer(&ifreq))
return &ifreq, err
}

// IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU
// of the network device specified by ifreq.Name.
func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error {
err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq)))
runtime.KeepAlive(ifreq)
return err
return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq))
}

//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL
1 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go generated vendored
@@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64
//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64
1 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go generated vendored
@@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT
//sys Lstat(path string, stat *Stat_t) (err error)
//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
//sys Stat(path string, stat *Stat_t) (err error)
//sys Statfs(path string, stat *Statfs_t) (err error)
1 vendor/golang.org/x/sys/unix/syscall_dragonfly.go generated vendored
@@ -172,6 +172,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
}

//sys ioctl(fd int, req uint, arg uintptr) (err error)
//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL

//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
43 vendor/golang.org/x/sys/unix/syscall_freebsd.go generated vendored
@@ -161,7 +161,8 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
return
}

//sys ioctl(fd int, req uint, arg uintptr) (err error)
//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL
//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL

//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL

@@ -253,6 +254,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
}

//sys ptrace(request int, pid int, addr uintptr, data int) (err error)
//sys ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) = SYS_PTRACE

func PtraceAttach(pid int) (err error) {
return ptrace(PT_ATTACH, pid, 0, 0)
@@ -267,19 +269,36 @@ func PtraceDetach(pid int) (err error) {
}

func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) {
return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0)
return ptracePtr(PT_GETFPREGS, pid, unsafe.Pointer(fpregsout), 0)
}

func PtraceGetRegs(pid int, regsout *Reg) (err error) {
return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0)
return ptracePtr(PT_GETREGS, pid, unsafe.Pointer(regsout), 0)
}

func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{
Op: int32(req),
Offs: offs,
}
if countin > 0 {
_ = out[:countin] // check bounds
ioDesc.Addr = &out[0]
} else if out != nil {
ioDesc.Addr = (*byte)(unsafe.Pointer(&_zero))
}
ioDesc.SetLen(countin)

err = ptracePtr(PT_IO, pid, unsafe.Pointer(&ioDesc), 0)
return int(ioDesc.Len), err
}

func PtraceLwpEvents(pid int, enable int) (err error) {
return ptrace(PT_LWP_EVENTS, pid, 0, enable)
}

func PtraceLwpInfo(pid int, info uintptr) (err error) {
return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{})))
func PtraceLwpInfo(pid int, info *PtraceLwpInfoStruct) (err error) {
return ptracePtr(PT_LWPINFO, pid, unsafe.Pointer(info), int(unsafe.Sizeof(*info)))
}

func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
@@ -299,13 +318,25 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
}

func PtraceSetRegs(pid int, regs *Reg) (err error) {
return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0)
return ptracePtr(PT_SETREGS, pid, unsafe.Pointer(regs), 0)
}

func PtraceSingleStep(pid int) (err error) {
return ptrace(PT_STEP, pid, 1, 0)
}

func Dup3(oldfd, newfd, flags int) error {
if oldfd == newfd || flags&^O_CLOEXEC != 0 {
return EINVAL
}
how := F_DUP2FD
if flags&O_CLOEXEC != 0 {
how = F_DUP2FD_CLOEXEC
}
_, err := fcntl(oldfd, how, newfd)
return err
}

/*
 * Exposed directly
 */
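Dup3 is new in the FreeBSD port above, implemented on top of fcntl with F_DUP2FD/F_DUP2FD_CLOEXEC. A minimal sketch of the usual redirect-stderr use, assuming unix.Dup3 keeps the same (oldfd, newfd, flags) contract as on Linux; the log path is hypothetical:

    package main

    import (
    	"fmt"
    	"log"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.Create("/tmp/app-stderr.log")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	// Point fd 2 (stderr) at the log file; oldfd and newfd must differ.
    	if err := unix.Dup3(int(f.Fd()), int(os.Stderr.Fd()), 0); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Fprintln(os.Stderr, "this line lands in /tmp/app-stderr.log")
    }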
Some files were not shown because too many files have changed in this diff.