Merge pull request #8811 from kiashok/updateHcsshimTag

Update hcsshim tag to v0.10.0-rc.9
Phil Estes, 2023-07-17 20:33:16 -04:00 (committed by GitHub)
commit f86d585ce6
204 changed files with 30930 additions and 2659 deletions
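
The diff shown on this page covers the root go.mod and go.sum plus a nested Go module's go.mod and go.sum; the vendored copies of the updated dependencies are not shown here and likely account for most of the 204 changed files. The exact commands used for this PR are not recorded on this page, but a bump like this is typically produced with the standard Go module tooling, roughly:

    go get github.com/Microsoft/hcsshim@v0.10.0-rc.9
    go mod tidy
    go mod vendor    # refresh vendor/ with the new dependency versions

with the same go get / go mod tidy sequence repeated in each nested Go module that pins hcsshim.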

go.mod

@@ -5,8 +5,8 @@ go 1.19
require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // replaced; see replace rules for actual version used.
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652
github.com/Microsoft/go-winio v0.6.1-0.20230228163719-dd5de6900b62
github.com/Microsoft/hcsshim v0.10.0-rc.7
github.com/Microsoft/go-winio v0.6.1
github.com/Microsoft/hcsshim v0.10.0-rc.9
github.com/container-orchestrated-devices/container-device-interface v0.5.4
github.com/containerd/btrfs/v2 v2.0.0
github.com/containerd/cgroups/v3 v3.0.2
@@ -36,7 +36,7 @@ require (
github.com/hashicorp/go-multierror v1.1.1
github.com/imdario/mergo v0.3.13
github.com/intel/goresctrl v0.3.0
github.com/klauspost/compress v1.16.0
github.com/klauspost/compress v1.16.5
github.com/minio/sha256-simd v1.0.0
github.com/moby/locker v1.0.1
github.com/moby/sys/mountinfo v0.6.2
@@ -51,10 +51,10 @@ require (
github.com/opencontainers/selinux v1.11.0
github.com/pelletier/go-toml v1.9.5
github.com/prometheus/client_golang v1.14.0
github.com/sirupsen/logrus v1.9.0
github.com/stretchr/testify v1.8.2
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.8.4
github.com/tchap/go-patricia/v2 v2.3.1
github.com/urfave/cli v1.22.12
github.com/urfave/cli v1.22.13
github.com/vishvananda/netlink v1.2.1-beta.2
go.etcd.io/bbolt v1.3.7
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0
@@ -64,11 +64,11 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0
go.opentelemetry.io/otel/sdk v1.14.0
go.opentelemetry.io/otel/trace v1.14.0
golang.org/x/sync v0.1.0
golang.org/x/sys v0.8.0
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4
google.golang.org/grpc v1.53.0
google.golang.org/protobuf v1.28.1
golang.org/x/sync v0.3.0
golang.org/x/sys v0.9.0
google.golang.org/genproto v0.0.0-20230323212658-478b75c54725
google.golang.org/grpc v1.54.0
google.golang.org/protobuf v1.30.0
k8s.io/api v0.26.2
k8s.io/apimachinery v0.26.2
k8s.io/apiserver v0.26.2
@@ -82,7 +82,7 @@ require (
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cilium/ebpf v0.9.1 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
@@ -95,6 +95,7 @@ require (
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
@@ -116,20 +117,20 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
github.com/vishvananda/netns v0.0.4 // indirect
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
go.opentelemetry.io/otel/metric v0.37.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.4.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/term v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.6.0 // indirect
golang.org/x/tools v0.8.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
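
The "// replaced; see replace rules for actual version used." comment on the go-fuzz-headers requirement refers to a replace block further down in go.mod, outside the hunks shown above. As a rough, hypothetical illustration of the mechanism (not the actual rules in this file), a replace directive rewrites a requirement to a different version or to a local path:

    replace (
        example.com/some/module => example.com/some/module v1.2.3    // hypothetical: pin a specific version or fork
        example.com/other/module => ../local/checkout                // hypothetical: use a local copy instead
    )

The nested module later in this diff uses the same mechanism to point github.com/containerd/containerd at the code at the root of this repository, as its own "// see replace" comment notes.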

go.sum

@@ -27,7 +27,7 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY=
cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
@@ -75,8 +75,8 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.1-0.20230228163719-dd5de6900b62 h1:PNDnNt0QOfCBd3bmdl9bhAt4+/PRCZpthE3PL0CMMtI=
github.com/Microsoft/go-winio v0.6.1-0.20230228163719-dd5de6900b62/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -89,8 +89,8 @@ github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE=
github.com/Microsoft/hcsshim v0.10.0-rc.9 h1:B4mguSolFL2yOHl0JjQxo0Si2Vwipj/Cbib4pyJ4pKA=
github.com/Microsoft/hcsshim v0.10.0-rc.9/go.mod h1:1g6+xpige+npSTrEkdm8JOZxOjJ9McQiT0JkEpzyZqA=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -139,8 +139,8 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
@@ -483,6 +483,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -639,8 +640,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -894,8 +895,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
@@ -938,8 +940,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -958,8 +961,8 @@ github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8=
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/urfave/cli v1.22.13 h1:wsLILXG8qCJNse/qAgLNf23737Cx05GflHg/PJGe1Ok=
github.com/urfave/cli v1.22.13/go.mod h1:VufqObjsMTF2BBwKawpx9R8eAneNEWhoO0yx8Vd+FkE=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -969,8 +972,9 @@ github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhg
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -1082,8 +1086,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1119,8 +1123,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1179,8 +1183,8 @@ golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1209,8 +1213,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1314,15 +1318,15 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1332,8 +1336,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1409,8 +1413,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1496,8 +1500,8 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
google.golang.org/genproto v0.0.0-20230323212658-478b75c54725 h1:VmCWItVXcKboEMCwZaWge+1JLiTCQSngZeINF+wzO+g=
google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1526,8 +1530,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1541,8 +1545,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

integration/client/go.mod

@@ -4,7 +4,7 @@ go 1.19
require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // replaced; see replace rules for actual version used.
github.com/Microsoft/hcsshim v0.10.0-rc.7
github.com/Microsoft/hcsshim v0.10.0-rc.9
github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1
github.com/containerd/cgroups/v3 v3.0.2
github.com/containerd/containerd v1.7.0 // see replace; the actual version of containerd is replaced with the code at the root of this repository
@@ -15,15 +15,15 @@ require (
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0-rc3
github.com/opencontainers/runtime-spec v1.1.0-rc.2
github.com/stretchr/testify v1.8.2
github.com/stretchr/testify v1.8.4
go.opentelemetry.io/otel v1.14.0
go.opentelemetry.io/otel/sdk v1.14.0
golang.org/x/sys v0.8.0
golang.org/x/sys v0.9.0
)
require (
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652 // indirect
github.com/Microsoft/go-winio v0.6.1-0.20230228163719-dd5de6900b62 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/cilium/ebpf v0.9.1 // indirect
github.com/container-orchestrated-devices/container-device-interface v0.5.4 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
@@ -39,11 +39,12 @@ require (
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
@@ -54,18 +55,18 @@ require (
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/trace v1.14.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/tools v0.6.0 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
google.golang.org/grpc v1.53.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/tools v0.8.0 // indirect
google.golang.org/genproto v0.0.0-20230323212658-478b75c54725 // indirect
google.golang.org/grpc v1.54.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect

integration/client/go.sum

@@ -42,13 +42,17 @@ cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wx
cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o=
cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM=
cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ=
cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ=
cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k=
cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M=
cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE=
cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE=
cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk=
cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8=
@@ -57,25 +61,32 @@ cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn
cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8=
cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY=
cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM=
cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU=
cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI=
cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84=
cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A=
cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY=
cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI=
cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ=
cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI=
cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo=
cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg=
cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
@@ -96,6 +107,7 @@ cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oe
cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM=
cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -105,12 +117,15 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7
cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E=
cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac=
cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q=
cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss=
cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
@@ -122,9 +137,12 @@ cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5v
cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE=
cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M=
cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg=
cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
@@ -133,6 +151,7 @@ cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uX
cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y=
cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
@@ -146,6 +165,8 @@ cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARy
cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
@@ -156,9 +177,11 @@ cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iW
cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4=
cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM=
cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI=
cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
@@ -166,6 +189,7 @@ cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H
cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M=
cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0=
cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
@@ -173,6 +197,7 @@ cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KF
cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA=
cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
@@ -182,6 +207,7 @@ cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxB
cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
@@ -196,9 +222,11 @@ cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W
cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs=
cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI=
cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
@@ -206,6 +234,7 @@ cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz
cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM=
cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4=
cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
@@ -214,12 +243,14 @@ cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX
cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM=
cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc=
cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
@@ -227,9 +258,11 @@ cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aU
cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw=
cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs=
cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
@@ -237,6 +270,7 @@ cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZ
cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw=
cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA=
cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
@@ -251,6 +285,7 @@ cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+o
cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E=
cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
@@ -266,19 +301,24 @@ cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQE
cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo=
cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o=
cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg=
cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w=
cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24=
cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
@@ -297,6 +337,7 @@ cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtq
cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw=
cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
@@ -313,22 +354,26 @@ cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJP
cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E=
cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
@@ -354,9 +399,11 @@ cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2om
cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -364,8 +411,10 @@ cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjp
cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
@@ -373,6 +422,7 @@ cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7d
cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
@@ -389,6 +439,7 @@ cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0
cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
@ -400,11 +451,13 @@ cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQk
cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
@ -415,25 +468,32 @@ cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3s
cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
@ -444,6 +504,7 @@ cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2m
cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
@ -454,9 +515,11 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
@ -471,12 +534,17 @@ cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV6
cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
@ -488,11 +556,14 @@ cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiC
cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
@ -537,14 +608,13 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/Microsoft/go-winio v0.6.1-0.20230228163719-dd5de6900b62 h1:PNDnNt0QOfCBd3bmdl9bhAt4+/PRCZpthE3PL0CMMtI=
github.com/Microsoft/go-winio v0.6.1-0.20230228163719-dd5de6900b62/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE=
github.com/Microsoft/hcsshim v0.10.0-rc.9 h1:B4mguSolFL2yOHl0JjQxo0Si2Vwipj/Cbib4pyJ4pKA=
github.com/Microsoft/hcsshim v0.10.0-rc.9/go.mod h1:1g6+xpige+npSTrEkdm8JOZxOjJ9McQiT0JkEpzyZqA=
github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1 h1:pVKfKyPkXna29XlGjxSr9J0A7vNucOUHZ/2ClcTWalw=
github.com/Microsoft/hcsshim/test v0.0.0-20210408205431-da33ecd607e1/go.mod h1:Cmvnhlie15Ha2UYrJs9EhgSx76Bq9RV2FgfEiT78GhI=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@ -573,6 +643,7 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@ -602,6 +673,7 @@ github.com/bytecodealliance/wasmtime-go v0.36.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOE
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
@ -670,10 +742,11 @@ github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4J
github.com/containerd/imgcrypt v1.1.7/go.mod h1:FD8gqIcX5aTotCtOmjeCsi3A1dHmTZpnMISGKSczt4k=
github.com/containerd/nri v0.3.0/go.mod h1:Zw9q2lP16sdg0zYybemZ9yTDy8g7fPCIB3KXOGlggXI=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.12.1/go.mod h1:12VUuCq3qPq4y8yUW+l5w3+oXV3cx2Po3KSe/SmPGqw=
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3/go.mod h1:YYyNVhZrTMiaf51Vj6WhAJqJw+vl/nzABhj8pWrzle4=
github.com/containerd/ttrpc v1.1.2/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs=
github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
@ -707,6 +780,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
@ -718,8 +792,8 @@ github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnG
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M=
github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
@ -731,12 +805,13 @@ github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mo
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v20.10.20+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v23.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.20+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
@ -839,8 +914,8 @@ github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhO
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@ -872,6 +947,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -919,7 +995,7 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo=
github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -964,6 +1040,7 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK
github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
@ -1014,6 +1091,7 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
@ -1044,9 +1122,9 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -1065,11 +1143,12 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
github.com/lestrrat-go/blackmagic v1.0.1/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY=
github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
github.com/lestrrat-go/jwx v1.2.26/go.mod h1:MaiCdGbn3/cckbOFSCluJlJMmp9dmZm5hDuIkx8ftpQ=
github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
@ -1126,6 +1205,7 @@ github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@ -1291,8 +1371,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@ -1311,6 +1392,7 @@ github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSW
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@ -1335,8 +1417,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@ -1350,16 +1433,18 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/urfave/cli v1.22.13/go.mod h1:VufqObjsMTF2BBwKawpx9R8eAneNEWhoO0yx8Vd+FkE=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
@ -1475,9 +1560,9 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1535,8 +1620,9 @@ golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1609,8 +1695,10 @@ golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1636,9 +1724,10 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1654,8 +1743,9 @@ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1776,6 +1866,7 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -1783,8 +1874,10 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1793,6 +1886,8 @@ golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1807,8 +1902,9 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1818,6 +1914,7 @@ golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -1896,8 +1993,9 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1969,6 +2067,8 @@ google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/
google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -2107,8 +2207,12 @@ google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA=
google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
google.golang.org/genproto v0.0.0-20230323212658-478b75c54725 h1:VmCWItVXcKboEMCwZaWge+1JLiTCQSngZeINF+wzO+g=
google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -2149,8 +2253,9 @@ google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD
google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@ -2166,8 +2271,10 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -35,6 +35,18 @@ issues:
text: "^line-length-limit: "
source: "^//(go:generate|sys) "
#TODO: remove after upgrading to go1.18
# ignore comment spacing for nolint and sys directives
- linters:
- revive
text: "^comment-spacings: no space between comment delimiter and comment text"
source: "//(cspell:|nolint:|sys |todo)"
# not on go 1.18 yet, so no any
- linters:
- revive
text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
# allow unjustified ignores of error checks in defer statements
- linters:
- nolintlint

View File

@ -23,7 +23,7 @@ import (
const afHVSock = 34 // AF_HYPERV
// Well known Service and VM IDs
//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
@ -31,7 +31,7 @@ func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
}
// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff
func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff
return guid.GUID{
Data1: 0xffffffff,
Data2: 0xffff,
@ -246,7 +246,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
var addrbuf [addrlen * 2]byte
var bytes uint32
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o)
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
}

View File

@ -0,0 +1,2 @@
// This package contains Win32 filesystem functionality.
package fs

202
vendor/github.com/Microsoft/go-winio/internal/fs/fs.go generated vendored Normal file
View File

@ -0,0 +1,202 @@
//go:build windows
package fs
import (
"golang.org/x/sys/windows"
"github.com/Microsoft/go-winio/internal/stringbuffer"
)
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
const NullHandle windows.Handle = 0
// AccessMask defines standard, specific, and generic rights.
//
// Bitmask:
// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
// +---------------+---------------+-------------------------------+
// |G|G|G|G|Resvd|A| StandardRights| SpecificRights |
// |R|W|E|A| |S| | |
// +-+-------------+---------------+-------------------------------+
//
// GR Generic Read
// GW Generic Write
// GE Generic Execute
// GA Generic All
// Resvd Reserved
// AS Access Security System
//
// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
//
// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
//
// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
type AccessMask = windows.ACCESS_MASK
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
// Not actually any.
//
// For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
FILE_ANY_ACCESS AccessMask = 0
// Specific Object Access
// from ntioapi.h
FILE_READ_DATA AccessMask = (0x0001) // file & pipe
FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory
FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
FILE_ADD_FILE AccessMask = (0x0002) // directory
FILE_APPEND_DATA AccessMask = (0x0004) // file
FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory
FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe
FILE_READ_EA AccessMask = (0x0008) // file & directory
FILE_READ_PROPERTIES AccessMask = FILE_READ_EA
FILE_WRITE_EA AccessMask = (0x0010) // file & directory
FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA
FILE_EXECUTE AccessMask = (0x0020) // file
FILE_TRAVERSE AccessMask = (0x0020) // directory
FILE_DELETE_CHILD AccessMask = (0x0040) // directory
FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all
FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all
FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF
// Standard Access
// from ntseapi.h
DELETE AccessMask = 0x0001_0000
READ_CONTROL AccessMask = 0x0002_0000
WRITE_DAC AccessMask = 0x0004_0000
WRITE_OWNER AccessMask = 0x0008_0000
SYNCHRONIZE AccessMask = 0x0010_0000
STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000
STANDARD_RIGHTS_READ AccessMask = READ_CONTROL
STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL
STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL
STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
)
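As a quick editorial cross-check of the compositions above (not part of the vendored file): FILE_GENERIC_READ = STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE = 0x0002_0000 | 0x0001 | 0x0080 | 0x0008 | 0x0010_0000 = 0x0012_0089, i.e. it occupies only the StandardRights and SpecificRights fields of the bitmask diagram; the generic GR bit (windows.GENERIC_READ, 0x8000_0000) is a separate request that the system maps onto this specific-rights value when the handle is opened.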
type FileShareMode uint32
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
FILE_SHARE_NONE FileShareMode = 0x00
FILE_SHARE_READ FileShareMode = 0x01
FILE_SHARE_WRITE FileShareMode = 0x02
FILE_SHARE_DELETE FileShareMode = 0x04
FILE_SHARE_VALID_FLAGS FileShareMode = 0x07
)
type FileCreationDisposition uint32
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
// from winbase.h
CREATE_NEW FileCreationDisposition = 0x01
CREATE_ALWAYS FileCreationDisposition = 0x02
OPEN_EXISTING FileCreationDisposition = 0x03
OPEN_ALWAYS FileCreationDisposition = 0x04
TRUNCATE_EXISTING FileCreationDisposition = 0x05
)
// CreateFile and co. take flags or attributes together as one parameter.
// Define an alias until we can use generics to allow both flags and attributes in one parameter.
// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
type FileFlagOrAttribute uint32
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const ( // from winnt.h
FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000
FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000
FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000
FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000
FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000
FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000
FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000
FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000
FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000
FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000
FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
)
type FileSQSFlag = FileFlagOrAttribute
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const ( // from winbase.h
SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)
SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000
SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000
)
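For orientation (editorial note, not part of the vendored file): these SQOS values are the impersonation levels defined later in this change shifted into the flags word, e.g. SECURITY_IMPERSONATION = SecurityImpersonation (2) << 16 = 0x0002_0000, which falls inside SECURITY_VALID_SQOS_FLAGS (0x001F_0000); CreateFile only honors these bits when SECURITY_SQOS_PRESENT is also set.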
// GetFinalPathNameByHandle flags
//
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters
type GetFinalPathFlag uint32
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
const (
GetFinalPathDefaultFlag GetFinalPathFlag = 0x0
FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0
FILE_NAME_OPENED GetFinalPathFlag = 0x8
VOLUME_NAME_DOS GetFinalPathFlag = 0x0
VOLUME_NAME_GUID GetFinalPathFlag = 0x1
VOLUME_NAME_NT GetFinalPathFlag = 0x2
VOLUME_NAME_NONE GetFinalPathFlag = 0x4
)
// GetFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle
// with the given handle and flags. It transparently takes care of creating a buffer of the
// correct size for the call.
//
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew
func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) {
b := stringbuffer.NewWString()
//TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n?
for {
n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags))
if err != nil {
return "", err
}
// If the buffer wasn't large enough, n will be the total size needed (including null terminator).
// Resize and try again.
if n > b.Cap() {
b.ResizeTo(n)
continue
}
// If the buffer is large enough, n will be the size not including the null terminator.
// Convert to a Go string and return.
return b.String(), nil
}
}
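A minimal usage sketch of the two helpers above (editorial addition, not part of the diff). The fs package is internal to the go-winio module, so a real caller would live inside that module; the directory path is an arbitrary example, and windows.CloseHandle comes from golang.org/x/sys/windows:

//go:build windows

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/internal/fs"
	"golang.org/x/sys/windows"
)

func main() {
	// Open a directory handle; FILE_FLAG_BACKUP_SEMANTICS is required for directories.
	h, err := fs.CreateFile(`C:\Windows\System32`,
		fs.FILE_GENERIC_READ,          // access mask composed from the constants above
		fs.FILE_SHARE_READ,            // allow concurrent readers
		nil,                           // default security attributes
		fs.OPEN_EXISTING,              // fail if the path does not exist
		fs.FILE_FLAG_BACKUP_SEMANTICS, // needed to open a directory rather than a file
		fs.NullHandle)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(h) //nolint:errcheck // best-effort close in a sketch

	// Resolve the canonical path; the helper resizes its buffer until the name fits.
	p, err := fs.GetFinalPathNameByHandle(h, fs.GetFinalPathDefaultFlag)
	if err != nil {
		panic(err)
	}
	fmt.Println(p)
}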

View File

@ -0,0 +1,12 @@
package fs
// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
type SecurityImpersonationLevel int32 // C's default enum underlying type is `int`, which corresponds to Go's `int32`
// Impersonation levels
const (
SecurityAnonymous SecurityImpersonationLevel = 0
SecurityIdentification SecurityImpersonationLevel = 1
SecurityImpersonation SecurityImpersonationLevel = 2
SecurityDelegation SecurityImpersonationLevel = 3
)

View File

@ -0,0 +1,64 @@
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package fs
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procCreateFileW = modkernel32.NewProc("CreateFileW")
)
func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = windows.Handle(r0)
if handle == windows.InvalidHandle {
err = errnoErr(e1)
}
return
}

View File

@ -100,8 +100,8 @@ func (f *runtimeFunc) Load() error {
(*byte)(unsafe.Pointer(&f.addr)),
uint32(unsafe.Sizeof(f.addr)),
&n,
nil, //overlapped
0, //completionRoutine
nil, // overlapped
0, // completionRoutine
)
})
return f.err

View File

@ -0,0 +1,132 @@
package stringbuffer
import (
"sync"
"unicode/utf16"
)
// TODO: worth exporting and using in mkwinsyscall?
// MinWStringCap is the buffer size in the pool, chosen somewhat arbitrarily to accommodate
// large path strings:
// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
const MinWStringCap = 310
// use *[]uint16 since []uint16 creates an extra allocation where the slice header
// is copied to heap and then referenced via pointer in the interface header that sync.Pool
// stores.
var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
New: func() interface{} {
b := make([]uint16, MinWStringCap)
return &b
},
}
func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }
// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
// This avoids taking a pointer to the slice header in WString, which can be set to nil.
func freeBuffer(b []uint16) { pathPool.Put(&b) }
// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
// for interacting with Win32 APIs.
// Sizes are specified as uint32 and not int.
//
// It is not thread safe.
type WString struct {
// type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future.
// raw buffer
b []uint16
}
// NewWString returns a [WString] allocated from a shared pool with an
// initial capacity of at least [MinWStringCap].
// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
//
// The buffer should be freed via [WString.Free]
func NewWString() *WString {
return &WString{
b: newBuffer(),
}
}
func (b *WString) Free() {
if b.empty() {
return
}
freeBuffer(b.b)
b.b = nil
}
// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
// previous buffer back into pool.
func (b *WString) ResizeTo(c uint32) uint32 {
// already sufficient (or c is 0)
if c <= b.Cap() {
return b.Cap()
}
if c <= MinWStringCap {
c = MinWStringCap
}
// allocate at least double the buffer size, as is done in [bytes.Buffer] and other places
if c <= 2*b.Cap() {
c = 2 * b.Cap()
}
b2 := make([]uint16, c)
if !b.empty() {
copy(b2, b.b)
freeBuffer(b.b)
}
b.b = b2
return c
}
// Buffer returns the underlying []uint16 buffer.
func (b *WString) Buffer() []uint16 {
if b.empty() {
return nil
}
return b.b
}
// Pointer returns a pointer to the first uint16 in the buffer.
// If [WString.Free] has already been called, the pointer will be nil.
func (b *WString) Pointer() *uint16 {
if b.empty() {
return nil
}
return &b.b[0]
}
// String returns the UTF-8 encoding of the UTF-16 string in the buffer.
//
// It assumes that the data is null-terminated.
func (b *WString) String() string {
// Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
// and would make this code Windows-only, which makes no sense.
// So copy UTF16ToString code into here.
// If other windows-specific code is added, switch to [windows.UTF16ToString]
s := b.b
for i, v := range s {
if v == 0 {
s = s[:i]
break
}
}
return string(utf16.Decode(s))
}
// Cap returns the underlying buffer capacity.
func (b *WString) Cap() uint32 {
if b.empty() {
return 0
}
return b.cap()
}
func (b *WString) cap() uint32 { return uint32(cap(b.b)) }
func (b *WString) empty() bool { return b == nil || b.cap() == 0 }
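A small sketch of the intended WString call pattern: take a buffer from the pool, grow it when the callee reports a larger requirement (as GetFinalPathNameByHandle does above), decode, and return it to the pool. Like internal/fs, this package is only importable from within the go-winio module; the UTF-16 fill below merely simulates a Win32 API writing into the buffer.

package main

import (
    "fmt"
    "unicode/utf16"

    "github.com/Microsoft/go-winio/internal/stringbuffer"
)

func main() {
    b := stringbuffer.NewWString()
    defer b.Free() // return the backing buffer to the pool

    // Pretend a Win32 call wants to write this NUL-terminated UTF-16 string.
    src := utf16.Encode([]rune("hello, world"))
    if need := uint32(len(src) + 1); need > b.Cap() {
        b.ResizeTo(need) // mirrors the retry loop in GetFinalPathNameByHandle
    }
    n := copy(b.Buffer(), src)
    b.Buffer()[n] = 0 // NUL terminator

    fmt.Println(b.String()) // "hello, world"
}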

View File

@ -16,11 +16,12 @@ import (
"unsafe"
"golang.org/x/sys/windows"
"github.com/Microsoft/go-winio/internal/fs"
)
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
@ -163,19 +164,21 @@ func (s pipeAddress) String() string {
}
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) {
for {
select {
case <-ctx.Done():
return syscall.Handle(0), ctx.Err()
default:
h, err := createFile(*path,
wh, err := fs.CreateFile(*path,
access,
0,
nil,
syscall.OPEN_EXISTING,
windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS,
0)
0, // mode
nil, // security attributes
fs.OPEN_EXISTING,
fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS,
0, // template file handle
)
h := syscall.Handle(wh)
if err == nil {
return h, nil
}
@ -219,7 +222,7 @@ func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
var err error
var h syscall.Handle
h, err = tryDialPipe(ctx, &path, access)
h, err = tryDialPipe(ctx, &path, fs.AccessMask(access))
if err != nil {
return nil, err
}
@ -279,6 +282,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
}
defer localFree(ntPath.Buffer)
oa.ObjectName = &ntPath
oa.Attributes = windows.OBJ_CASE_INSENSITIVE
// The security descriptor is only needed for the first pipe.
if first {

View File

@ -47,8 +47,6 @@ const (
)
// EventDescriptor represents various metadata for an ETW event.
//
//nolint:structcheck // task is currently unused
type eventDescriptor struct {
id uint16
version uint8

2
vendor/github.com/Microsoft/go-winio/pkg/fs/doc.go generated vendored Normal file
View File

@ -0,0 +1,2 @@
// This package contains Win32 filesystem functionality.
package fs

View File

@ -5,6 +5,8 @@ import (
"path/filepath"
"golang.org/x/sys/windows"
"github.com/Microsoft/go-winio/internal/stringbuffer"
)
var (
@ -13,19 +15,18 @@ var (
)
// GetFileSystemType obtains the type of a file system through GetVolumeInformation.
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx
//
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getvolumeinformationw
func GetFileSystemType(path string) (fsType string, err error) {
drive := filepath.VolumeName(path)
if len(drive) != 2 {
return "", ErrInvalidPath
}
var (
buf = make([]uint16, 255)
size = uint32(windows.MAX_PATH + 1)
)
buf := stringbuffer.NewWString()
defer buf.Free()
drive += `\`
err = windows.GetVolumeInformation(windows.StringToUTF16Ptr(drive), nil, 0, nil, nil, nil, &buf[0], size)
fsType = windows.UTF16ToString(buf)
return fsType, err
err = windows.GetVolumeInformation(windows.StringToUTF16Ptr(drive), nil, 0, nil, nil, nil, buf.Pointer(), buf.Cap())
return buf.String(), err
}
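A hedged usage sketch for the rewritten GetFileSystemType; the drive letter is hypothetical and the result depends on the volume.

//go:build windows

package main

import (
    "fmt"

    winfs "github.com/Microsoft/go-winio/pkg/fs"
)

func main() {
    // Only the volume name ("C:") of the path is used; the rest is ignored.
    fsType, err := winfs.GetFileSystemType(`C:\`)
    if err != nil {
        panic(err)
    }
    fmt.Println(fsType) // e.g. "NTFS"
}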

128
vendor/github.com/Microsoft/go-winio/pkg/fs/resolve.go generated vendored Normal file
View File

@ -0,0 +1,128 @@
//go:build windows
package fs
import (
"errors"
"os"
"strings"
"golang.org/x/sys/windows"
"github.com/Microsoft/go-winio/internal/fs"
)
// ResolvePath returns the final path to a file or directory represented, resolving symlinks,
// handling mount points, etc.
// The resolution works by using the Windows API GetFinalPathNameByHandle, which takes a
// handle and returns the final path to that file.
//
// It is intended to address shortcomings of [filepath.EvalSymlinks], which does not work
// well on Windows.
func ResolvePath(path string) (string, error) {
// We are not able to use builtin Go functionality for opening a directory path:
// - os.Open on a directory returns a os.File where Fd() is a search handle from FindFirstFile.
// - syscall.Open does not provide a way to specify FILE_FLAG_BACKUP_SEMANTICS, which is needed to
// open a directory.
//
// We could use os.Open if the path is a file, but it's easier to just use the same code for both.
// Therefore, we call windows.CreateFile directly.
h, err := fs.CreateFile(
path,
fs.FILE_ANY_ACCESS, // access
fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE|fs.FILE_SHARE_DELETE,
nil, // security attributes
fs.OPEN_EXISTING,
fs.FILE_FLAG_BACKUP_SEMANTICS, // Needed to open a directory handle.
fs.NullHandle, // template file
)
if err != nil {
return "", &os.PathError{
Op: "CreateFile",
Path: path,
Err: err,
}
}
defer windows.CloseHandle(h) //nolint:errcheck
// We use the Windows API GetFinalPathNameByHandle to handle path resolution. GetFinalPathNameByHandle
// returns a resolved path name for a file or directory. The returned path can be in several different
// formats, based on the flags passed. There are several goals behind the design here:
// - Do as little manual path manipulation as possible. Since Windows path formatting can be quite
// complex, we try to just let the Windows APIs handle that for us.
// - Retain as much compatibility with existing Go path functions as we can. In particular, we try to
// ensure paths returned from resolvePath can be passed to EvalSymlinks.
//
// First, we query for the VOLUME_NAME_GUID path of the file. This will return a path in the form
// "\\?\Volume{8a25748f-cf34-4ac6-9ee2-c89400e886db}\dir\file.txt". If the path is a UNC share
// (e.g. "\\server\share\dir\file.txt"), then the VOLUME_NAME_GUID query will fail with ERROR_PATH_NOT_FOUND.
// In this case, we will next try a VOLUME_NAME_DOS query. This query will return a path for a UNC share
// in the form "\\?\UNC\server\share\dir\file.txt". This path will work with most functions, but EvalSymlinks
// fails on it. Therefore, we rewrite the path to the form "\\server\share\dir\file.txt" before returning it.
// This path rewrite may not be valid in all cases (see the notes in the next paragraph), but those should
// be very rare edge cases, and this case wouldn't have worked with EvalSymlinks anyways.
//
// The "\\?\" prefix indicates that no path parsing or normalization should be performed by Windows.
// Instead the path is passed directly to the object manager. The lack of parsing means that "." and ".." are
// interpreted literally and "\" must be used as a path separator. Additionally, because normalization is
// not done, certain paths can only be represented in this format. For instance, "\\?\C:\foo." (with a trailing .)
// cannot be written as "C:\foo.", because path normalization will remove the trailing ".".
//
// FILE_NAME_NORMALIZED can fail on some UNC paths based on access restrictions.
// Attempt to query with FILE_NAME_NORMALIZED, and then fall back on FILE_NAME_OPENED if access is denied.
//
// Querying for VOLUME_NAME_DOS first instead of VOLUME_NAME_GUID would yield a "nicer looking" path in some cases.
// For instance, it could return "\\?\C:\dir\file.txt" instead of "\\?\Volume{8a25748f-cf34-4ac6-9ee2-c89400e886db}\dir\file.txt".
// However, we query for VOLUME_NAME_GUID first for two reasons:
// - The volume GUID path is more stable. A volume's mount point can change when it is remounted, but its
// volume GUID should not change.
// - If the volume is mounted at a non-drive letter path (e.g. mounted to "C:\mnt"), then VOLUME_NAME_DOS
// will return the mount path. EvalSymlinks fails on a path like this due to a bug.
//
// References:
// - GetFinalPathNameByHandle: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlea
// - Naming Files, Paths, and Namespaces: https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
// - Naming a Volume: https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-volume
normalize := true
guid := true
rPath := ""
for i := 1; i <= 4; i++ { // maximum of 4 different cases to try
var flags fs.GetFinalPathFlag
if normalize {
flags |= fs.FILE_NAME_NORMALIZED // nop; for clarity
} else {
flags |= fs.FILE_NAME_OPENED
}
if guid {
flags |= fs.VOLUME_NAME_GUID
} else {
flags |= fs.VOLUME_NAME_DOS // nop; for clarity
}
rPath, err = fs.GetFinalPathNameByHandle(h, flags)
switch {
case guid && errors.Is(err, windows.ERROR_PATH_NOT_FOUND):
// ERROR_PATH_NOT_FOUND is returned from the VOLUME_NAME_GUID query if the path is a
// network share (UNC path). In this case, query for the DOS name instead.
guid = false
continue
case normalize && errors.Is(err, windows.ERROR_ACCESS_DENIED):
// normalization failed when accessing individual components along path for SMB share
normalize = false
continue
default:
}
break
}
if err == nil && strings.HasPrefix(rPath, `\\?\UNC\`) {
// Convert \\?\UNC\server\share -> \\server\share. The \\?\UNC syntax does not work with
// some Go filepath functions such as EvalSymlinks. In the future if other components
// move away from EvalSymlinks and use GetFinalPathNameByHandle instead, we could remove
// this path munging.
rPath = `\\` + rPath[len(`\\?\UNC\`):]
}
return rPath, err
}
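A short usage sketch for ResolvePath; the input path is hypothetical, and the output is either a volume-GUID path or, for UNC shares, the rewritten \\server\share form described above.

//go:build windows

package main

import (
    "fmt"

    winfs "github.com/Microsoft/go-winio/pkg/fs"
)

func main() {
    // Symlinks and mount points are resolved by the kernel via GetFinalPathNameByHandle,
    // not by manual path manipulation.
    p, err := winfs.ResolvePath(`C:\some\link\file.txt`) // hypothetical path
    if err != nil {
        panic(err)
    }
    fmt.Println(p) // e.g. `\\?\Volume{...}\some\target\file.txt`
}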

View File

@ -21,7 +21,6 @@ type (
trusteeForm uint32
trusteeType uint32
//nolint:structcheck // structcheck thinks fields are unused, but they are used to pass data to the OS
explicitAccess struct {
accessPermissions accessMask
accessMode accessMode
@ -29,7 +28,6 @@ type (
trustee trustee
}
//nolint:structcheck,unused // structcheck thinks fields are unused, but they are used to pass data to the OS
trustee struct {
multipleTrustee *trustee
multipleTrusteeOperation int32

View File

@ -477,15 +477,14 @@ func newFn(s string) (*Fn, error) {
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
s = trim(s[1:])
a := strings.Split(s, ".")
switch len(a) {
case 1:
f.dllfuncname = a[0]
case 2:
f.dllname = a[0]
f.dllfuncname = a[1]
default:
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
if i := strings.LastIndex(s, "."); i >= 0 {
f.dllname = s[:i]
f.dllfuncname = s[i+1:]
} else {
f.dllfuncname = s
}
if f.dllfuncname == "" {
return nil, fmt.Errorf("function name is not specified in %q", s)
}
if n := f.dllfuncname; endsIn(n, '?') {
f.dllfuncname = n[:len(n)-1]
@ -502,7 +501,23 @@ func (f *Fn) DLLName() string {
return f.dllname
}
// DLLName returns DLL function name for function f.
// DLLVar returns a valid Go identifier that represents DLLName.
func (f *Fn) DLLVar() string {
id := strings.Map(func(r rune) rune {
switch r {
case '.', '-':
return '_'
default:
return r
}
}, f.DLLName())
if !token.IsIdentifier(id) {
panic(fmt.Errorf("could not create Go identifier for DLLName %q", f.DLLName()))
}
return id
}
// DLLFuncName returns DLL function name for function f.
func (f *Fn) DLLFuncName() string {
if f.dllfuncname == "" {
return f.Name
@ -648,6 +663,13 @@ func (f *Fn) HelperName() string {
return "_" + f.Name
}
// DLL is a DLL's filename and a string that is valid in a Go identifier that should be used when
// naming a variable that refers to the DLL.
type DLL struct {
Name string
Var string
}
// Source files and functions.
type Source struct {
Funcs []*Fn
@ -697,18 +719,20 @@ func ParseFiles(fs []string) (*Source, error) {
}
// DLLs return dll names for a source set src.
func (src *Source) DLLs() []string {
func (src *Source) DLLs() []DLL {
uniq := make(map[string]bool)
r := make([]string, 0)
r := make([]DLL, 0)
for _, f := range src.Funcs {
name := f.DLLName()
if _, found := uniq[name]; !found {
uniq[name] = true
r = append(r, name)
id := f.DLLVar()
if _, found := uniq[id]; !found {
uniq[id] = true
r = append(r, DLL{f.DLLName(), id})
}
}
if *sortdecls {
sort.Strings(r)
sort.Slice(r, func(i, j int) bool {
return r[i].Var < r[j].Var
})
}
return r
}
@ -878,6 +902,22 @@ func (src *Source) Generate(w io.Writer) error {
return nil
}
func writeTempSourceFile(data []byte) (string, error) {
f, err := os.CreateTemp("", "mkwinsyscall-generated-*.go")
if err != nil {
return "", err
}
_, err = f.Write(data)
if closeErr := f.Close(); err == nil {
err = closeErr
}
if err != nil {
os.Remove(f.Name()) // best effort
return "", err
}
return f.Name(), nil
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: mkwinsyscall [flags] [path ...]\n")
flag.PrintDefaults()
@ -904,7 +944,12 @@ func main() {
data, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
log.Printf("failed to format source: %v", err)
f, err := writeTempSourceFile(buf.Bytes())
if err != nil {
log.Fatalf("failed to write unformatted source to file: %v", err)
}
log.Fatalf("for diagnosis, wrote unformatted source to %v", f)
}
if *filename == "" {
_, err = os.Stdout.Write(data)
@ -970,10 +1015,10 @@ var (
{{/* help functions */}}
{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
{{define "dlls"}}{{range .DLLs}} mod{{.Var}} = {{newlazydll .Name}}
{{end}}{{end}}
{{define "funcnames"}}{{range .DLLFuncNames}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}")
{{define "funcnames"}}{{range .DLLFuncNames}} proc{{.DLLFuncName}} = mod{{.DLLVar}}.NewProc("{{.DLLFuncName}}")
{{end}}{{end}}
{{define "helperbody"}}

View File

@ -63,7 +63,6 @@ var (
procBackupWrite = modkernel32.NewProc("BackupWrite")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
@ -305,24 +304,6 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
return
}
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
err = errnoErr(e1)
}
return
}
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
newport = syscall.Handle(r0)

View File

@ -37,6 +37,10 @@ rootfs-conv/*
deps/*
out/*
# protobuf files
# only files at root of the repo, otherwise this will cause issues with vendoring
/protobuf/*
# test results
test/results

View File

@ -135,3 +135,9 @@ issues:
linters:
- stylecheck
Text: "ST1003:"
# v0 APIs are deprecated, but still retained for backwards compatibility
- path: cmd\\ncproxy\\
linters:
- staticcheck
text: "^SA1019: (ncproxygrpcv0|nodenetsvcV0)"

View File

@ -94,23 +94,9 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
tar -zcf $@ -C rootfs .
rm -rf rootfs
-include deps/cmd/gcs.gomake
-include deps/cmd/gcstools.gomake
-include deps/cmd/hooks/wait-paths.gomake
-include deps/cmd/tar2ext4.gomake
-include deps/internal/tools/snp-report.gomake
# Implicit rule for includes that define Go targets.
%.gomake: $(SRCROOT)/Makefile
bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
@mkdir -p $(dir $@)
@/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new
@/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new
@/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new
@/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new
@/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new
@/bin/echo -e '\tmv $$@.new $$@' >> $@.new
@/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new
mv $@.new $@
GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
@mkdir -p bin

View File

@ -9,6 +9,10 @@ plugins = ["grpc", "fieldpath"]
# treat the root of the project as an include, but this may not be necessary.
before = ["./protobuf"]
# defaults are "/usr/local/include" and "/usr/include", which don't exist on Windows.
# override defaults to suppress errors about non-existent directories.
after = []
# Paths that should be treated as include roots in relation to the vendor
# directory. These will be calculated with the vendor directory nearest the
# target package.

View File

@ -178,29 +178,35 @@ func ReadDMVerityInfo(vhdPath string, offsetInBytes int64) (*VerityInfo, error)
return nil, errors.Errorf("failed to seek dm-verity super block: expected bytes=%d, actual=%d", offsetInBytes, s)
}
return ReadDMVerityInfoReader(vhd)
}
func ReadDMVerityInfoReader(r io.Reader) (*VerityInfo, error) {
block := make([]byte, blockSize)
if s, err := vhd.Read(block); err != nil || s != blockSize {
if s, err := r.Read(block); err != nil || s != blockSize {
if err != nil {
return nil, errors.Wrapf(err, "%s", ErrSuperBlockReadFailure)
return nil, fmt.Errorf("%s: %w", ErrSuperBlockReadFailure, err)
}
return nil, errors.Wrapf(ErrSuperBlockReadFailure, "unexpected bytes read: expected=%d, actual=%d", blockSize, s)
return nil, fmt.Errorf("unexpected bytes read expected=%d actual=%d: %w", blockSize, s, ErrSuperBlockReadFailure)
}
dmvSB := &dmveritySuperblock{}
b := bytes.NewBuffer(block)
if err := binary.Read(b, binary.LittleEndian, dmvSB); err != nil {
return nil, errors.Wrapf(err, "%s", ErrSuperBlockParseFailure)
return nil, fmt.Errorf("%s: %w", ErrSuperBlockParseFailure, err)
}
if string(bytes.Trim(dmvSB.Signature[:], "\x00")[:]) != VeritySignature {
return nil, ErrNotVeritySuperBlock
}
// read the merkle tree root
if s, err := vhd.Read(block); err != nil || s != blockSize {
if s, err := r.Read(block); err != nil || s != blockSize {
if err != nil {
return nil, errors.Wrapf(err, "%s", ErrRootHashReadFailure)
return nil, fmt.Errorf("%s: %w", ErrRootHashReadFailure, err)
}
return nil, errors.Wrapf(ErrRootHashReadFailure, "unexpected bytes read: expected=%d, actual=%d", blockSize, s)
return nil, fmt.Errorf("unexpected bytes read expected=%d, actual=%d: %w", blockSize, s, ErrRootHashReadFailure)
}
rootHash := hash2(dmvSB.Salt[:dmvSB.SaltSize], block)
return &VerityInfo{
RootDigest: fmt.Sprintf("%x", rootHash),
@ -215,12 +221,21 @@ func ReadDMVerityInfo(vhdPath string, offsetInBytes int64) (*VerityInfo, error)
}, nil
}
// ComputeAndWriteHashDevice builds merkle tree from a given io.ReadSeeker and writes the result
// hash device (dm-verity super-block combined with merkle tree) to io.WriteSeeker.
func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.WriteSeeker) error {
// ComputeAndWriteHashDevice builds merkle tree from a given io.ReadSeeker and
// writes the result hash device (dm-verity super-block combined with merkle
// tree) to io.Writer.
func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.Writer) error {
// save current reader position
currBytePos, err := r.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
// reset to the beginning to find the device size
if _, err := r.Seek(0, io.SeekStart); err != nil {
return err
}
tree, err := MerkleTree(r)
if err != nil {
return errors.Wrap(err, "failed to build merkle tree")
@ -230,10 +245,13 @@ func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.WriteSeeker) error {
if err != nil {
return err
}
dmVeritySB := NewDMVeritySuperblock(uint64(devSize))
if _, err := w.Seek(0, io.SeekEnd); err != nil {
// reset reader to initial position
if _, err := r.Seek(currBytePos, io.SeekStart); err != nil {
return err
}
dmVeritySB := NewDMVeritySuperblock(uint64(devSize))
if err := binary.Write(w, binary.LittleEndian, dmVeritySB); err != nil {
return errors.Wrap(err, "failed to write dm-verity super-block")
}

View File

@ -13,6 +13,7 @@ import (
"github.com/Microsoft/hcsshim/ext4/dmverity"
"github.com/Microsoft/hcsshim/ext4/internal/compactext4"
"github.com/Microsoft/hcsshim/ext4/internal/format"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/pkg/errors"
)
@ -200,7 +201,19 @@ func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
return nil
}
// ReadExt4SuperBlock reads and returns ext4 super block from VHD
// ReadExt4SuperBlock reads and returns ext4 super block from given device.
func ReadExt4SuperBlock(devicePath string) (*format.SuperBlock, error) {
dev, err := os.OpenFile(devicePath, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer dev.Close()
return ReadExt4SuperBlockReadSeeker(dev)
}
// ReadExt4SuperBlockReadSeeker reads and returns ext4 super block given
// an io.ReadSeeker.
//
// The layout on disk is as follows:
// | Group 0 padding | - 1024 bytes
@ -215,28 +228,56 @@ func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
// More details can be found here https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout
//
// Our goal is to skip the Group 0 padding, read and return the ext4 SuperBlock
func ReadExt4SuperBlock(vhdPath string) (*format.SuperBlock, error) {
vhd, err := os.OpenFile(vhdPath, os.O_RDONLY, 0)
func ReadExt4SuperBlockReadSeeker(rsc io.ReadSeeker) (*format.SuperBlock, error) {
// save current reader position
currBytePos, err := rsc.Seek(0, io.SeekCurrent)
if err != nil {
return nil, err
}
defer vhd.Close()
// Skip padding at the start
if _, err := vhd.Seek(1024, io.SeekStart); err != nil {
if _, err := rsc.Seek(1024, io.SeekCurrent); err != nil {
return nil, err
}
var sb format.SuperBlock
if err := binary.Read(vhd, binary.LittleEndian, &sb); err != nil {
if err := binary.Read(rsc, binary.LittleEndian, &sb); err != nil {
return nil, err
}
// Make sure the magic bytes are correct.
// reset the reader to initial position
if _, err := rsc.Seek(currBytePos, io.SeekStart); err != nil {
return nil, err
}
if sb.Magic != format.SuperBlockMagic {
return nil, errors.New("not an ext4 file system")
}
return &sb, nil
}
// IsDeviceExt4 will read the device's superblock and determine if it is
// an ext4 superblock.
func IsDeviceExt4(devicePath string) bool {
// ReadExt4SuperBlock will check the superblock magic number for us,
// so we know if no error is returned, this is an ext4 device.
_, err := ReadExt4SuperBlock(devicePath)
if err != nil {
log.L.Warnf("failed to read Ext4 superblock: %s", err)
}
return err == nil
}
// Ext4FileSystemSize reads ext4 superblock and returns the size of the underlying
// ext4 file system and its block size.
func Ext4FileSystemSize(r io.ReadSeeker) (int64, int, error) {
sb, err := ReadExt4SuperBlockReadSeeker(r)
if err != nil {
return 0, 0, fmt.Errorf("failed to read ext4 superblock: %w", err)
}
blockSize := 1024 * (1 << sb.LogBlockSize)
fsSize := int64(blockSize) * int64(sb.BlocksCountLow)
return fsSize, blockSize, nil
}
// ConvertAndComputeRootDigest writes a compact ext4 file system image that contains the files in the
// input tar stream, computes the resulting file image's cryptographic hashes (merkle tree) and returns
// merkle tree root digest. Convert is called with minimal options: ConvertWhiteout and MaximumDiskSize
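Stepping back from the hunks above, here is a hedged sketch of the new read-seeker helpers; the image path is hypothetical, and the size arithmetic is the one in Ext4FileSystemSize (block size = 1024 << s_log_block_size).

package main

import (
    "fmt"
    "os"

    "github.com/Microsoft/hcsshim/ext4/tar2ext4"
)

func main() {
    f, err := os.Open("rootfs.ext4") // hypothetical ext4 image
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // The helpers save and restore the reader offset, so f can be reused afterwards.
    size, blockSize, err := tar2ext4.Ext4FileSystemSize(f)
    if err != nil {
        panic(err)
    }
    fmt.Printf("file system: %d bytes in %d-byte blocks\n", size, blockSize)
}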

View File

@ -0,0 +1,85 @@
package log
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"reflect"
"time"
"github.com/containerd/containerd/log"
)
const TimeFormat = log.RFC3339NanoFixed
func FormatTime(t time.Time) string {
return t.Format(TimeFormat)
}
// DurationFormat formats a [time.Duration] log entry.
//
// A nil value signals an error with the formatting.
type DurationFormat func(time.Duration) interface{}
func DurationFormatString(d time.Duration) interface{} { return d.String() }
func DurationFormatSeconds(d time.Duration) interface{} { return d.Seconds() }
func DurationFormatMilliseconds(d time.Duration) interface{} { return d.Milliseconds() }
// FormatIO formats net.Conn and other types that have an `Addr()` or `Name()`.
//
// See FormatEnabled for more information.
func FormatIO(ctx context.Context, v interface{}) string {
m := make(map[string]string)
m["type"] = reflect.TypeOf(v).String()
switch t := v.(type) {
case net.Conn:
m["localAddress"] = formatAddr(t.LocalAddr())
m["remoteAddress"] = formatAddr(t.RemoteAddr())
case interface{ Addr() net.Addr }:
m["address"] = formatAddr(t.Addr())
default:
return Format(ctx, t)
}
return Format(ctx, m)
}
func formatAddr(a net.Addr) string {
return a.Network() + "://" + a.String()
}
// Format formats an object into a JSON string, without any indentation or
// HTML escapes.
// Context is used to output a log warning if the conversion fails.
//
// This is intended primarily for `trace.StringAttribute()`
func Format(ctx context.Context, v interface{}) string {
b, err := encode(v)
if err != nil {
G(ctx).WithError(err).Warning("could not format value")
return ""
}
return string(b)
}
func encode(v interface{}) ([]byte, error) {
return encodeBuffer(&bytes.Buffer{}, v)
}
func encodeBuffer(buf *bytes.Buffer, v interface{}) ([]byte, error) {
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
enc.SetIndent("", "")
if err := enc.Encode(v); err != nil {
err = fmt.Errorf("could not marshall %T to JSON for logging: %w", v, err)
return nil, err
}
// encoder.Encode appends a newline to the end
return bytes.TrimSpace(buf.Bytes()), nil
}
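A minimal sketch of the Format and FormatIO helpers added here, assuming use from within the hcsshim module (internal/log is not importable elsewhere); the values being formatted are placeholders.

package main

import (
    "context"
    "fmt"
    "net"

    "github.com/Microsoft/hcsshim/internal/log"
)

func main() {
    ctx := context.Background()

    // Format marshals a value to compact JSON (no indentation, no HTML escaping);
    // on failure it logs a warning through ctx and returns "".
    fmt.Println(log.Format(ctx, map[string]interface{}{"vm": "uvm-1", "cpus": 2}))

    // FormatIO records the concrete type plus local/remote addresses for a net.Conn.
    c, s := net.Pipe()
    defer c.Close()
    defer s.Close()
    fmt.Println(log.FormatIO(ctx, c))
}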

View File

@ -1,23 +1,58 @@
package log
import (
"bytes"
"reflect"
"time"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/containerd/containerd/log"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// Hook serves to intercept and format `logrus.Entry`s before they are passed
// to the ETW hook.
const nullString = "null"
// Hook intercepts and formats a [logrus.Entry] before it logged.
//
// The containerd shim discards the (formatted) logrus output, and outputs only via ETW.
// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and
// then re-output via the ETW hook.
type Hook struct{}
// The shim either outputs the logs through an ETW hook (discarding the formatted output)
// or writes them to a pipe for logging binaries to consume.
// The Linux GCS outputs logrus entries over stdout, which is then consumed and re-output
// by the shim.
type Hook struct {
// EncodeAsJSON formats structs, maps, arrays, slices, and [bytes.Buffer] as JSON.
// Variables of [bytes.Buffer] will be converted to []byte.
//
// Default is false.
EncodeAsJSON bool
// TimeFormat specifies the format for [time.Time] variables.
// An empty string disables formatting.
// When disabled, the fallback is the JSON encoding, if enabled.
//
// Default is [github.com/containerd/containerd/log.RFC3339NanoFixed].
TimeFormat string
// DurationFormat converts a [time.Duration] field to an appropriate encoding.
// nil disables formatting.
// When disabled, the fallback is the JSON encoding, if enabled.
//
// Default is [DurationFormatString], which appends a duration unit after the value.
DurationFormat DurationFormat
// AddSpanContext adds [logfields.TraceID] and [logfields.SpanID] fields to
// the entry from the span context stored in [logrus.Entry.Context], if it exists.
AddSpanContext bool
}
var _ logrus.Hook = &Hook{}
func NewHook() *Hook {
return &Hook{}
return &Hook{
TimeFormat: log.RFC3339NanoFixed,
DurationFormat: DurationFormatString,
AddSpanContext: true,
}
}
func (h *Hook) Levels() []logrus.Level {
@ -25,14 +60,108 @@ func (h *Hook) Levels() []logrus.Level {
}
func (h *Hook) Fire(e *logrus.Entry) (err error) {
// JSON encode, if necessary, then add span information
h.encode(e)
h.addSpanContext(e)
return nil
}
// encode loops through all the fields in the [logrus.Entry] and encodes them according to
// the settings in [Hook].
// If [Hook.TimeFormat] is non-empty, it will be passed to [time.Time.Format] for
// fields of type [time.Time].
//
// If [Hook.EncodeAsJSON] is true, then fields that are not numeric, boolean, strings, or
// errors will be encoded via a [json.Marshal] (with HTML escaping disabled).
// Channel- and function-typed fields, as well as unsafe pointers, are left alone and not encoded.
//
// If [Hook.TimeFormat] and [Hook.DurationFormat] are empty and [Hook.EncodeAsJSON] is false,
// then this is a no-op.
func (h *Hook) encode(e *logrus.Entry) {
d := e.Data
formatTime := h.TimeFormat != ""
formatDuration := h.DurationFormat != nil
if !(h.EncodeAsJSON || formatTime || formatDuration) {
return
}
for k, v := range d {
// encode types with dedicated formatting options first
if vv, ok := v.(time.Time); formatTime && ok {
d[k] = vv.Format(h.TimeFormat)
continue
}
if vv, ok := v.(time.Duration); formatDuration && ok {
d[k] = h.DurationFormat(vv)
continue
}
// general case JSON encoding
if !h.EncodeAsJSON {
continue
}
switch vv := v.(type) {
// built in types
// "json" marshals errors as "{}", so leave alone here
case bool, string, error, uintptr,
int8, int16, int32, int64, int,
uint8, uint32, uint64, uint,
float32, float64:
continue
// Rather than setting d[k] = vv.String(), JSON encode []byte value, since it
// may be a binary payload and not representable as a string.
// `case bytes.Buffer,*bytes.Buffer:` resolves `vv` to `interface{}`,
// so cannot use `vv.Bytes`.
// Could move to below the `reflect.Indirect()` call below, but
// that would require additional typematching and dereferencing.
// Easier to keep these duplicate branches here.
case bytes.Buffer:
v = vv.Bytes()
case *bytes.Buffer:
v = vv.Bytes()
}
// dereference pointer or interface variables
rv := reflect.Indirect(reflect.ValueOf(v))
// check if `v` is a null pointer
if !rv.IsValid() {
d[k] = nullString
continue
}
switch rv.Kind() {
case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice:
default:
// Bool, [U]?Int*, Float*, Complex*, Uintptr, String: encoded as normal
// Chan, Func: not supported by json
// Interface, Pointer: dereferenced above
// UnsafePointer: not supported by json, not safe to de-reference; leave alone
continue
}
b, err := encode(v)
if err != nil {
// Errors are written to stderr (i.e., to `panic.log`) and stop the remaining
// hooks (i.e., exporting to ETW) from firing. So add encoding errors to
// the entry data to be written out, but keep on processing.
d[k+"-"+logrus.ErrorKey] = err.Error()
// keep the original `v` as the value,
continue
}
d[k] = string(b)
}
}
func (h *Hook) addSpanContext(e *logrus.Entry) {
ctx := e.Context
if ctx == nil {
if !h.AddSpanContext || ctx == nil {
return
}
span := trace.FromContext(ctx)
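A hedged sketch of wiring the extended hook into logrus with the defaults from NewHook, assuming use from within the hcsshim module (internal/log is not importable elsewhere); the field names are the ones declared above and the log fields are placeholders.

package main

import (
    "time"

    "github.com/Microsoft/hcsshim/internal/log"
    "github.com/sirupsen/logrus"
)

func main() {
    h := log.NewHook()    // TimeFormat, DurationFormat, and AddSpanContext are pre-set
    h.EncodeAsJSON = true // additionally JSON-encode maps, structs, slices, and bytes.Buffer fields
    logrus.AddHook(h)

    logrus.WithFields(logrus.Fields{
        "started": time.Now(),              // rendered with h.TimeFormat
        "took":    1500 * time.Millisecond, // rendered with h.DurationFormat
    }).Info("operation complete")
}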

View File

@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"errors"
"strings"
"sync/atomic"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
@ -56,11 +55,11 @@ func ScrubProcessParameters(s string) (string, error) {
}
pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
buf := bytes.NewBuffer(b[:0])
if err := encode(buf, pp); err != nil {
b, err := encodeBuffer(bytes.NewBuffer(b[:0]), pp)
if err != nil {
return "", err
}
return strings.TrimSpace(buf.String()), nil
return string(b), nil
}
// ScrubBridgeCreate scrubs requests sent over the bridge of type
@ -150,21 +149,12 @@ func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) {
return nil, err
}
buf := &bytes.Buffer{}
if err := encode(buf, m); err != nil {
b, err := encode(m)
if err != nil {
return nil, err
}
return bytes.TrimSpace(buf.Bytes()), nil
}
func encode(buf *bytes.Buffer, v interface{}) error {
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
if err := enc.Encode(v); err != nil {
return err
}
return nil
return b, nil
}
func isRequestBase(m genMap) bool {

View File

@ -0,0 +1,69 @@
package oc
import (
"errors"
"io"
"net"
"os"
"github.com/containerd/containerd/errdefs"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// todo: break import cycle with "internal/hcs/errors.go" and reference errors defined there
// todo: add errors defined in "internal/guest/gcserror" (Hresult does not implement error)
func toStatusCode(err error) codes.Code {
// checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status,
// wraps an error defined in "github.com/containerd/containerd/errdefs", or is a
// context timeout or cancelled error
if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
return s.Code()
}
switch {
// case isAny(err):
// return codes.Cancelled
case isAny(err, os.ErrInvalid):
return codes.InvalidArgument
case isAny(err, os.ErrDeadlineExceeded):
return codes.DeadlineExceeded
case isAny(err, os.ErrNotExist):
return codes.NotFound
case isAny(err, os.ErrExist):
return codes.AlreadyExists
case isAny(err, os.ErrPermission):
return codes.PermissionDenied
// case isAny(err):
// return codes.ResourceExhausted
case isAny(err, os.ErrClosed, net.ErrClosed, io.ErrClosedPipe, io.ErrShortBuffer):
return codes.FailedPrecondition
// case isAny(err):
// return codes.Aborted
// case isAny(err):
// return codes.OutOfRange
// case isAny(err):
// return codes.Unimplemented
case isAny(err, io.ErrNoProgress):
return codes.Internal
// case isAny(err):
// return codes.Unavailable
case isAny(err, io.ErrShortWrite, io.ErrUnexpectedEOF):
return codes.DataLoss
// case isAny(err):
// return codes.Unauthenticated
default:
return codes.Unknown
}
}
// isAny returns true if errors.Is is true for any of the provided errors, errs.
func isAny(err error, errs ...error) bool {
for _, e := range errs {
if errors.Is(err, e) {
return true
}
}
return false
}
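Since toStatusCode and isAny are unexported, they can only be exercised from within the package; here is a hypothetical in-package test sketch of the mapping for a wrapped os.ErrNotExist.

package oc

import (
    "fmt"
    "os"
    "testing"

    "google.golang.org/grpc/codes"
)

func TestToStatusCodeWrappedNotExist(t *testing.T) {
    // errors.Is unwraps the %w chain, so the os.ErrNotExist branch maps to codes.NotFound.
    err := fmt.Errorf("open config: %w", os.ErrNotExist)
    if got := toStatusCode(err); got != codes.NotFound {
        t.Fatalf("toStatusCode() = %v, want %v", got, codes.NotFound)
    }
}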

View File

@ -3,19 +3,26 @@ package oc
import (
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/logfields"
)
var _ = (trace.Exporter)(&LogrusExporter{})
const spanMessage = "Span"
var _errorCodeKey = logrus.ErrorKey + "Code"
// LogrusExporter is an OpenCensus `trace.Exporter` that exports
// `trace.SpanData` to logrus output.
type LogrusExporter struct {
}
type LogrusExporter struct{}
var _ trace.Exporter = &LogrusExporter{}
// ExportSpan exports `s` based on the following rules:
//
// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`,
// `s.ParentSpanID` for correlation
// 1. All output will contain `s.Attributes`, `s.SpanKind`, `s.TraceID`,
// `s.SpanID`, and `s.ParentSpanID` for correlation
//
// 2. Any calls to .Annotate will not be supported.
//
@ -23,21 +30,57 @@ type LogrusExporter struct {
// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel`
// providing `s.Status.Message` as the error value.
func (le *LogrusExporter) ExportSpan(s *trace.SpanData) {
// Combine all span annotations with traceID, spanID, parentSpanID
baseEntry := logrus.WithFields(logrus.Fields(s.Attributes))
baseEntry.Data["traceID"] = s.TraceID.String()
baseEntry.Data["spanID"] = s.SpanID.String()
baseEntry.Data["parentSpanID"] = s.ParentSpanID.String()
baseEntry.Data["startTime"] = s.StartTime
baseEntry.Data["endTime"] = s.EndTime
baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String()
baseEntry.Data["name"] = s.Name
baseEntry.Time = s.StartTime
if s.DroppedAnnotationCount > 0 {
logrus.WithFields(logrus.Fields{
"name": s.Name,
logfields.TraceID: s.TraceID.String(),
logfields.SpanID: s.SpanID.String(),
"dropped": s.DroppedAttributeCount,
"maxAttributes": len(s.Attributes),
}).Warning("span had dropped attributes")
}
entry := log.L.Dup()
// Combine all span annotations with span data (e.g., trace ID, span ID, parent span ID,
// error, status code).
// (OC) Span attributes are guaranteed to be strings, bools, or int64s, so we
// can skip the overhead of entry.WithFields() and add them directly to entry.Data.
// Preallocate ahead of time, since we should add, at most, 10 additional entries
data := make(logrus.Fields, len(entry.Data)+len(s.Attributes)+10)
// Default log entry may have preexisting/application-wide data
for k, v := range entry.Data {
data[k] = v
}
for k, v := range s.Attributes {
data[k] = v
}
data[logfields.Name] = s.Name
data[logfields.TraceID] = s.TraceID.String()
data[logfields.SpanID] = s.SpanID.String()
data[logfields.ParentSpanID] = s.ParentSpanID.String()
data[logfields.StartTime] = s.StartTime
data[logfields.EndTime] = s.EndTime
data[logfields.Duration] = s.EndTime.Sub(s.StartTime)
if sk := spanKindToString(s.SpanKind); sk != "" {
data["spanKind"] = sk
}
level := logrus.InfoLevel
if s.Status.Code != 0 {
level = logrus.ErrorLevel
baseEntry.Data[logrus.ErrorKey] = s.Status.Message
// don't overwrite existing "error" or "errorCode" attributes
if _, ok := data[logrus.ErrorKey]; !ok {
data[logrus.ErrorKey] = s.Status.Message
}
if _, ok := data[_errorCodeKey]; !ok {
data[_errorCodeKey] = codes.Code(s.Status.Code).String()
}
}
baseEntry.Log(level, "Span")
entry.Data = data
entry.Time = s.StartTime
entry.Log(level, spanMessage)
}

View File

@ -14,8 +14,7 @@ var DefaultSampler = trace.AlwaysSample()
func SetSpanStatus(span *trace.Span, err error) {
status := trace.Status{}
if err != nil {
// TODO: JTERRY75 - Handle errors in a non-generic way
status.Code = trace.StatusCodeUnknown
status.Code = int32(toStatusCode(err))
status.Message = err.Error()
}
span.SetStatus(status)
@ -46,3 +45,14 @@ func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) {
var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer)
var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient)
func spanKindToString(sk int) string {
switch sk {
case trace.SpanKindClient:
return "client"
case trace.SpanKindServer:
return "server"
default:
return ""
}
}
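A sketch of how the new status mapping surfaces through SetSpanStatus, again assuming use from within the hcsshim module (internal/oc is not importable elsewhere); span creation follows the usual opencensus pattern and the operation name is hypothetical.

package main

import (
    "context"
    "fmt"
    "os"

    "github.com/Microsoft/hcsshim/internal/oc"
    "go.opencensus.io/trace"
)

func main() {
    ctx, span := trace.StartSpan(context.Background(), "example::Operation") // hypothetical name
    defer span.End()
    _ = ctx

    // A wrapped os.ErrNotExist now produces trace.Status{Code: NotFound} instead of Unknown.
    err := fmt.Errorf("open layer: %w", os.ErrNotExist)
    oc.SetSpanStatus(span, err)
}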

View File

@ -23,20 +23,14 @@ type (
)
type explicitAccess struct {
//nolint:structcheck
accessPermissions accessMask
//nolint:structcheck
accessMode accessMode
//nolint:structcheck
inheritance inheritMode
//nolint:structcheck
trustee trustee
accessMode accessMode
inheritance inheritMode
trustee trustee
}
type trustee struct {
//nolint:unused,structcheck
multipleTrustee *trustee
//nolint:unused,structcheck
multipleTrustee *trustee
multipleTrusteeOperation int32
trusteeForm trusteeForm
trusteeType trusteeType

View File

@ -0,0 +1,35 @@
package osversion
// List of stable ABI-compliant LTSC releases
// Note: List must be sorted in ascending order
var compatLTSCReleases = []uint16{
V21H2Server,
}
// CheckHostAndContainerCompat checks if given host and container
// OS versions are compatible.
// It includes support for stable ABI-compliant versions as well.
// Every release after WS 2022 will support the previous LTSC
// container image. Stable ABI is in preview mode for the Windows 11 client.
// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility
func CheckHostAndContainerCompat(host, ctr OSVersion) bool {
// check major minor versions of host and guest
if host.MajorVersion != ctr.MajorVersion ||
host.MinorVersion != ctr.MinorVersion {
return false
}
// If host is < WS 2022, exact version match is required
if host.Build < V21H2Server {
return host.Build == ctr.Build
}
var supportedLtscRelease uint16
for i := len(compatLTSCReleases) - 1; i >= 0; i-- {
if host.Build >= compatLTSCReleases[i] {
supportedLtscRelease = compatLTSCReleases[i]
break
}
}
return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build
}
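A hedged usage sketch; osversion.Get and the OSVersion fields (MajorVersion, MinorVersion, Build) come from the existing osversion package, and the container build number below is illustrative only.

//go:build windows

package main

import (
    "fmt"

    "github.com/Microsoft/hcsshim/osversion"
)

func main() {
    host := osversion.Get() // host OS version, queried from the kernel

    // Hypothetical WS 2022-based container image (build numbers are illustrative).
    ctr := osversion.OSVersion{
        MajorVersion: host.MajorVersion,
        MinorVersion: host.MinorVersion,
        Build:        osversion.V21H2Server,
    }

    fmt.Println(osversion.CheckHostAndContainerCompat(host, ctr))
}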

View File

@ -5,6 +5,7 @@ package runhcs
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"os/exec"
@ -36,6 +37,11 @@ func getCommandPath() string {
pathi := runhcsPath.Load()
if pathi == nil {
path, err := exec.LookPath(command)
if err != nil {
if errors.Is(err, exec.ErrDot) {
err = nil
}
}
if err != nil {
// LookPath only finds current directory matches based on the
// callers current directory but the caller is not likely in the

View File

@ -2,4 +2,12 @@
package hcsshim
import _ "github.com/Microsoft/go-winio/tools/mkwinsyscall"
import (
// for go generate directives
// generate Win32 API code
_ "github.com/Microsoft/go-winio/tools/mkwinsyscall"
// mock gRPC client and servers
_ "github.com/golang/mock/mockgen"
)

12
vendor/github.com/golang/mock/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,12 @@
# This is the official list of GoMock authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Alex Reece <awreece@gmail.com>
Google Inc.

37
vendor/github.com/golang/mock/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,37 @@
# This is the official list of people who can contribute (and typically
# have contributed) code to the gomock repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# http://code.google.com/legal/individual-cla-v1.0.html
# http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
# Names should be added to this file like so:
# Name <email address>
#
# An entry with two email addresses specifies that the
# first address should be used in the submit logs and
# that the second address should be recognized as the
# same person when interacting with Rietveld.
# Please keep the list sorted.
Aaron Jacobs <jacobsa@google.com> <aaronjjacobs@gmail.com>
Alex Reece <awreece@gmail.com>
David Symonds <dsymonds@golang.org>
Ryan Barrett <ryanb@google.com>

202
vendor/github.com/golang/mock/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

701
vendor/github.com/golang/mock/mockgen/mockgen.go generated vendored Normal file

@ -0,0 +1,701 @@
// Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// MockGen generates mock implementations of Go interfaces.
package main
// TODO: This does not support recursive embedded interfaces.
// TODO: This does not support embedding package-local interfaces in a separate file.
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"unicode"
"github.com/golang/mock/mockgen/model"
"golang.org/x/mod/modfile"
toolsimports "golang.org/x/tools/imports"
)
const (
gomockImportPath = "github.com/golang/mock/gomock"
)
var (
version = ""
commit = "none"
date = "unknown"
)
var (
source = flag.String("source", "", "(source mode) Input Go source file; enables source mode.")
destination = flag.String("destination", "", "Output file; defaults to stdout.")
mockNames = flag.String("mock_names", "", "Comma-separated interfaceName=mockName pairs of explicit mock names to use. Mock names default to 'Mock' + interfaceName.")
packageOut = flag.String("package", "", "Package of the generated code; defaults to the package of the input with a 'mock_' prefix.")
selfPackage = flag.String("self_package", "", "The full package import path for the generated code. The purpose of this flag is to prevent import cycles in the generated code by trying to include its own package. This can happen if the mock's package is set to one of its inputs (usually the main one) and the output is stdio so mockgen cannot detect the final output package. Setting this flag will then tell mockgen which import to exclude.")
writePkgComment = flag.Bool("write_package_comment", true, "Writes package documentation comment (godoc) if true.")
copyrightFile = flag.String("copyright_file", "", "Copyright file used to add copyright header")
debugParser = flag.Bool("debug_parser", false, "Print out parser results only.")
showVersion = flag.Bool("version", false, "Print version.")
)
func main() {
flag.Usage = usage
flag.Parse()
if *showVersion {
printVersion()
return
}
var pkg *model.Package
var err error
var packageName string
if *source != "" {
pkg, err = sourceMode(*source)
} else {
if flag.NArg() != 2 {
usage()
log.Fatal("Expected exactly two arguments")
}
packageName = flag.Arg(0)
interfaces := strings.Split(flag.Arg(1), ",")
if packageName == "." {
dir, err := os.Getwd()
if err != nil {
log.Fatalf("Get current directory failed: %v", err)
}
packageName, err = packageNameOfDir(dir)
if err != nil {
log.Fatalf("Parse package name failed: %v", err)
}
}
pkg, err = reflectMode(packageName, interfaces)
}
if err != nil {
log.Fatalf("Loading input failed: %v", err)
}
if *debugParser {
pkg.Print(os.Stdout)
return
}
dst := os.Stdout
if len(*destination) > 0 {
if err := os.MkdirAll(filepath.Dir(*destination), os.ModePerm); err != nil {
log.Fatalf("Unable to create directory: %v", err)
}
f, err := os.Create(*destination)
if err != nil {
log.Fatalf("Failed opening destination file: %v", err)
}
defer f.Close()
dst = f
}
outputPackageName := *packageOut
if outputPackageName == "" {
// pkg.Name in reflect mode is the base name of the import path,
// which might have characters that are illegal to have in package names.
outputPackageName = "mock_" + sanitize(pkg.Name)
}
// outputPackagePath represents the fully qualified name of the package of
// the generated code. Its purposes are to prevent the module from importing
// itself and to prevent qualifying type names that come from its own
// package (i.e. if there is a type called X then we want to print "X" not
// "package.X" since "package" is this package). This can happen if the mock
// is output into an already existing package.
outputPackagePath := *selfPackage
if outputPackagePath == "" && *destination != "" {
dstPath, err := filepath.Abs(filepath.Dir(*destination))
if err == nil {
pkgPath, err := parsePackageImport(dstPath)
if err == nil {
outputPackagePath = pkgPath
} else {
log.Println("Unable to infer -self_package from destination file path:", err)
}
} else {
log.Println("Unable to determine destination file path:", err)
}
}
g := new(generator)
if *source != "" {
g.filename = *source
} else {
g.srcPackage = packageName
g.srcInterfaces = flag.Arg(1)
}
g.destination = *destination
if *mockNames != "" {
g.mockNames = parseMockNames(*mockNames)
}
if *copyrightFile != "" {
header, err := ioutil.ReadFile(*copyrightFile)
if err != nil {
log.Fatalf("Failed reading copyright file: %v", err)
}
g.copyrightHeader = string(header)
}
if err := g.Generate(pkg, outputPackageName, outputPackagePath); err != nil {
log.Fatalf("Failed generating mock: %v", err)
}
if _, err := dst.Write(g.Output()); err != nil {
log.Fatalf("Failed writing to destination: %v", err)
}
}
func parseMockNames(names string) map[string]string {
mocksMap := make(map[string]string)
for _, kv := range strings.Split(names, ",") {
parts := strings.SplitN(kv, "=", 2)
if len(parts) != 2 || parts[1] == "" {
log.Fatalf("bad mock names spec: %v", kv)
}
mocksMap[parts[0]] = parts[1]
}
return mocksMap
}
func usage() {
_, _ = io.WriteString(os.Stderr, usageText)
flag.PrintDefaults()
}
const usageText = `mockgen has two modes of operation: source and reflect.
Source mode generates mock interfaces from a source file.
It is enabled by using the -source flag. Other flags that
may be useful in this mode are -imports and -aux_files.
Example:
mockgen -source=foo.go [other options]
Reflect mode generates mock interfaces by building a program
that uses reflection to understand interfaces. It is enabled
by passing two non-flag arguments: an import path, and a
comma-separated list of symbols.
Example:
mockgen database/sql/driver Conn,Driver
`
type generator struct {
buf bytes.Buffer
indent string
mockNames map[string]string // may be empty
filename string // may be empty
destination string // may be empty
srcPackage, srcInterfaces string // may be empty
copyrightHeader string
packageMap map[string]string // map from import path to package name
}
func (g *generator) p(format string, args ...interface{}) {
fmt.Fprintf(&g.buf, g.indent+format+"\n", args...)
}
func (g *generator) in() {
g.indent += "\t"
}
func (g *generator) out() {
if len(g.indent) > 0 {
g.indent = g.indent[0 : len(g.indent)-1]
}
}
// sanitize cleans up a string to make a suitable package name.
func sanitize(s string) string {
t := ""
for _, r := range s {
if t == "" {
if unicode.IsLetter(r) || r == '_' {
t += string(r)
continue
}
} else {
if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
t += string(r)
continue
}
}
t += "_"
}
if t == "_" {
t = "x"
}
return t
}
func (g *generator) Generate(pkg *model.Package, outputPkgName string, outputPackagePath string) error {
if outputPkgName != pkg.Name && *selfPackage == "" {
// reset outputPackagePath if it's not passed in through -self_package
outputPackagePath = ""
}
if g.copyrightHeader != "" {
lines := strings.Split(g.copyrightHeader, "\n")
for _, line := range lines {
g.p("// %s", line)
}
g.p("")
}
g.p("// Code generated by MockGen. DO NOT EDIT.")
if g.filename != "" {
g.p("// Source: %v", g.filename)
} else {
g.p("// Source: %v (interfaces: %v)", g.srcPackage, g.srcInterfaces)
}
g.p("")
// Get all required imports, and generate unique names for them all.
im := pkg.Imports()
im[gomockImportPath] = true
// Only import reflect if it's used. We only use reflect in mocked methods
// so only import if any of the mocked interfaces have methods.
for _, intf := range pkg.Interfaces {
if len(intf.Methods) > 0 {
im["reflect"] = true
break
}
}
// Sort keys to make import alias generation predictable
sortedPaths := make([]string, len(im))
x := 0
for pth := range im {
sortedPaths[x] = pth
x++
}
sort.Strings(sortedPaths)
packagesName := createPackageMap(sortedPaths)
g.packageMap = make(map[string]string, len(im))
localNames := make(map[string]bool, len(im))
for _, pth := range sortedPaths {
base, ok := packagesName[pth]
if !ok {
base = sanitize(path.Base(pth))
}
// Local names for an imported package can usually be the basename of the import path.
// A couple of situations don't permit that, such as duplicate local names
// (e.g. importing "html/template" and "text/template"), or where the basename is
// a keyword (e.g. "foo/case").
// try base0, base1, ...
pkgName := base
i := 0
for localNames[pkgName] || token.Lookup(pkgName).IsKeyword() {
pkgName = base + strconv.Itoa(i)
i++
}
// Avoid importing package if source pkg == output pkg
if pth == pkg.PkgPath && outputPackagePath == pkg.PkgPath {
continue
}
g.packageMap[pth] = pkgName
localNames[pkgName] = true
}
if *writePkgComment {
g.p("// Package %v is a generated GoMock package.", outputPkgName)
}
g.p("package %v", outputPkgName)
g.p("")
g.p("import (")
g.in()
for pkgPath, pkgName := range g.packageMap {
if pkgPath == outputPackagePath {
continue
}
g.p("%v %q", pkgName, pkgPath)
}
for _, pkgPath := range pkg.DotImports {
g.p(". %q", pkgPath)
}
g.out()
g.p(")")
for _, intf := range pkg.Interfaces {
if err := g.GenerateMockInterface(intf, outputPackagePath); err != nil {
return err
}
}
return nil
}
// The name of the mock type to use for the given interface identifier.
func (g *generator) mockName(typeName string) string {
if mockName, ok := g.mockNames[typeName]; ok {
return mockName
}
return "Mock" + typeName
}
func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePath string) error {
mockType := g.mockName(intf.Name)
g.p("")
g.p("// %v is a mock of %v interface.", mockType, intf.Name)
g.p("type %v struct {", mockType)
g.in()
g.p("ctrl *gomock.Controller")
g.p("recorder *%vMockRecorder", mockType)
g.out()
g.p("}")
g.p("")
g.p("// %vMockRecorder is the mock recorder for %v.", mockType, mockType)
g.p("type %vMockRecorder struct {", mockType)
g.in()
g.p("mock *%v", mockType)
g.out()
g.p("}")
g.p("")
g.p("// New%v creates a new mock instance.", mockType)
g.p("func New%v(ctrl *gomock.Controller) *%v {", mockType, mockType)
g.in()
g.p("mock := &%v{ctrl: ctrl}", mockType)
g.p("mock.recorder = &%vMockRecorder{mock}", mockType)
g.p("return mock")
g.out()
g.p("}")
g.p("")
// XXX: possible name collision here if someone has EXPECT in their interface.
g.p("// EXPECT returns an object that allows the caller to indicate expected use.")
g.p("func (m *%v) EXPECT() *%vMockRecorder {", mockType, mockType)
g.in()
g.p("return m.recorder")
g.out()
g.p("}")
g.GenerateMockMethods(mockType, intf, outputPackagePath)
return nil
}
type byMethodName []*model.Method
func (b byMethodName) Len() int { return len(b) }
func (b byMethodName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byMethodName) Less(i, j int) bool { return b[i].Name < b[j].Name }
func (g *generator) GenerateMockMethods(mockType string, intf *model.Interface, pkgOverride string) {
sort.Sort(byMethodName(intf.Methods))
for _, m := range intf.Methods {
g.p("")
_ = g.GenerateMockMethod(mockType, m, pkgOverride)
g.p("")
_ = g.GenerateMockRecorderMethod(mockType, m)
}
}
func makeArgString(argNames, argTypes []string) string {
args := make([]string, len(argNames))
for i, name := range argNames {
// specify the type only once for consecutive args of the same type
if i+1 < len(argTypes) && argTypes[i] == argTypes[i+1] {
args[i] = name
} else {
args[i] = name + " " + argTypes[i]
}
}
return strings.Join(args, ", ")
}
// GenerateMockMethod generates a mock method implementation.
// If non-empty, pkgOverride is the package in which unqualified types reside.
func (g *generator) GenerateMockMethod(mockType string, m *model.Method, pkgOverride string) error {
argNames := g.getArgNames(m)
argTypes := g.getArgTypes(m, pkgOverride)
argString := makeArgString(argNames, argTypes)
rets := make([]string, len(m.Out))
for i, p := range m.Out {
rets[i] = p.Type.String(g.packageMap, pkgOverride)
}
retString := strings.Join(rets, ", ")
if len(rets) > 1 {
retString = "(" + retString + ")"
}
if retString != "" {
retString = " " + retString
}
ia := newIdentifierAllocator(argNames)
idRecv := ia.allocateIdentifier("m")
g.p("// %v mocks base method.", m.Name)
g.p("func (%v *%v) %v(%v)%v {", idRecv, mockType, m.Name, argString, retString)
g.in()
g.p("%s.ctrl.T.Helper()", idRecv)
var callArgs string
if m.Variadic == nil {
if len(argNames) > 0 {
callArgs = ", " + strings.Join(argNames, ", ")
}
} else {
// Non-trivial. The generated code must build a []interface{},
// but the variadic argument may be any type.
idVarArgs := ia.allocateIdentifier("varargs")
idVArg := ia.allocateIdentifier("a")
g.p("%s := []interface{}{%s}", idVarArgs, strings.Join(argNames[:len(argNames)-1], ", "))
g.p("for _, %s := range %s {", idVArg, argNames[len(argNames)-1])
g.in()
g.p("%s = append(%s, %s)", idVarArgs, idVarArgs, idVArg)
g.out()
g.p("}")
callArgs = ", " + idVarArgs + "..."
}
if len(m.Out) == 0 {
g.p(`%v.ctrl.Call(%v, %q%v)`, idRecv, idRecv, m.Name, callArgs)
} else {
idRet := ia.allocateIdentifier("ret")
g.p(`%v := %v.ctrl.Call(%v, %q%v)`, idRet, idRecv, idRecv, m.Name, callArgs)
// Go does not allow "naked" type assertions on nil values, so we use the two-value form here.
// The value of that is either (x.(T), true) or (Z, false), where Z is the zero value for T.
// Happily, this coincides with the semantics we want here.
retNames := make([]string, len(rets))
for i, t := range rets {
retNames[i] = ia.allocateIdentifier(fmt.Sprintf("ret%d", i))
g.p("%s, _ := %s[%d].(%s)", retNames[i], idRet, i, t)
}
g.p("return " + strings.Join(retNames, ", "))
}
g.out()
g.p("}")
return nil
}
func (g *generator) GenerateMockRecorderMethod(mockType string, m *model.Method) error {
argNames := g.getArgNames(m)
var argString string
if m.Variadic == nil {
argString = strings.Join(argNames, ", ")
} else {
argString = strings.Join(argNames[:len(argNames)-1], ", ")
}
if argString != "" {
argString += " interface{}"
}
if m.Variadic != nil {
if argString != "" {
argString += ", "
}
argString += fmt.Sprintf("%s ...interface{}", argNames[len(argNames)-1])
}
ia := newIdentifierAllocator(argNames)
idRecv := ia.allocateIdentifier("mr")
g.p("// %v indicates an expected call of %v.", m.Name, m.Name)
g.p("func (%s *%vMockRecorder) %v(%v) *gomock.Call {", idRecv, mockType, m.Name, argString)
g.in()
g.p("%s.mock.ctrl.T.Helper()", idRecv)
var callArgs string
if m.Variadic == nil {
if len(argNames) > 0 {
callArgs = ", " + strings.Join(argNames, ", ")
}
} else {
if len(argNames) == 1 {
// Easy: just use ... to push the arguments through.
callArgs = ", " + argNames[0] + "..."
} else {
// Hard: create a temporary slice.
idVarArgs := ia.allocateIdentifier("varargs")
g.p("%s := append([]interface{}{%s}, %s...)",
idVarArgs,
strings.Join(argNames[:len(argNames)-1], ", "),
argNames[len(argNames)-1])
callArgs = ", " + idVarArgs + "..."
}
}
g.p(`return %s.mock.ctrl.RecordCallWithMethodType(%s.mock, "%s", reflect.TypeOf((*%s)(nil).%s)%s)`, idRecv, idRecv, m.Name, mockType, m.Name, callArgs)
g.out()
g.p("}")
return nil
}
func (g *generator) getArgNames(m *model.Method) []string {
argNames := make([]string, len(m.In))
for i, p := range m.In {
name := p.Name
if name == "" || name == "_" {
name = fmt.Sprintf("arg%d", i)
}
argNames[i] = name
}
if m.Variadic != nil {
name := m.Variadic.Name
if name == "" {
name = fmt.Sprintf("arg%d", len(m.In))
}
argNames = append(argNames, name)
}
return argNames
}
func (g *generator) getArgTypes(m *model.Method, pkgOverride string) []string {
argTypes := make([]string, len(m.In))
for i, p := range m.In {
argTypes[i] = p.Type.String(g.packageMap, pkgOverride)
}
if m.Variadic != nil {
argTypes = append(argTypes, "..."+m.Variadic.Type.String(g.packageMap, pkgOverride))
}
return argTypes
}
type identifierAllocator map[string]struct{}
func newIdentifierAllocator(taken []string) identifierAllocator {
a := make(identifierAllocator, len(taken))
for _, s := range taken {
a[s] = struct{}{}
}
return a
}
func (o identifierAllocator) allocateIdentifier(want string) string {
id := want
for i := 2; ; i++ {
if _, ok := o[id]; !ok {
o[id] = struct{}{}
return id
}
id = want + "_" + strconv.Itoa(i)
}
}
// Output returns the generator's output, formatted in the standard Go style.
func (g *generator) Output() []byte {
src, err := toolsimports.Process(g.destination, g.buf.Bytes(), nil)
if err != nil {
log.Fatalf("Failed to format generated source code: %s\n%s", err, g.buf.String())
}
return src
}
// createPackageMap returns a map of import path to package name
// for specified importPaths.
func createPackageMap(importPaths []string) map[string]string {
var pkg struct {
Name string
ImportPath string
}
pkgMap := make(map[string]string)
b := bytes.NewBuffer(nil)
args := []string{"list", "-json"}
args = append(args, importPaths...)
cmd := exec.Command("go", args...)
cmd.Stdout = b
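// The error from Run is not checked here: whatever output 'go list'
// produced is decoded below, and entries it could not resolve are
// simply absent from the returned map.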
cmd.Run()
dec := json.NewDecoder(b)
for dec.More() {
err := dec.Decode(&pkg)
if err != nil {
log.Printf("failed to decode 'go list' output: %v", err)
continue
}
pkgMap[pkg.ImportPath] = pkg.Name
}
return pkgMap
}
func printVersion() {
if version != "" {
fmt.Printf("v%s\nCommit: %s\nDate: %s\n", version, commit, date)
} else {
printModuleVersion()
}
}
// parsePackageImport gets the package import path from the source directory.
// An alternative implementation would be:
// cfg := &packages.Config{Mode: packages.NeedName, Tests: true, Dir: srcDir}
// pkgs, err := packages.Load(cfg, "file="+source)
// However, that would call "go list" and slow things down.
func parsePackageImport(srcDir string) (string, error) {
moduleMode := os.Getenv("GO111MODULE")
// trying to find the module
if moduleMode != "off" {
currentDir := srcDir
for {
dat, err := ioutil.ReadFile(filepath.Join(currentDir, "go.mod"))
if os.IsNotExist(err) {
if currentDir == filepath.Dir(currentDir) {
// at the root
break
}
currentDir = filepath.Dir(currentDir)
continue
} else if err != nil {
return "", err
}
modulePath := modfile.ModulePath(dat)
return filepath.ToSlash(filepath.Join(modulePath, strings.TrimPrefix(srcDir, currentDir))), nil
}
}
// fall back to GOPATH mode
goPaths := os.Getenv("GOPATH")
if goPaths == "" {
return "", fmt.Errorf("GOPATH is not set")
}
goPathList := strings.Split(goPaths, string(os.PathListSeparator))
for _, goPath := range goPathList {
sourceRoot := filepath.Join(goPath, "src") + string(os.PathSeparator)
if strings.HasPrefix(srcDir, sourceRoot) {
return filepath.ToSlash(strings.TrimPrefix(srcDir, sourceRoot)), nil
}
}
return "", errOutsideGoPath
}
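For orientation (not part of the vendored file): given a hypothetical interface Foo with a single method Bar(string) error in package foo, the generator above emits a mock along these lines; exact argument names and import grouping depend on source vs. reflect mode and on the goimports pass in Output().

// Code generated by MockGen. DO NOT EDIT.
// Source: example.com/foo (interfaces: Foo)

// Package mock_foo is a generated GoMock package.
package mock_foo

import (
	reflect "reflect"

	gomock "github.com/golang/mock/gomock"
)

// MockFoo is a mock of Foo interface.
type MockFoo struct {
	ctrl     *gomock.Controller
	recorder *MockFooMockRecorder
}

// MockFooMockRecorder is the mock recorder for MockFoo.
type MockFooMockRecorder struct {
	mock *MockFoo
}

// NewMockFoo creates a new mock instance.
func NewMockFoo(ctrl *gomock.Controller) *MockFoo {
	mock := &MockFoo{ctrl: ctrl}
	mock.recorder = &MockFooMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFoo) EXPECT() *MockFooMockRecorder {
	return m.recorder
}

// Bar mocks base method.
func (m *MockFoo) Bar(arg0 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Bar", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// Bar indicates an expected call of Bar.
func (mr *MockFooMockRecorder) Bar(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bar", reflect.TypeOf((*MockFoo)(nil).Bar), arg0)
}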

495
vendor/github.com/golang/mock/mockgen/model/model.go generated vendored Normal file

@ -0,0 +1,495 @@
// Copyright 2012 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package model contains the data model necessary for generating mock implementations.
package model
import (
"encoding/gob"
"fmt"
"io"
"reflect"
"strings"
)
// pkgPath is the importable path for package model
const pkgPath = "github.com/golang/mock/mockgen/model"
// Package is a Go package. It may be a subset.
type Package struct {
Name string
PkgPath string
Interfaces []*Interface
DotImports []string
}
// Print writes the package name and its exported interfaces.
func (pkg *Package) Print(w io.Writer) {
_, _ = fmt.Fprintf(w, "package %s\n", pkg.Name)
for _, intf := range pkg.Interfaces {
intf.Print(w)
}
}
// Imports returns the imports needed by the Package as a set of import paths.
func (pkg *Package) Imports() map[string]bool {
im := make(map[string]bool)
for _, intf := range pkg.Interfaces {
intf.addImports(im)
}
return im
}
// Interface is a Go interface.
type Interface struct {
Name string
Methods []*Method
}
// Print writes the interface name and its methods.
func (intf *Interface) Print(w io.Writer) {
_, _ = fmt.Fprintf(w, "interface %s\n", intf.Name)
for _, m := range intf.Methods {
m.Print(w)
}
}
func (intf *Interface) addImports(im map[string]bool) {
for _, m := range intf.Methods {
m.addImports(im)
}
}
// AddMethod adds a new method, de-duplicating by method name.
func (intf *Interface) AddMethod(m *Method) {
for _, me := range intf.Methods {
if me.Name == m.Name {
return
}
}
intf.Methods = append(intf.Methods, m)
}
// Method is a single method of an interface.
type Method struct {
Name string
In, Out []*Parameter
Variadic *Parameter // may be nil
}
// Print writes the method name and its signature.
func (m *Method) Print(w io.Writer) {
_, _ = fmt.Fprintf(w, " - method %s\n", m.Name)
if len(m.In) > 0 {
_, _ = fmt.Fprintf(w, " in:\n")
for _, p := range m.In {
p.Print(w)
}
}
if m.Variadic != nil {
_, _ = fmt.Fprintf(w, " ...:\n")
m.Variadic.Print(w)
}
if len(m.Out) > 0 {
_, _ = fmt.Fprintf(w, " out:\n")
for _, p := range m.Out {
p.Print(w)
}
}
}
func (m *Method) addImports(im map[string]bool) {
for _, p := range m.In {
p.Type.addImports(im)
}
if m.Variadic != nil {
m.Variadic.Type.addImports(im)
}
for _, p := range m.Out {
p.Type.addImports(im)
}
}
// Parameter is an argument or return parameter of a method.
type Parameter struct {
Name string // may be empty
Type Type
}
// Print writes a method parameter.
func (p *Parameter) Print(w io.Writer) {
n := p.Name
if n == "" {
n = `""`
}
_, _ = fmt.Fprintf(w, " - %v: %v\n", n, p.Type.String(nil, ""))
}
// Type is a Go type.
type Type interface {
String(pm map[string]string, pkgOverride string) string
addImports(im map[string]bool)
}
func init() {
gob.Register(&ArrayType{})
gob.Register(&ChanType{})
gob.Register(&FuncType{})
gob.Register(&MapType{})
gob.Register(&NamedType{})
gob.Register(&PointerType{})
// Call gob.RegisterName to make sure the same name is registered for both
// the gob encoder and decoder.
//
// For a non-pointer type, gob.Register derives the name to register from the
// full package path via rt.PkgPath(). If your project has a vendor
// directory, PkgPath may return a path like this:
// ../../../vendor/github.com/golang/mock/mockgen/model
gob.RegisterName(pkgPath+".PredeclaredType", PredeclaredType(""))
}
// ArrayType is an array or slice type.
type ArrayType struct {
Len int // -1 for slices, >= 0 for arrays
Type Type
}
func (at *ArrayType) String(pm map[string]string, pkgOverride string) string {
s := "[]"
if at.Len > -1 {
s = fmt.Sprintf("[%d]", at.Len)
}
return s + at.Type.String(pm, pkgOverride)
}
func (at *ArrayType) addImports(im map[string]bool) { at.Type.addImports(im) }
// ChanType is a channel type.
type ChanType struct {
Dir ChanDir // 0, 1 or 2
Type Type
}
func (ct *ChanType) String(pm map[string]string, pkgOverride string) string {
s := ct.Type.String(pm, pkgOverride)
if ct.Dir == RecvDir {
return "<-chan " + s
}
if ct.Dir == SendDir {
return "chan<- " + s
}
return "chan " + s
}
func (ct *ChanType) addImports(im map[string]bool) { ct.Type.addImports(im) }
// ChanDir is a channel direction.
type ChanDir int
// Constants for channel directions.
const (
RecvDir ChanDir = 1
SendDir ChanDir = 2
)
// FuncType is a function type.
type FuncType struct {
In, Out []*Parameter
Variadic *Parameter // may be nil
}
func (ft *FuncType) String(pm map[string]string, pkgOverride string) string {
args := make([]string, len(ft.In))
for i, p := range ft.In {
args[i] = p.Type.String(pm, pkgOverride)
}
if ft.Variadic != nil {
args = append(args, "..."+ft.Variadic.Type.String(pm, pkgOverride))
}
rets := make([]string, len(ft.Out))
for i, p := range ft.Out {
rets[i] = p.Type.String(pm, pkgOverride)
}
retString := strings.Join(rets, ", ")
if nOut := len(ft.Out); nOut == 1 {
retString = " " + retString
} else if nOut > 1 {
retString = " (" + retString + ")"
}
return "func(" + strings.Join(args, ", ") + ")" + retString
}
func (ft *FuncType) addImports(im map[string]bool) {
for _, p := range ft.In {
p.Type.addImports(im)
}
if ft.Variadic != nil {
ft.Variadic.Type.addImports(im)
}
for _, p := range ft.Out {
p.Type.addImports(im)
}
}
// MapType is a map type.
type MapType struct {
Key, Value Type
}
func (mt *MapType) String(pm map[string]string, pkgOverride string) string {
return "map[" + mt.Key.String(pm, pkgOverride) + "]" + mt.Value.String(pm, pkgOverride)
}
func (mt *MapType) addImports(im map[string]bool) {
mt.Key.addImports(im)
mt.Value.addImports(im)
}
// NamedType is an exported type in a package.
type NamedType struct {
Package string // may be empty
Type string
}
func (nt *NamedType) String(pm map[string]string, pkgOverride string) string {
if pkgOverride == nt.Package {
return nt.Type
}
prefix := pm[nt.Package]
if prefix != "" {
return prefix + "." + nt.Type
}
return nt.Type
}
func (nt *NamedType) addImports(im map[string]bool) {
if nt.Package != "" {
im[nt.Package] = true
}
}
// PointerType is a pointer to another type.
type PointerType struct {
Type Type
}
func (pt *PointerType) String(pm map[string]string, pkgOverride string) string {
return "*" + pt.Type.String(pm, pkgOverride)
}
func (pt *PointerType) addImports(im map[string]bool) { pt.Type.addImports(im) }
// PredeclaredType is a predeclared type such as "int".
type PredeclaredType string
func (pt PredeclaredType) String(map[string]string, string) string { return string(pt) }
func (pt PredeclaredType) addImports(map[string]bool) {}
// The following code is intended to be called by the program generated by ../reflect.go.
// InterfaceFromInterfaceType returns a pointer to an interface for the
// given reflection interface type.
func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) {
if it.Kind() != reflect.Interface {
return nil, fmt.Errorf("%v is not an interface", it)
}
intf := &Interface{}
for i := 0; i < it.NumMethod(); i++ {
mt := it.Method(i)
// TODO: need to skip unexported methods? or just raise an error?
m := &Method{
Name: mt.Name,
}
var err error
m.In, m.Variadic, m.Out, err = funcArgsFromType(mt.Type)
if err != nil {
return nil, err
}
intf.AddMethod(m)
}
return intf, nil
}
// t's Kind must be a reflect.Func.
func funcArgsFromType(t reflect.Type) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) {
nin := t.NumIn()
if t.IsVariadic() {
nin--
}
var p *Parameter
for i := 0; i < nin; i++ {
p, err = parameterFromType(t.In(i))
if err != nil {
return
}
in = append(in, p)
}
if t.IsVariadic() {
p, err = parameterFromType(t.In(nin).Elem())
if err != nil {
return
}
variadic = p
}
for i := 0; i < t.NumOut(); i++ {
p, err = parameterFromType(t.Out(i))
if err != nil {
return
}
out = append(out, p)
}
return
}
func parameterFromType(t reflect.Type) (*Parameter, error) {
tt, err := typeFromType(t)
if err != nil {
return nil, err
}
return &Parameter{Type: tt}, nil
}
var errorType = reflect.TypeOf((*error)(nil)).Elem()
var byteType = reflect.TypeOf(byte(0))
func typeFromType(t reflect.Type) (Type, error) {
// Hack workaround for https://golang.org/issue/3853.
// This explicit check should not be necessary.
if t == byteType {
return PredeclaredType("byte"), nil
}
if imp := t.PkgPath(); imp != "" {
return &NamedType{
Package: impPath(imp),
Type: t.Name(),
}, nil
}
// only unnamed or predeclared types after here
// Lots of types have element types. Let's do the parsing and error checking for all of them.
var elemType Type
switch t.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
var err error
elemType, err = typeFromType(t.Elem())
if err != nil {
return nil, err
}
}
switch t.Kind() {
case reflect.Array:
return &ArrayType{
Len: t.Len(),
Type: elemType,
}, nil
case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String:
return PredeclaredType(t.Kind().String()), nil
case reflect.Chan:
var dir ChanDir
switch t.ChanDir() {
case reflect.RecvDir:
dir = RecvDir
case reflect.SendDir:
dir = SendDir
}
return &ChanType{
Dir: dir,
Type: elemType,
}, nil
case reflect.Func:
in, variadic, out, err := funcArgsFromType(t)
if err != nil {
return nil, err
}
return &FuncType{
In: in,
Out: out,
Variadic: variadic,
}, nil
case reflect.Interface:
// Two special interfaces.
if t.NumMethod() == 0 {
return PredeclaredType("interface{}"), nil
}
if t == errorType {
return PredeclaredType("error"), nil
}
case reflect.Map:
kt, err := typeFromType(t.Key())
if err != nil {
return nil, err
}
return &MapType{
Key: kt,
Value: elemType,
}, nil
case reflect.Ptr:
return &PointerType{
Type: elemType,
}, nil
case reflect.Slice:
return &ArrayType{
Len: -1,
Type: elemType,
}, nil
case reflect.Struct:
if t.NumField() == 0 {
return PredeclaredType("struct{}"), nil
}
}
// TODO: Struct, UnsafePointer
return nil, fmt.Errorf("can't yet turn %v (%v) into a model.Type", t, t.Kind())
}
// impPath sanitizes the package path returned by `PkgPath` method of a reflect Type so that
// it is importable. PkgPath might return a path that includes "vendor". These paths do not
// compile, so we need to remove everything up to and including "/vendor/".
// See https://github.com/golang/go/issues/12019.
func impPath(imp string) string {
if strings.HasPrefix(imp, "vendor/") {
imp = "/" + imp
}
if i := strings.LastIndex(imp, "/vendor/"); i != -1 {
imp = imp[i+len("/vendor/"):]
}
return imp
}
// ErrorInterface represents the built-in error interface.
var ErrorInterface = Interface{
Name: "error",
Methods: []*Method{
{
Name: "Error",
Out: []*Parameter{
{
Name: "",
Type: PredeclaredType("string"),
},
},
},
},
}
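As a quick, hypothetical illustration of the data model above (not part of the vendored code), the reflection helpers in this file can be exercised directly; the trailing comment shows the rough Print output for io.Reader's single Read([]byte) (int, error) method.

package main

import (
	"fmt"
	"io"
	"os"
	"reflect"

	"github.com/golang/mock/mockgen/model"
)

func main() {
	// Build a model.Interface from a real interface type, the same way the
	// generated reflection program does.
	intf, err := model.InterfaceFromInterfaceType(reflect.TypeOf((*io.Reader)(nil)).Elem())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	intf.Name = "Reader" // reflection cannot recover the interface name
	intf.Print(os.Stdout)
	// Prints roughly:
	//   interface Reader
	//    - method Read
	//      in:
	//       - "": []byte
	//      out:
	//       - "": int
	//       - "": error
}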

644
vendor/github.com/golang/mock/mockgen/parse.go generated vendored Normal file

@ -0,0 +1,644 @@
// Copyright 2012 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
// This file contains the model construction by parsing source files.
import (
"errors"
"flag"
"fmt"
"go/ast"
"go/build"
"go/importer"
"go/parser"
"go/token"
"go/types"
"io/ioutil"
"log"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/golang/mock/mockgen/model"
)
var (
imports = flag.String("imports", "", "(source mode) Comma-separated name=path pairs of explicit imports to use.")
auxFiles = flag.String("aux_files", "", "(source mode) Comma-separated pkg=path pairs of auxiliary Go source files.")
)
// sourceMode generates mocks via source file.
func sourceMode(source string) (*model.Package, error) {
srcDir, err := filepath.Abs(filepath.Dir(source))
if err != nil {
return nil, fmt.Errorf("failed getting source directory: %v", err)
}
packageImport, err := parsePackageImport(srcDir)
if err != nil {
return nil, err
}
fs := token.NewFileSet()
file, err := parser.ParseFile(fs, source, nil, 0)
if err != nil {
return nil, fmt.Errorf("failed parsing source file %v: %v", source, err)
}
p := &fileParser{
fileSet: fs,
imports: make(map[string]importedPackage),
importedInterfaces: make(map[string]map[string]*ast.InterfaceType),
auxInterfaces: make(map[string]map[string]*ast.InterfaceType),
srcDir: srcDir,
}
// Handle -imports.
dotImports := make(map[string]bool)
if *imports != "" {
for _, kv := range strings.Split(*imports, ",") {
eq := strings.Index(kv, "=")
k, v := kv[:eq], kv[eq+1:]
if k == "." {
dotImports[v] = true
} else {
p.imports[k] = importedPkg{path: v}
}
}
}
// Handle -aux_files.
if err := p.parseAuxFiles(*auxFiles); err != nil {
return nil, err
}
p.addAuxInterfacesFromFile(packageImport, file) // this file
pkg, err := p.parseFile(packageImport, file)
if err != nil {
return nil, err
}
for pkgPath := range dotImports {
pkg.DotImports = append(pkg.DotImports, pkgPath)
}
return pkg, nil
}
type importedPackage interface {
Path() string
Parser() *fileParser
}
type importedPkg struct {
path string
parser *fileParser
}
func (i importedPkg) Path() string { return i.path }
func (i importedPkg) Parser() *fileParser { return i.parser }
// duplicateImport is a bit of a misnomer. Currently the parser can't
// handle cases of multi-file packages importing different packages
// under the same name. Often these imports would not be problematic,
// so this type lets us defer raising an error unless the package name
// is actually used.
type duplicateImport struct {
name string
duplicates []string
}
func (d duplicateImport) Error() string {
return fmt.Sprintf("%q is ambiguous because of duplicate imports: %v", d.name, d.duplicates)
}
func (d duplicateImport) Path() string { log.Fatal(d.Error()); return "" }
func (d duplicateImport) Parser() *fileParser { log.Fatal(d.Error()); return nil }
type fileParser struct {
fileSet *token.FileSet
imports map[string]importedPackage // package name => imported package
importedInterfaces map[string]map[string]*ast.InterfaceType // package (or "") => name => interface
auxFiles []*ast.File
auxInterfaces map[string]map[string]*ast.InterfaceType // package (or "") => name => interface
srcDir string
}
func (p *fileParser) errorf(pos token.Pos, format string, args ...interface{}) error {
ps := p.fileSet.Position(pos)
format = "%s:%d:%d: " + format
args = append([]interface{}{ps.Filename, ps.Line, ps.Column}, args...)
return fmt.Errorf(format, args...)
}
func (p *fileParser) parseAuxFiles(auxFiles string) error {
auxFiles = strings.TrimSpace(auxFiles)
if auxFiles == "" {
return nil
}
for _, kv := range strings.Split(auxFiles, ",") {
parts := strings.SplitN(kv, "=", 2)
if len(parts) != 2 {
return fmt.Errorf("bad aux file spec: %v", kv)
}
pkg, fpath := parts[0], parts[1]
file, err := parser.ParseFile(p.fileSet, fpath, nil, 0)
if err != nil {
return err
}
p.auxFiles = append(p.auxFiles, file)
p.addAuxInterfacesFromFile(pkg, file)
}
return nil
}
func (p *fileParser) addAuxInterfacesFromFile(pkg string, file *ast.File) {
if _, ok := p.auxInterfaces[pkg]; !ok {
p.auxInterfaces[pkg] = make(map[string]*ast.InterfaceType)
}
for ni := range iterInterfaces(file) {
p.auxInterfaces[pkg][ni.name.Name] = ni.it
}
}
// parseFile loads all file imports and auxiliary files import into the
// fileParser, parses all file interfaces and returns package model.
func (p *fileParser) parseFile(importPath string, file *ast.File) (*model.Package, error) {
allImports, dotImports := importsOfFile(file)
// Don't stomp imports provided by -imports. Those should take precedence.
for pkg, pkgI := range allImports {
if _, ok := p.imports[pkg]; !ok {
p.imports[pkg] = pkgI
}
}
// Add imports from auxiliary files, which might be needed for embedded interfaces.
// Don't stomp any other imports.
for _, f := range p.auxFiles {
auxImports, _ := importsOfFile(f)
for pkg, pkgI := range auxImports {
if _, ok := p.imports[pkg]; !ok {
p.imports[pkg] = pkgI
}
}
}
var is []*model.Interface
for ni := range iterInterfaces(file) {
i, err := p.parseInterface(ni.name.String(), importPath, ni.it)
if err != nil {
return nil, err
}
is = append(is, i)
}
return &model.Package{
Name: file.Name.String(),
PkgPath: importPath,
Interfaces: is,
DotImports: dotImports,
}, nil
}
// parsePackage loads package specified by path, parses it and returns
// a new fileParser with the parsed imports and interfaces.
func (p *fileParser) parsePackage(path string) (*fileParser, error) {
newP := &fileParser{
fileSet: token.NewFileSet(),
imports: make(map[string]importedPackage),
importedInterfaces: make(map[string]map[string]*ast.InterfaceType),
auxInterfaces: make(map[string]map[string]*ast.InterfaceType),
srcDir: p.srcDir,
}
var pkgs map[string]*ast.Package
if imp, err := build.Import(path, newP.srcDir, build.FindOnly); err != nil {
return nil, err
} else if pkgs, err = parser.ParseDir(newP.fileSet, imp.Dir, nil, 0); err != nil {
return nil, err
}
for _, pkg := range pkgs {
file := ast.MergePackageFiles(pkg, ast.FilterFuncDuplicates|ast.FilterUnassociatedComments|ast.FilterImportDuplicates)
if _, ok := newP.importedInterfaces[path]; !ok {
newP.importedInterfaces[path] = make(map[string]*ast.InterfaceType)
}
for ni := range iterInterfaces(file) {
newP.importedInterfaces[path][ni.name.Name] = ni.it
}
imports, _ := importsOfFile(file)
for pkgName, pkgI := range imports {
newP.imports[pkgName] = pkgI
}
}
return newP, nil
}
func (p *fileParser) parseInterface(name, pkg string, it *ast.InterfaceType) (*model.Interface, error) {
iface := &model.Interface{Name: name}
for _, field := range it.Methods.List {
switch v := field.Type.(type) {
case *ast.FuncType:
if nn := len(field.Names); nn != 1 {
return nil, fmt.Errorf("expected one name for interface %v, got %d", iface.Name, nn)
}
m := &model.Method{
Name: field.Names[0].String(),
}
var err error
m.In, m.Variadic, m.Out, err = p.parseFunc(pkg, v)
if err != nil {
return nil, err
}
iface.AddMethod(m)
case *ast.Ident:
// Embedded interface in this package.
embeddedIfaceType := p.auxInterfaces[pkg][v.String()]
if embeddedIfaceType == nil {
embeddedIfaceType = p.importedInterfaces[pkg][v.String()]
}
var embeddedIface *model.Interface
if embeddedIfaceType != nil {
var err error
embeddedIface, err = p.parseInterface(v.String(), pkg, embeddedIfaceType)
if err != nil {
return nil, err
}
} else {
// This is built-in error interface.
if v.String() == model.ErrorInterface.Name {
embeddedIface = &model.ErrorInterface
} else {
return nil, p.errorf(v.Pos(), "unknown embedded interface %s", v.String())
}
}
// Copy the methods.
for _, m := range embeddedIface.Methods {
iface.AddMethod(m)
}
case *ast.SelectorExpr:
// Embedded interface in another package.
filePkg, sel := v.X.(*ast.Ident).String(), v.Sel.String()
embeddedPkg, ok := p.imports[filePkg]
if !ok {
return nil, p.errorf(v.X.Pos(), "unknown package %s", filePkg)
}
var embeddedIface *model.Interface
var err error
embeddedIfaceType := p.auxInterfaces[filePkg][sel]
if embeddedIfaceType != nil {
embeddedIface, err = p.parseInterface(sel, filePkg, embeddedIfaceType)
if err != nil {
return nil, err
}
} else {
path := embeddedPkg.Path()
parser := embeddedPkg.Parser()
if parser == nil {
ip, err := p.parsePackage(path)
if err != nil {
return nil, p.errorf(v.Pos(), "could not parse package %s: %v", path, err)
}
parser = ip
p.imports[filePkg] = importedPkg{
path: embeddedPkg.Path(),
parser: parser,
}
}
if embeddedIfaceType = parser.importedInterfaces[path][sel]; embeddedIfaceType == nil {
return nil, p.errorf(v.Pos(), "unknown embedded interface %s.%s", path, sel)
}
embeddedIface, err = parser.parseInterface(sel, path, embeddedIfaceType)
if err != nil {
return nil, err
}
}
// Copy the methods.
// TODO: apply shadowing rules.
for _, m := range embeddedIface.Methods {
iface.AddMethod(m)
}
default:
return nil, fmt.Errorf("don't know how to mock method of type %T", field.Type)
}
}
return iface, nil
}
func (p *fileParser) parseFunc(pkg string, f *ast.FuncType) (inParam []*model.Parameter, variadic *model.Parameter, outParam []*model.Parameter, err error) {
if f.Params != nil {
regParams := f.Params.List
if isVariadic(f) {
n := len(regParams)
varParams := regParams[n-1:]
regParams = regParams[:n-1]
vp, err := p.parseFieldList(pkg, varParams)
if err != nil {
return nil, nil, nil, p.errorf(varParams[0].Pos(), "failed parsing variadic argument: %v", err)
}
variadic = vp[0]
}
inParam, err = p.parseFieldList(pkg, regParams)
if err != nil {
return nil, nil, nil, p.errorf(f.Pos(), "failed parsing arguments: %v", err)
}
}
if f.Results != nil {
outParam, err = p.parseFieldList(pkg, f.Results.List)
if err != nil {
return nil, nil, nil, p.errorf(f.Pos(), "failed parsing returns: %v", err)
}
}
return
}
func (p *fileParser) parseFieldList(pkg string, fields []*ast.Field) ([]*model.Parameter, error) {
nf := 0
for _, f := range fields {
nn := len(f.Names)
if nn == 0 {
nn = 1 // anonymous parameter
}
nf += nn
}
if nf == 0 {
return nil, nil
}
ps := make([]*model.Parameter, nf)
i := 0 // destination index
for _, f := range fields {
t, err := p.parseType(pkg, f.Type)
if err != nil {
return nil, err
}
if len(f.Names) == 0 {
// anonymous arg
ps[i] = &model.Parameter{Type: t}
i++
continue
}
for _, name := range f.Names {
ps[i] = &model.Parameter{Name: name.Name, Type: t}
i++
}
}
return ps, nil
}
func (p *fileParser) parseType(pkg string, typ ast.Expr) (model.Type, error) {
switch v := typ.(type) {
case *ast.ArrayType:
ln := -1
if v.Len != nil {
var value string
switch val := v.Len.(type) {
case (*ast.BasicLit):
value = val.Value
case (*ast.Ident):
// when the length is a const defined locally
value = val.Obj.Decl.(*ast.ValueSpec).Values[0].(*ast.BasicLit).Value
case (*ast.SelectorExpr):
// when the length is a const defined in an external package
usedPkg, err := importer.Default().Import(fmt.Sprintf("%s", val.X))
if err != nil {
return nil, p.errorf(v.Len.Pos(), "unknown package in array length: %v", err)
}
ev, err := types.Eval(token.NewFileSet(), usedPkg, token.NoPos, val.Sel.Name)
if err != nil {
return nil, p.errorf(v.Len.Pos(), "unknown constant in array length: %v", err)
}
value = ev.Value.String()
}
x, err := strconv.Atoi(value)
if err != nil {
return nil, p.errorf(v.Len.Pos(), "bad array size: %v", err)
}
ln = x
}
t, err := p.parseType(pkg, v.Elt)
if err != nil {
return nil, err
}
return &model.ArrayType{Len: ln, Type: t}, nil
case *ast.ChanType:
t, err := p.parseType(pkg, v.Value)
if err != nil {
return nil, err
}
var dir model.ChanDir
if v.Dir == ast.SEND {
dir = model.SendDir
}
if v.Dir == ast.RECV {
dir = model.RecvDir
}
return &model.ChanType{Dir: dir, Type: t}, nil
case *ast.Ellipsis:
// assume we're parsing a variadic argument
return p.parseType(pkg, v.Elt)
case *ast.FuncType:
in, variadic, out, err := p.parseFunc(pkg, v)
if err != nil {
return nil, err
}
return &model.FuncType{In: in, Out: out, Variadic: variadic}, nil
case *ast.Ident:
if v.IsExported() {
// `pkg` may be an aliased imported pkg
// if so, patch the import w/ the fully qualified import
maybeImportedPkg, ok := p.imports[pkg]
if ok {
pkg = maybeImportedPkg.Path()
}
// assume type in this package
return &model.NamedType{Package: pkg, Type: v.Name}, nil
}
// assume predeclared type
return model.PredeclaredType(v.Name), nil
case *ast.InterfaceType:
if v.Methods != nil && len(v.Methods.List) > 0 {
return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed interface types")
}
return model.PredeclaredType("interface{}"), nil
case *ast.MapType:
key, err := p.parseType(pkg, v.Key)
if err != nil {
return nil, err
}
value, err := p.parseType(pkg, v.Value)
if err != nil {
return nil, err
}
return &model.MapType{Key: key, Value: value}, nil
case *ast.SelectorExpr:
pkgName := v.X.(*ast.Ident).String()
pkg, ok := p.imports[pkgName]
if !ok {
return nil, p.errorf(v.Pos(), "unknown package %q", pkgName)
}
return &model.NamedType{Package: pkg.Path(), Type: v.Sel.String()}, nil
case *ast.StarExpr:
t, err := p.parseType(pkg, v.X)
if err != nil {
return nil, err
}
return &model.PointerType{Type: t}, nil
case *ast.StructType:
if v.Fields != nil && len(v.Fields.List) > 0 {
return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed struct types")
}
return model.PredeclaredType("struct{}"), nil
case *ast.ParenExpr:
return p.parseType(pkg, v.X)
}
return nil, fmt.Errorf("don't know how to parse type %T", typ)
}
// importsOfFile returns a map of package name to import path
// of the imports in file.
func importsOfFile(file *ast.File) (normalImports map[string]importedPackage, dotImports []string) {
var importPaths []string
for _, is := range file.Imports {
if is.Name != nil {
continue
}
importPath := is.Path.Value[1 : len(is.Path.Value)-1] // remove quotes
importPaths = append(importPaths, importPath)
}
packagesName := createPackageMap(importPaths)
normalImports = make(map[string]importedPackage)
dotImports = make([]string, 0)
for _, is := range file.Imports {
var pkgName string
importPath := is.Path.Value[1 : len(is.Path.Value)-1] // remove quotes
if is.Name != nil {
// Named imports are always certain.
if is.Name.Name == "_" {
continue
}
pkgName = is.Name.Name
} else {
pkg, ok := packagesName[importPath]
if !ok {
// Fall back to the import path suffix. Note that this is uncertain.
_, last := path.Split(importPath)
// If the last path component has dots, the first dot-delimited
// field is used as the name.
pkgName = strings.SplitN(last, ".", 2)[0]
} else {
pkgName = pkg
}
}
if pkgName == "." {
dotImports = append(dotImports, importPath)
} else {
if pkg, ok := normalImports[pkgName]; ok {
switch p := pkg.(type) {
case duplicateImport:
normalImports[pkgName] = duplicateImport{
name: p.name,
duplicates: append([]string{importPath}, p.duplicates...),
}
case importedPkg:
normalImports[pkgName] = duplicateImport{
name: pkgName,
duplicates: []string{p.path, importPath},
}
}
} else {
normalImports[pkgName] = importedPkg{path: importPath}
}
}
}
return
}
type namedInterface struct {
name *ast.Ident
it *ast.InterfaceType
}
// Create an iterator over all interfaces in file.
func iterInterfaces(file *ast.File) <-chan namedInterface {
ch := make(chan namedInterface)
go func() {
for _, decl := range file.Decls {
gd, ok := decl.(*ast.GenDecl)
if !ok || gd.Tok != token.TYPE {
continue
}
for _, spec := range gd.Specs {
ts, ok := spec.(*ast.TypeSpec)
if !ok {
continue
}
it, ok := ts.Type.(*ast.InterfaceType)
if !ok {
continue
}
ch <- namedInterface{ts.Name, it}
}
}
close(ch)
}()
return ch
}
// isVariadic returns whether the function is variadic.
func isVariadic(f *ast.FuncType) bool {
nargs := len(f.Params.List)
if nargs == 0 {
return false
}
_, ok := f.Params.List[nargs-1].Type.(*ast.Ellipsis)
return ok
}
// packageNameOfDir gets the package import path for the given directory.
func packageNameOfDir(srcDir string) (string, error) {
files, err := ioutil.ReadDir(srcDir)
if err != nil {
log.Fatal(err)
}
var goFilePath string
for _, file := range files {
if !file.IsDir() && strings.HasSuffix(file.Name(), ".go") {
goFilePath = file.Name()
break
}
}
if goFilePath == "" {
return "", fmt.Errorf("go source file not found %s", srcDir)
}
packageImport, err := parsePackageImport(srcDir)
if err != nil {
return "", err
}
return packageImport, nil
}
var errOutsideGoPath = errors.New("source directory is outside GOPATH")
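To make source mode concrete, here is a small hypothetical input file the parser above handles: parseInterface flattens both the package-local Closer embed and the io.Reader embed into the model, and parseFunc records the variadic parameter of Put.

// store.go -- hypothetical input for: mockgen -source=store.go
package store

import "io"

// Closer is embedded below; its methods are copied into Store.
type Closer interface {
	Close() error
}

// Store mixes regular, variadic, and embedded methods.
type Store interface {
	Closer
	io.Reader
	Put(key string, values ...[]byte) error
}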

256
vendor/github.com/golang/mock/mockgen/reflect.go generated vendored Normal file

@ -0,0 +1,256 @@
// Copyright 2012 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
// This file contains the model construction by reflection.
import (
"bytes"
"encoding/gob"
"flag"
"fmt"
"go/build"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"text/template"
"github.com/golang/mock/mockgen/model"
)
var (
progOnly = flag.Bool("prog_only", false, "(reflect mode) Only generate the reflection program; write it to stdout and exit.")
execOnly = flag.String("exec_only", "", "(reflect mode) If set, execute this reflection program.")
buildFlags = flag.String("build_flags", "", "(reflect mode) Additional flags for go build.")
)
// reflectMode generates mocks via reflection on an interface.
func reflectMode(importPath string, symbols []string) (*model.Package, error) {
if *execOnly != "" {
return run(*execOnly)
}
program, err := writeProgram(importPath, symbols)
if err != nil {
return nil, err
}
if *progOnly {
if _, err := os.Stdout.Write(program); err != nil {
return nil, err
}
os.Exit(0)
}
wd, _ := os.Getwd()
// Try to run the reflection program in the current working directory.
if p, err := runInDir(program, wd); err == nil {
return p, nil
}
// Try to run the program in the same directory as the input package.
if p, err := build.Import(importPath, wd, build.FindOnly); err == nil {
dir := p.Dir
if p, err := runInDir(program, dir); err == nil {
return p, nil
}
}
// Try to run it in a standard temp directory.
return runInDir(program, "")
}
func writeProgram(importPath string, symbols []string) ([]byte, error) {
var program bytes.Buffer
data := reflectData{
ImportPath: importPath,
Symbols: symbols,
}
if err := reflectProgram.Execute(&program, &data); err != nil {
return nil, err
}
return program.Bytes(), nil
}
// run the given program and parse the output as a model.Package.
func run(program string) (*model.Package, error) {
f, err := ioutil.TempFile("", "")
if err != nil {
return nil, err
}
filename := f.Name()
defer os.Remove(filename)
if err := f.Close(); err != nil {
return nil, err
}
// Run the program.
cmd := exec.Command(program, "-output", filename)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return nil, err
}
f, err = os.Open(filename)
if err != nil {
return nil, err
}
// Process output.
var pkg model.Package
if err := gob.NewDecoder(f).Decode(&pkg); err != nil {
return nil, err
}
if err := f.Close(); err != nil {
return nil, err
}
return &pkg, nil
}
// runInDir writes the given program into the given dir, runs it there, and
// parses the output as a model.Package.
func runInDir(program []byte, dir string) (*model.Package, error) {
// We use TempDir instead of TempFile so we can control the filename.
tmpDir, err := ioutil.TempDir(dir, "gomock_reflect_")
if err != nil {
return nil, err
}
defer func() {
if err := os.RemoveAll(tmpDir); err != nil {
log.Printf("failed to remove temp directory: %s", err)
}
}()
const progSource = "prog.go"
var progBinary = "prog.bin"
if runtime.GOOS == "windows" {
// Windows won't execute a program unless it has a ".exe" suffix.
progBinary += ".exe"
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, progSource), program, 0600); err != nil {
return nil, err
}
cmdArgs := []string{}
cmdArgs = append(cmdArgs, "build")
if *buildFlags != "" {
cmdArgs = append(cmdArgs, strings.Split(*buildFlags, " ")...)
}
cmdArgs = append(cmdArgs, "-o", progBinary, progSource)
// Build the program.
buf := bytes.NewBuffer(nil)
cmd := exec.Command("go", cmdArgs...)
cmd.Dir = tmpDir
cmd.Stdout = os.Stdout
cmd.Stderr = io.MultiWriter(os.Stderr, buf)
if err := cmd.Run(); err != nil {
sErr := buf.String()
if strings.Contains(sErr, `cannot find package "."`) &&
strings.Contains(sErr, "github.com/golang/mock/mockgen/model") {
fmt.Fprint(os.Stderr, "Please reference the steps in the README to fix this error:\n\thttps://github.com/golang/mock#reflect-vendoring-error.")
return nil, err
}
return nil, err
}
return run(filepath.Join(tmpDir, progBinary))
}
type reflectData struct {
ImportPath string
Symbols []string
}
// This program reflects on an interface value, and prints the
// gob encoding of a model.Package to standard output.
// JSON doesn't work because of the model.Type interface.
var reflectProgram = template.Must(template.New("program").Parse(`
package main
import (
"encoding/gob"
"flag"
"fmt"
"os"
"path"
"reflect"
"github.com/golang/mock/mockgen/model"
pkg_ {{printf "%q" .ImportPath}}
)
var output = flag.String("output", "", "The output file name, or empty to use stdout.")
func main() {
flag.Parse()
its := []struct{
sym string
typ reflect.Type
}{
{{range .Symbols}}
{ {{printf "%q" .}}, reflect.TypeOf((*pkg_.{{.}})(nil)).Elem()},
{{end}}
}
pkg := &model.Package{
// NOTE: This behaves contrary to documented behaviour if the
// package name is not the final component of the import path.
// The reflect package doesn't expose the package name, though.
Name: path.Base({{printf "%q" .ImportPath}}),
}
for _, it := range its {
intf, err := model.InterfaceFromInterfaceType(it.typ)
if err != nil {
fmt.Fprintf(os.Stderr, "Reflection: %v\n", err)
os.Exit(1)
}
intf.Name = it.sym
pkg.Interfaces = append(pkg.Interfaces, intf)
}
outfile := os.Stdout
if len(*output) != 0 {
var err error
outfile, err = os.Create(*output)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to open output file %q", *output)
}
defer func() {
if err := outfile.Close(); err != nil {
fmt.Fprintf(os.Stderr, "failed to close output file %q", *output)
os.Exit(1)
}
}()
}
if err := gob.NewEncoder(outfile).Encode(pkg); err != nil {
fmt.Fprintf(os.Stderr, "gob encode: %v\n", err)
os.Exit(1)
}
}
`))
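The handoff between the generated reflection program above and run() is a gob-encoded model.Package. Below is a minimal sketch of that round trip, done in memory here instead of via the temp file mockgen actually uses; the Driver/Open shapes are illustrative only.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"

	"github.com/golang/mock/mockgen/model"
)

func main() {
	// A hand-built package model, standing in for what the reflection
	// program produces from a real import path.
	in := &model.Package{
		Name: "driver",
		Interfaces: []*model.Interface{
			{Name: "Driver", Methods: []*model.Method{{
				Name: "Open",
				In:   []*model.Parameter{{Name: "name", Type: model.PredeclaredType("string")}},
				Out: []*model.Parameter{
					{Type: &model.NamedType{Package: "database/sql/driver", Type: "Conn"}},
					{Type: model.PredeclaredType("error")},
				},
			}}},
		},
	}

	var buf bytes.Buffer
	// What the generated program does to its -output file (or stdout).
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		log.Fatal(err)
	}
	// What run() does with the temp file before handing the model to the generator.
	var out model.Package
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Interfaces[0].Methods[0].Name) // Open
}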

26
vendor/github.com/golang/mock/mockgen/version.1.11.go generated vendored Normal file

@ -0,0 +1,26 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.12
package main
import (
"log"
)
func printModuleVersion() {
log.Printf("No version information is available for Mockgen compiled with " +
"version 1.11")
}

35
vendor/github.com/golang/mock/mockgen/version.1.12.go generated vendored Normal file

@ -0,0 +1,35 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build go1.12
package main
import (
"fmt"
"log"
"runtime/debug"
)
func printModuleVersion() {
if bi, exists := debug.ReadBuildInfo(); exists {
fmt.Println(bi.Main.Version)
} else {
log.Printf("No version information found. Make sure to use " +
"GO111MODULE=on when running 'go get' in order to use a " +
"specific version of the binary.")
}
}


@ -16,6 +16,30 @@ This package provides various compression algorithms.
# changelog
* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
* zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
* s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
* zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
* huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
* s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
* s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
* s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
* Jan 21st, 2023 (v1.15.15)
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
@ -600,6 +624,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
# license

View File

@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error {
// If the buffer is over-read an error is returned.
func (s *Scratch) decompress() error {
br := &s.bits
br.init(s.br.unread())
if err := br.init(s.br.unread()); err != nil {
return err
}
var s1, s2 decoder
// Initialize and decode first state and symbol.

View File

@ -60,6 +60,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits
}
// encFourSymbols adds up to 32 bits from four symbols.
// It will not check if there is space for them,
// so the caller must ensure that b has been flushed recently.
func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
bitsA := encA.nBits
bitsB := bitsA + encB.nBits
bitsC := bitsB + encC.nBits
bitsD := bitsC + encD.nBits
combined := uint64(encA.val) |
(uint64(encB.val) << (bitsA & 63)) |
(uint64(encC.val) << (bitsB & 63)) |
(uint64(encD.val) << (bitsC & 63))
b.bitContainer |= combined << (b.nBits & 63)
b.nBits += bitsD
}
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {

View File

@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
tmp := src[n : n+4]
// tmp should be len 4
bw.flush32()
bw.encTwoSymbols(cTable, tmp[3], tmp[2])
bw.encTwoSymbols(cTable, tmp[1], tmp[0])
bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
}
} else {
for ; n >= 0; n -= 4 {

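For orientation: the hunk above swaps two encTwoSymbols calls for a single encFourSymbols call, so all four Huffman codes are accumulated into one 64-bit container between flushes. The standalone sketch below is not vendored code; the code values and bit lengths are invented, and it only mirrors the packing arithmetic so the "up to 32 bits from four symbols" contract in the doc comment is easy to see.

```go
package main

import "fmt"

// entry stands in for huff0's cTableEntry: a code value plus its bit length.
// The concrete values below are made up purely for illustration.
type entry struct {
	val   uint16
	nBits uint8
}

func main() {
	encA := entry{val: 0x03, nBits: 5}
	encB := entry{val: 0x1b, nBits: 6}
	encC := entry{val: 0x02, nBits: 4}
	encD := entry{val: 0x45, nBits: 7}

	// Same accumulation as encFourSymbols: each code is shifted past the
	// bits already occupied by the previous codes.
	bitsA := encA.nBits
	bitsB := bitsA + encB.nBits
	bitsC := bitsB + encC.nBits
	bitsD := bitsC + encD.nBits
	combined := uint64(encA.val) |
		uint64(encB.val)<<(bitsA&63) |
		uint64(encC.val)<<(bitsB&63) |
		uint64(encD.val)<<(bitsC&63)

	// Per the doc comment, four symbols add at most 32 bits, which is what
	// the preceding bw.flush32() guarantees is still free in the container.
	fmt.Printf("packed %d bits: %#x\n", bitsD, combined)
}
```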
View File

@ -9,6 +9,7 @@ import (
"encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
@ -442,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
}
}
var err error
if debugDecoder {
println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
}
huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil {
println("reading huffman table:", err)

View File

@ -473,7 +473,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
return b.encodeLits(b.literals, rawAllLits)
}
// We want some difference to at least account for the headers.
saved := b.size - len(b.literals) - (b.size >> 5)
saved := b.size - len(b.literals) - (b.size >> 6)
if saved < 16 {
if org == nil {
return errIncompressible
@ -779,10 +779,13 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
}
b.output = wr.out
// Maybe even add a bigger margin.
if len(b.output)-3-bhOffset >= b.size {
// Maybe even add a bigger margin.
// Discard and encode as raw block.
b.output = b.encodeRawTo(b.output[:bhOffset], org)
b.popOffsets()
b.litEnc.Reuse = huff0.ReusePolicyNone
return errIncompressible
return nil
}
// Size is output minus block header.

View File

@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
func (b *byteBuf) readByte() (byte, error) {
bb := *b
if len(bb) < 1 {
return 0, nil
return 0, io.ErrUnexpectedEOF
}
r := bb[0]
*b = bb[1:]
@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
}
func (r *readerWrapper) readByte() (byte, error) {
n2, err := r.r.Read(r.tmp[:1])
n2, err := io.ReadFull(r.r, r.tmp[:1])
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF

View File

@ -455,12 +455,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
}
if len(next.b) > 0 {
n, err := d.current.crc.Write(next.b)
if err == nil {
if n != len(next.b) {
d.current.err = io.ErrShortWrite
}
}
d.current.crc.Write(next.b)
}
if next.err == nil && next.d != nil && next.d.hasCRC {
got := uint32(d.current.crc.Sum64())

View File

@ -32,10 +32,9 @@ type match struct {
length int32
rep int32
est int32
_ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
}
const highScore = 25000
const highScore = maxMatchLen * 8
// estBits will estimate output bits from predefined tables.
func (m *match) estBits(bitsPerByte int32) {
@ -160,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
cv := load6432(src, s)
// Relative offsets
offset1 := int32(blk.recentOffsets[0])
@ -174,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
_ = addLiterals
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
@ -189,53 +186,96 @@ encodeLoop:
panic("offset0 was 0")
}
bestOf := func(a, b *match) *match {
if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
return a
}
return b
}
const goodEnough = 100
const goodEnough = 250
cv := load6432(src, s)
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
matchAt := func(offset int32, s int32, first uint32, rep int32) match {
// Set m to a match at offset if it looks like that will improve compression.
improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
return match{s: s, est: highScore}
return
}
if debugAsserts {
if offset <= 0 {
panic(offset)
}
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
}
}
m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
m.estBits(bitsPerByte)
return m
// Try to quick reject if we already have a long match.
if m.length > 16 {
left := len(src) - int(m.s+m.length)
// If we are too close to the end, keep as is.
if left <= 0 {
return
}
checkLen := m.length - (s - m.s) - 8
if left > 2 && checkLen > 4 {
// Check 4 bytes, 4 bytes from the end of the current match.
a := load3232(src, offset+checkLen)
b := load3232(src, s+checkLen)
if a != b {
return
}
}
}
l := 4 + e.matchlen(s+4, offset+4, src)
if rep < 0 {
// Extend candidate match backwards as far as possible.
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
s--
offset--
l++
}
}
cand := match{offset: offset, s: s, length: l, rep: rep}
cand.estBits(bitsPerByte)
if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
*m = cand
}
}
m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
best := match{s: s, est: highScore}
improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
if canRepeat && best.length < goodEnough {
cv32 := uint32(cv >> 8)
spp := s + 1
m1 := matchAt(spp-offset1, spp, cv32, 1)
m2 := matchAt(spp-offset2, spp, cv32, 2)
m3 := matchAt(spp-offset3, spp, cv32, 3)
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
if best.length > 0 {
cv32 = uint32(cv >> 24)
spp += 2
m1 := matchAt(spp-offset1, spp, cv32, 1)
m2 := matchAt(spp-offset2, spp, cv32, 2)
m3 := matchAt(spp-offset3, spp, cv32, 3)
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
if s == nextEmit {
// Check repeats straight after a match.
improve(&best, s-offset2, s, uint32(cv), 1|4)
improve(&best, s-offset3, s, uint32(cv), 2|4)
if offset1 > 1 {
improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
}
}
// If either no match or a non-repeat match, check at + 1
if best.rep <= 0 {
cv32 := uint32(cv >> 8)
spp := s + 1
improve(&best, spp-offset1, spp, cv32, 1)
improve(&best, spp-offset2, spp, cv32, 2)
improve(&best, spp-offset3, spp, cv32, 3)
if best.rep < 0 {
cv32 = uint32(cv >> 24)
spp += 2
improve(&best, spp-offset1, spp, cv32, 1)
improve(&best, spp-offset2, spp, cv32, 2)
improve(&best, spp-offset3, spp, cv32, 3)
}
}
}
// Load next and check...
@ -250,47 +290,45 @@ encodeLoop:
if s >= sLimit {
break encodeLoop
}
cv = load6432(src, s)
continue
}
s++
candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
cv = load6432(src, s)
cv2 := load6432(src, s+1)
cv = load6432(src, s+1)
cv2 := load6432(src, s+2)
candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1
m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
// Long at s+1, s+2
m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
if false {
// Short at s+3.
// Too often worse...
m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
best = bestOf(best, &m)
improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
}
// See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL]
// Start check at a fixed offset to allow for a few mismatches.
// For this compression level 2 yields the best results.
const skipBeginning = 2
if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
bestEnd := bestOf(best, &m)
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
bestEnd = bestOf(bestEnd, &m)
// Start check at a fixed offset to allow for a few mismatches.
// For this compression level 2 yields the best results.
// We cannot do this if we have already indexed this position.
const skipBeginning = 2
if best.s > s-skipBeginning {
// See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL]
if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
}
}
best = bestEnd
}
}
}
@ -303,51 +341,34 @@ encodeLoop:
// We have a match, we can store the forward value
if best.rep > 0 {
s = best.s
var seq seq
seq.matchLen = uint32(best.length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
start := best.s
// We end the search early, so we don't risk 0 literals
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
if debugAsserts && s <= nextEmit {
panic("s <= nextEmit")
}
repIndex := best.offset
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
seq.matchLen++
}
addLiterals(&seq, start)
addLiterals(&seq, best.s)
// rep 0
seq.offset = uint32(best.rep)
// Repeat. If bit 4 is set, this is a non-lit repeat.
seq.offset = uint32(best.rep & 3)
if debugSequences {
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
index0 := s
// Index old s + 1 -> s - 1
index0 := s + 1
s = best.s + best.length
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, best.length)
}
break encodeLoop
}
// Index skipped...
off := index0 + e.cur
for index0 < s-1 {
for index0 < s {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@ -357,17 +378,19 @@ encodeLoop:
index0++
}
switch best.rep {
case 2:
case 2, 4 | 1:
offset1, offset2 = offset2, offset1
case 3:
case 3, 4 | 2:
offset1, offset2, offset3 = offset3, offset1, offset2
case 4 | 3:
offset1, offset2, offset3 = offset1-1, offset1, offset2
}
cv = load6432(src, s)
continue
}
// A 4-byte match has been found. Update recent offsets.
// We'll later see if more than 4 bytes.
index0 := s + 1
s = best.s
t := best.offset
offset1, offset2, offset3 = s-t, offset1, offset2
@ -380,22 +403,9 @@ encodeLoop:
panic("invalid offset")
}
// Extend the n-byte match as long as possible.
l := best.length
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
l++
}
// Write our sequence
var seq seq
l := best.length
seq.litLen = uint32(s - nextEmit)
seq.matchLen = uint32(l - zstdMinMatch)
if seq.litLen > 0 {
@ -412,10 +422,8 @@ encodeLoop:
break encodeLoop
}
// Index match start+1 (long) -> s - 1
index0 := s - l + 1
// every entry
for index0 < s-1 {
// Index old s + 1 -> s - 1
for index0 < s {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@ -424,50 +432,6 @@ encodeLoop:
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
index0++
}
cv = load6432(src, s)
if !canRepeat {
continue
}
// Check offset 2
for {
o2 := s - offset2
if load3232(src, o2) != uint32(cv) {
// Do regular search
break
}
// Store this, since we have it.
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
l := 4 + e.matchlen(s+4, o2+4, src)
e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
// Since litlen is always 0, this is offset 1.
seq.offset = 1
s += l
nextEmit = s
if debugSequences {
println("sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
// Swap offset 1 and 2.
offset1, offset2 = offset2, offset1
if s >= sLimit {
// Finished
break encodeLoop
}
cv = load6432(src, s)
}
}
if int(nextEmit) < len(src) {

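A reading aid for the new repeat handling in this file: improve is now also probed with rep codes that have bit 4 set (1|4, 2|4, 3|4) for repeats found straight after a match, the sequence writer masks that bit off with best.rep & 3, and the offset-rotation switch matches both forms (case 2, 4 | 1: and so on). The snippet below only illustrates that encoding; it is not vendored code.

```go
package main

import "fmt"

// splitRep decodes a rep code from the best encoder's candidate search:
// the low bits select the recent-offset slot, and bit 4 marks a repeat
// found immediately after a match (no literals in between), mirroring the
// `best.rep & 3` mask and the `case 2, 4 | 1:` switches in the hunk above.
func splitRep(rep int32) (offsetCode uint32, afterMatch bool) {
	return uint32(rep & 3), rep&4 != 0
}

func main() {
	for _, rep := range []int32{1, 2, 3, 4 | 1, 4 | 2, 4 | 3} {
		code, afterMatch := splitRep(rep)
		fmt.Printf("rep=%d -> offset code %d, straight after match: %v\n", rep, code, afterMatch)
	}
}
```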
View File

@ -277,23 +277,9 @@ func (e *Encoder) nextBlock(final bool) error {
s.eofWritten = true
}
err := errIncompressible
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
blk.encodeRaw(src)
// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
case nil:
default:
s.err = err
return err
s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
if s.err != nil {
return s.err
}
_, s.err = s.w.Write(blk.output)
s.nWritten += int64(len(blk.output))
@ -343,22 +329,8 @@ func (e *Encoder) nextBlock(final bool) error {
}
s.wWg.Done()
}()
err := errIncompressible
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
blk.encodeRaw(src)
// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
case nil:
default:
s.writeErr = err
s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
if s.writeErr != nil {
return
}
_, s.writeErr = s.w.Write(blk.output)
@ -568,25 +540,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
err := errIncompressible
oldout := blk.output
if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
// Output directly to dst
blk.output = dst
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
// Output directly to dst
blk.output = dst
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
dst = blk.encodeRawTo(dst, src)
case nil:
dst = blk.output
default:
err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
if err != nil {
panic(err)
}
dst = blk.output
blk.output = oldout
} else {
enc.Reset(e.o.dict, false)
@ -605,25 +567,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
if len(src) == 0 {
blk.last = true
}
err := errIncompressible
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
dst = blk.encodeRawTo(dst, todo)
blk.popOffsets()
case nil:
dst = append(dst, blk.output...)
default:
err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
if err != nil {
panic(err)
}
dst = append(dst, blk.output...)
blk.reset(nil)
}
}

View File

@ -39,7 +39,7 @@ func (o *encoderOptions) setDefault() {
blockSize: maxCompressedBlockSize,
windowSize: 8 << 20,
level: SpeedDefault,
allLitEntropy: true,
allLitEntropy: false,
lowMem: false,
}
}
@ -238,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption {
}
}
if !o.customALEntropy {
o.allLitEntropy = l > SpeedFastest
o.allLitEntropy = l > SpeedDefault
}
return nil

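Net effect of these two hunks: allLitEntropy now defaults to off, and WithEncoderLevel only switches it on for levels above SpeedDefault unless the caller set it explicitly. A minimal sketch of opting back in at the default level, using the package's public NewWriter, WithEncoderLevel, and WithAllLitEntropyCompression options (the payload is placeholder text, not taken from the diff):

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// SpeedDefault no longer enables literal entropy compression on its
	// own after this change; request it explicitly if it is wanted.
	enc, err := zstd.NewWriter(&buf,
		zstd.WithEncoderLevel(zstd.SpeedDefault),
		zstd.WithAllLitEntropyCompression(true),
	)
	if err != nil {
		log.Fatal(err)
	}

	if _, err := enc.Write([]byte("placeholder payload")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed to %d bytes", buf.Len())
}
```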
View File

@ -293,13 +293,9 @@ func (d *frameDec) next(block *blockDec) error {
return nil
}
// checkCRC will check the checksum if the frame has one.
// checkCRC will check the checksum, assuming the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
func (d *frameDec) checkCRC() error {
if !d.HasCheckSum {
return nil
}
// We can overwrite upper tmp now
buf, err := d.rawInput.readSmall(4)
if err != nil {
@ -307,10 +303,6 @@ func (d *frameDec) checkCRC() error {
return err
}
if d.o.ignoreChecksum {
return nil
}
want := binary.LittleEndian.Uint32(buf[:4])
got := uint32(d.crc.Sum64())
@ -326,17 +318,13 @@ func (d *frameDec) checkCRC() error {
return nil
}
// consumeCRC reads the checksum data if the frame has one.
// consumeCRC skips over the checksum, assuming the frame has one.
func (d *frameDec) consumeCRC() error {
if d.HasCheckSum {
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
}
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
}
return nil
return err
}
// runDecoder will run the decoder for the remainder of the frame.
@ -415,15 +403,8 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if d.o.ignoreChecksum {
err = d.consumeCRC()
} else {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
}
}
d.crc.Write(dst[crcStart:])
err = d.checkCRC()
}
}
}

View File

@ -236,9 +236,12 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
maxBlockSize = s.windowSize
}
if debugDecoder {
println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
}
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
printf("reading sequence %d, exceeded available data\n", seqs-i)
printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
return io.ErrUnexpectedEOF
}
var ll, mo, ml int
@ -314,9 +317,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
if size-startSize == 424242 {
panic("here")
}
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
if size > cap(out) {
@ -427,8 +427,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
}
}
// Check if space for literals
if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}

View File

@ -5,6 +5,7 @@ package zstd
import (
"fmt"
"io"
"github.com/klauspost/compress/internal/cpuinfo"
)
@ -134,6 +135,9 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
ctx.ll, ctx.litRemain+ctx.ll)
case errorOverread:
return true, io.ErrUnexpectedEOF
case errorNotEnoughSpace:
size := ctx.outPosition + ctx.ll + ctx.ml
if debugDecoder {
@ -148,7 +152,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
err := br.close()
if err != nil {
@ -203,6 +206,9 @@ const errorNotEnoughLiterals = 4
// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5
// error reported when bits are overread.
const errorOverread = 6
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
@ -248,6 +254,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
litRemain: len(s.literals),
}
if debugDecoder {
println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
}
s.seqSize = 0
lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
var errCode int
@ -278,6 +288,8 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
case errorNotEnoughLiterals:
ll := ctx.seqs[i].ll
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
case errorOverread:
return io.ErrUnexpectedEOF
}
return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
@ -292,6 +304,9 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
if s.seqSize > maxBlockSize {
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
if debugDecoder {
println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)

View File

@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop:
sequenceDecs_decode_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decode_amd64_fill_end
JLE sequenceDecs_decode_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_amd64_fill_end
SHLQ $0x08, DX
@ -49,6 +49,10 @@ sequenceDecs_decode_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decode_amd64_fill_byte_by_byte
sequenceDecs_decode_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decode_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -105,7 +109,7 @@ sequenceDecs_decode_amd64_ml_update_zero:
sequenceDecs_decode_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decode_amd64_fill_2_end
JLE sequenceDecs_decode_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_amd64_fill_2_end
SHLQ $0x08, DX
@ -116,6 +120,10 @@ sequenceDecs_decode_amd64_fill_2_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
sequenceDecs_decode_amd64_fill_2_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decode_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@ -320,6 +328,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
@ -356,7 +369,7 @@ sequenceDecs_decode_56_amd64_main_loop:
sequenceDecs_decode_56_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decode_56_amd64_fill_end
JLE sequenceDecs_decode_56_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_56_amd64_fill_end
SHLQ $0x08, DX
@ -367,6 +380,10 @@ sequenceDecs_decode_56_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
sequenceDecs_decode_56_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decode_56_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -613,6 +630,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
@ -649,7 +671,7 @@ sequenceDecs_decode_bmi2_main_loop:
sequenceDecs_decode_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decode_bmi2_fill_end
JLE sequenceDecs_decode_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_bmi2_fill_end
SHLQ $0x08, AX
@ -660,6 +682,10 @@ sequenceDecs_decode_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
sequenceDecs_decode_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decode_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -700,7 +726,7 @@ sequenceDecs_decode_bmi2_fill_end:
sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decode_bmi2_fill_2_end
JLE sequenceDecs_decode_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_bmi2_fill_2_end
SHLQ $0x08, AX
@ -711,6 +737,10 @@ sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
sequenceDecs_decode_bmi2_fill_2_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decode_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@ -889,6 +919,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
@ -925,7 +960,7 @@ sequenceDecs_decode_56_bmi2_main_loop:
sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decode_56_bmi2_fill_end
JLE sequenceDecs_decode_56_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_56_bmi2_fill_end
SHLQ $0x08, AX
@ -936,6 +971,10 @@ sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
sequenceDecs_decode_56_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decode_56_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -1140,6 +1179,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Requires: SSE
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
@ -1804,7 +1848,7 @@ sequenceDecs_decodeSync_amd64_main_loop:
sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_amd64_fill_end
JLE sequenceDecs_decodeSync_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_amd64_fill_end
SHLQ $0x08, DX
@ -1815,6 +1859,10 @@ sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
sequenceDecs_decodeSync_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -1871,7 +1919,7 @@ sequenceDecs_decodeSync_amd64_ml_update_zero:
sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_amd64_fill_2_end
JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_amd64_fill_2_end
SHLQ $0x08, DX
@ -1882,6 +1930,10 @@ sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
sequenceDecs_decodeSync_amd64_fill_2_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@ -2291,6 +2343,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@ -2356,7 +2413,7 @@ sequenceDecs_decodeSync_bmi2_main_loop:
sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_bmi2_fill_end
JLE sequenceDecs_decodeSync_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_bmi2_fill_end
SHLQ $0x08, AX
@ -2367,6 +2424,10 @@ sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
sequenceDecs_decodeSync_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -2407,7 +2468,7 @@ sequenceDecs_decodeSync_bmi2_fill_end:
sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_bmi2_fill_2_end
JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_bmi2_fill_2_end
SHLQ $0x08, AX
@ -2418,6 +2479,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@ -2801,6 +2866,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@ -2866,7 +2936,7 @@ sequenceDecs_decodeSync_safe_amd64_main_loop:
sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_safe_amd64_fill_end
JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_safe_amd64_fill_end
SHLQ $0x08, DX
@ -2877,6 +2947,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -2933,7 +3007,7 @@ sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
SHLQ $0x08, DX
@ -2944,6 +3018,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@ -3455,6 +3533,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@ -3520,7 +3603,7 @@ sequenceDecs_decodeSync_safe_bmi2_main_loop:
sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
SHLQ $0x08, AX
@ -3531,6 +3614,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
sequenceDecs_decodeSync_safe_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -3571,7 +3658,7 @@ sequenceDecs_decodeSync_safe_bmi2_fill_end:
sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
SHLQ $0x08, AX
@ -3582,6 +3669,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@ -4067,6 +4158,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX

View File

@ -128,11 +128,11 @@ func matchLen(a, b []byte) (n int) {
}
func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[i:])
return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
}
func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:])
return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
}
type byter interface {

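The only change to these helpers is the full slice expression b[:len(b):len(b)], which clamps the slice's capacity to its length before the offset read; the plausible intent is to make explicit that nothing past len(b) is reachable through this view. A tiny standalone illustration of the three-index slice (the buffer contents are arbitrary):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 16, 64) // length 16, spare capacity behind it

	// Full slice expression: same data, but capacity clamped to length,
	// so nothing past buf[15] is reachable through this view.
	clamped := buf[:len(buf):len(buf)]
	fmt.Println(len(buf), cap(buf))         // 16 64
	fmt.Println(len(clamped), cap(clamped)) // 16 16

	// The vendored helpers then read little-endian words at an offset.
	binary.LittleEndian.PutUint64(buf[8:], 0x1122334455667788)
	fmt.Printf("%#x\n", binary.LittleEndian.Uint64(clamped[8:]))
}
```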
View File

@ -9,7 +9,7 @@ the last thing you want from your Logging library (again...).
This does not mean Logrus is dead. Logrus will continue to be maintained for
security, (backwards compatible) bug fixes, and performance (where we are
limited by the interface).
limited by the interface).
I believe Logrus' biggest contribution is to have played a part in today's
widespread use of structured logging in Golang. There doesn't seem to be a
@ -43,7 +43,7 @@ plain text):
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
```text
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
@ -99,7 +99,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr
```
Note that this does add measurable overhead - the cost will depend on the version of Go, but is
between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
environment via benchmarks:
environment via benchmarks:
```
go test -bench=.*CallerTracing
```
@ -317,6 +317,8 @@ log.SetLevel(log.InfoLevel)
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging).
#### Entries
Besides the fields added with `WithField` or `WithFields` some fields are

View File

@ -4,6 +4,7 @@ import (
"bufio"
"io"
"runtime"
"strings"
)
// Writer at INFO level. See WriterLevel for details.
@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}
// Writer returns an io.Writer that writes to the logger at the info log level
func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}
// WriterLevel returns an io.Writer that writes to the logger at the given log level
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
// Determine which log function to use based on the specified log level
switch level {
case TraceLevel:
printFunc = entry.Trace
@ -48,23 +52,51 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
printFunc = entry.Print
}
// Start a new goroutine to scan the input and write it to the logger using the specified print function.
// It splits the input into chunks of up to 64KB to avoid buffer overflows.
go entry.writerScanner(reader, printFunc)
// Set a finalizer function to close the writer when it is garbage collected
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
// writerScanner scans the input from the reader and writes it to the logger
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
// Set the buffer size to the maximum token size to avoid buffer overflows
scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
// Define a split function to split the input into chunks of up to 64KB
chunkSize := bufio.MaxScanTokenSize // 64KB
splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
if len(data) >= chunkSize {
return chunkSize, data[:chunkSize], nil
}
return bufio.ScanLines(data, atEOF)
}
// Use the custom split function to split the input
scanner.Split(splitFunc)
// Scan the input and write it to the logger using the specified print function
for scanner.Scan() {
printFunc(strings.TrimRight(scanner.Text(), "\r\n"))
}
// If there was an error while scanning the input, log an error
if err := scanner.Err(); err != nil {
entry.Errorf("Error while reading from Writer: %s", err)
}
// Close the reader when we are done
reader.Close()
}
// writerFinalizer is a finalizer function that closes the given writer when it is garbage collected
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}

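The rewritten writerScanner above now caps each scanned chunk at 64KB and trims trailing "\r\n" before logging. For context, this is the code path taken when logrus is handed out as an io.Writer; the sketch below shows one common arrangement (redirecting the standard library logger), which is my own illustration rather than something taken from this diff.

```go
package main

import (
	stdlog "log"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// WriterLevel returns an *io.PipeWriter; everything written to it is
	// scanned (in chunks of at most 64KB after this change) and logged at WARN.
	w := logger.WriterLevel(logrus.WarnLevel)
	defer w.Close()

	// Point the standard library logger at logrus.
	stdlog.SetOutput(w)
	stdlog.Println("a line that ends up as a logrus WARN entry")
}
```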
View File

@ -352,9 +352,9 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
// Greater asserts that the first element is greater than the second
//
// assert.Greater(t, 2, 1)
// assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a")
// assert.Greater(t, 2, 1)
// assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -364,10 +364,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
// GreaterOrEqual asserts that the first element is greater than or equal to the second
//
// assert.GreaterOrEqual(t, 2, 1)
// assert.GreaterOrEqual(t, 2, 2)
// assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b")
// assert.GreaterOrEqual(t, 2, 1)
// assert.GreaterOrEqual(t, 2, 2)
// assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -377,9 +377,9 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
// Less asserts that the first element is less than the second
//
// assert.Less(t, 1, 2)
// assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b")
// assert.Less(t, 1, 2)
// assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -389,10 +389,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
// LessOrEqual asserts that the first element is less than or equal to the second
//
// assert.LessOrEqual(t, 1, 2)
// assert.LessOrEqual(t, 2, 2)
// assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b")
// assert.LessOrEqual(t, 1, 2)
// assert.LessOrEqual(t, 2, 2)
// assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -402,8 +402,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
// Positive asserts that the specified element is positive
//
// assert.Positive(t, 1)
// assert.Positive(t, 1.23)
// assert.Positive(t, 1)
// assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -414,8 +414,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
// Negative asserts that the specified element is negative
//
// assert.Negative(t, -1)
// assert.Negative(t, -1.23)
// assert.Negative(t, -1)
// assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()

View File

@ -22,9 +22,9 @@ func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bo
// Containsf asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -56,7 +56,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// assert.Emptyf(t, obj, "error message %s", "formatted")
// assert.Emptyf(t, obj, "error message %s", "formatted")
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -66,7 +66,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) boo
// Equalf asserts that two objects are equal.
//
// assert.Equalf(t, 123, 123, "error message %s", "formatted")
// assert.Equalf(t, 123, 123, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
@ -81,8 +81,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar
// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
// actualObj, err := SomeFunction()
// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -90,10 +90,27 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args
return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
}
// EqualExportedValuesf asserts that the types of two objects are equal and their public
// fields are also equal. This is useful for comparing structs that have private fields
// that could potentially differ.
//
// type S struct {
// Exported int
// notExported int
// }
// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// EqualValuesf asserts that two objects are equal or convertable to the same types
// and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -103,10 +120,10 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
// if assert.Errorf(t, err, "error message %s", "formatted") {
// assert.Equal(t, expectedErrorf, err)
// }
// actualObj, err := SomeFunction()
// if assert.Errorf(t, err, "error message %s", "formatted") {
// assert.Equal(t, expectedErrorf, err)
// }
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -126,8 +143,8 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
// actualObj, err := SomeFunction()
// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -147,7 +164,7 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface
// Eventuallyf asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -155,9 +172,34 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
}
// EventuallyWithTf asserts that given condition will be met in waitFor time,
// periodically checking target function each tick. In contrast to Eventually,
// it supplies a CollectT to the condition function, so that the condition
// function can use the CollectT to call other assertions.
// The condition is considered "met" if no errors are raised in a tick.
// The supplied CollectT collects all errors from one tick (if there are any).
// If the condition is not met before waitFor, the collected errors of
// the last tick are copied to t.
//
// externalValue := false
// go func() {
// time.Sleep(8*time.Second)
// externalValue = true
// }()
// assert.EventuallyWithTf(t, func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
}
// Exactlyf asserts that two objects are equal in value and type.
//
// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -183,7 +225,7 @@ func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}
// Falsef asserts that the specified value is false.
//
// assert.Falsef(t, myBool, "error message %s", "formatted")
// assert.Falsef(t, myBool, "error message %s", "formatted")
func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -202,9 +244,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool
// Greaterf asserts that the first element is greater than the second
//
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -214,10 +256,10 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in
// GreaterOrEqualf asserts that the first element is greater than or equal to the second
//
// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -228,7 +270,7 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg
// HTTPBodyContainsf asserts that a specified handler returns a
// body that contains a string.
//
// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@ -241,7 +283,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url
// HTTPBodyNotContainsf asserts that a specified handler returns a
// body that does not contain a string.
//
// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@ -253,7 +295,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u
// HTTPErrorf asserts that a specified handler returns an error status code.
//
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@ -265,7 +307,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string,
// HTTPRedirectf asserts that a specified handler returns a redirect status code.
//
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@ -277,7 +319,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
//
// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
@ -289,7 +331,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@ -301,7 +343,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin
// Implementsf asserts that an object is implemented by the specified interface.
//
// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -311,7 +353,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms
// InDeltaf asserts that the two numerals are within delta of each other.
//
// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -353,9 +395,9 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil
// IsDecreasingf asserts that the collection is decreasing
//
// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -365,9 +407,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface
// IsIncreasingf asserts that the collection is increasing
//
// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -377,9 +419,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface
// IsNonDecreasingf asserts that the collection is not decreasing
//
// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -389,9 +431,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf
// IsNonIncreasingf asserts that the collection is not increasing
//
// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -409,7 +451,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin
// JSONEqf asserts that two JSON strings are equivalent.
//
// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -420,7 +462,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
// Lenf asserts that the specified object has specific length.
// Lenf also fails if the object has a type that len() not accept.
//
// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -430,9 +472,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf
// Lessf asserts that the first element is less than the second
//
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -442,10 +484,10 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter
// LessOrEqualf asserts that the first element is less than or equal to the second
//
// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -455,8 +497,8 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
// Negativef asserts that the specified element is negative
//
// assert.Negativef(t, -1, "error message %s", "formatted")
// assert.Negativef(t, -1.23, "error message %s", "formatted")
// assert.Negativef(t, -1, "error message %s", "formatted")
// assert.Negativef(t, -1.23, "error message %s", "formatted")
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -467,7 +509,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool
// Neverf asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -477,7 +519,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.
// Nilf asserts that the specified object is nil.
//
// assert.Nilf(t, err, "error message %s", "formatted")
// assert.Nilf(t, err, "error message %s", "formatted")
func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -496,10 +538,10 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool
// NoErrorf asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
// if assert.NoErrorf(t, err, "error message %s", "formatted") {
// assert.Equal(t, expectedObj, actualObj)
// }
// actualObj, err := SomeFunction()
// if assert.NoErrorf(t, err, "error message %s", "formatted") {
// assert.Equal(t, expectedObj, actualObj)
// }
func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -519,9 +561,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) boo
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -532,9 +574,9 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
// }
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
// }
func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -544,7 +586,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{})
// NotEqualf asserts that the specified values are NOT equal.
//
// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
@ -557,7 +599,7 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
//
// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -576,7 +618,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
// assert.NotNilf(t, err, "error message %s", "formatted")
func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -586,7 +628,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bo
// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
//
// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -596,8 +638,8 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo
// NotRegexpf asserts that a specified regexp does not match a string.
//
// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -607,7 +649,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ..
// NotSamef asserts that two pointers do not reference the same object.
//
// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@ -621,7 +663,7 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
// NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -639,7 +681,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
// Panicsf asserts that the code inside the specified PanicTestFunc panics.
//
// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -651,7 +693,7 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -662,7 +704,7 @@ func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string,
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -672,8 +714,8 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str
// Positivef asserts that the specified element is positive
//
// assert.Positivef(t, 1, "error message %s", "formatted")
// assert.Positivef(t, 1.23, "error message %s", "formatted")
// assert.Positivef(t, 1, "error message %s", "formatted")
// assert.Positivef(t, 1.23, "error message %s", "formatted")
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -683,8 +725,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool
// Regexpf asserts that a specified regexp matches a string.
//
// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -694,7 +736,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in
// Samef asserts that two pointers reference the same object.
//
// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@ -708,7 +750,7 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
//
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -718,7 +760,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args
// Truef asserts that the specified value is true.
//
// assert.Truef(t, myBool, "error message %s", "formatted")
// assert.Truef(t, myBool, "error message %s", "formatted")
func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -728,7 +770,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
// WithinDurationf asserts that the two times are within duration delta of each other.
//
// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -738,7 +780,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim
// WithinRangef asserts that a time is within a time range (inclusive).
//
// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()

File diff suppressed because it is too large

View File

@ -46,36 +46,36 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
// IsIncreasing asserts that the collection is increasing
//
// assert.IsIncreasing(t, []int{1, 2, 3})
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
// assert.IsIncreasing(t, []int{1, 2, 3})
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// IsNonIncreasing asserts that the collection is not increasing
//
// assert.IsNonIncreasing(t, []int{2, 1, 1})
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
// assert.IsNonIncreasing(t, []int{2, 1, 1})
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// IsDecreasing asserts that the collection is decreasing
//
// assert.IsDecreasing(t, []int{2, 1, 0})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
// assert.IsDecreasing(t, []int{2, 1, 0})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// IsNonDecreasing asserts that the collection is not decreasing
//
// assert.IsNonDecreasing(t, []int{1, 1, 2})
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
// assert.IsNonDecreasing(t, []int{1, 1, 2})
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
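
A brief usage sketch for the ordering assertions above, written as a hypothetical test against the vendored assert package (the test name and data are illustrative; note the doc comments write `[]float`, which is not a Go type, so `[]float64` is used here):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrdering(t *testing.T) {
	assert.IsIncreasing(t, []int{1, 2, 3})
	assert.IsNonIncreasing(t, []float64{2, 1, 1})
	assert.IsDecreasing(t, []string{"b", "a"})
	assert.IsNonDecreasing(t, []int{1, 1, 2})
}
```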

View File

@ -75,6 +75,77 @@ func ObjectsAreEqual(expected, actual interface{}) bool {
return bytes.Equal(exp, act)
}
// copyExportedFields iterates downward through nested data structures and creates a copy
// that only contains the exported struct fields.
func copyExportedFields(expected interface{}) interface{} {
if isNil(expected) {
return expected
}
expectedType := reflect.TypeOf(expected)
expectedKind := expectedType.Kind()
expectedValue := reflect.ValueOf(expected)
switch expectedKind {
case reflect.Struct:
result := reflect.New(expectedType).Elem()
for i := 0; i < expectedType.NumField(); i++ {
field := expectedType.Field(i)
isExported := field.IsExported()
if isExported {
fieldValue := expectedValue.Field(i)
if isNil(fieldValue) || isNil(fieldValue.Interface()) {
continue
}
newValue := copyExportedFields(fieldValue.Interface())
result.Field(i).Set(reflect.ValueOf(newValue))
}
}
return result.Interface()
case reflect.Ptr:
result := reflect.New(expectedType.Elem())
unexportedRemoved := copyExportedFields(expectedValue.Elem().Interface())
result.Elem().Set(reflect.ValueOf(unexportedRemoved))
return result.Interface()
case reflect.Array, reflect.Slice:
result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
for i := 0; i < expectedValue.Len(); i++ {
index := expectedValue.Index(i)
if isNil(index) {
continue
}
unexportedRemoved := copyExportedFields(index.Interface())
result.Index(i).Set(reflect.ValueOf(unexportedRemoved))
}
return result.Interface()
case reflect.Map:
result := reflect.MakeMap(expectedType)
for _, k := range expectedValue.MapKeys() {
index := expectedValue.MapIndex(k)
unexportedRemoved := copyExportedFields(index.Interface())
result.SetMapIndex(k, reflect.ValueOf(unexportedRemoved))
}
return result.Interface()
default:
return expected
}
}
// ObjectsExportedFieldsAreEqual determines if the exported (public) fields of two objects are
// considered equal. This comparison of only exported fields is applied recursively to nested data
// structures.
//
// This function does no assertion of any kind.
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool {
expectedCleaned := copyExportedFields(expected)
actualCleaned := copyExportedFields(actual)
return ObjectsAreEqualValues(expectedCleaned, actualCleaned)
}
// ObjectsAreEqualValues gets whether two objects are equal, or if their
// values are equal.
func ObjectsAreEqualValues(expected, actual interface{}) bool {
@ -271,7 +342,7 @@ type labeledContent struct {
// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
//
// \t{{label}}:{{align_spaces}}\t{{content}}\n
// \t{{label}}:{{align_spaces}}\t{{content}}\n
//
// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
@ -294,7 +365,7 @@ func labeledOutput(content ...labeledContent) string {
// Implements asserts that an object is implemented by the specified interface.
//
// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -326,7 +397,7 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs
// Equal asserts that two objects are equal.
//
// assert.Equal(t, 123, 123)
// assert.Equal(t, 123, 123)
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
@ -367,7 +438,7 @@ func validateEqualArgs(expected, actual interface{}) error {
// Same asserts that two pointers reference the same object.
//
// assert.Same(t, ptr1, ptr2)
// assert.Same(t, ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@ -387,7 +458,7 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
// NotSame asserts that two pointers do not reference the same object.
//
// assert.NotSame(t, ptr1, ptr2)
// assert.NotSame(t, ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@ -455,7 +526,7 @@ func truncatingFormat(data interface{}) string {
// EqualValues asserts that two objects are equal or convertable to the same types
// and equal.
//
// assert.EqualValues(t, uint32(123), int32(123))
// assert.EqualValues(t, uint32(123), int32(123))
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -473,9 +544,53 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa
}
// EqualExportedValues asserts that the types of two objects are equal and their public
// fields are also equal. This is useful for comparing structs that have private fields
// that could potentially differ.
//
// type S struct {
// Exported int
// notExported int
// }
// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true
// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false
func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
aType := reflect.TypeOf(expected)
bType := reflect.TypeOf(actual)
if aType != bType {
return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
}
if aType.Kind() != reflect.Struct {
return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
}
if bType.Kind() != reflect.Struct {
return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
}
expected = copyExportedFields(expected)
actual = copyExportedFields(actual)
if !ObjectsAreEqualValues(expected, actual) {
diff := diff(expected, actual)
expected, actual = formatUnequalValues(expected, actual)
return Fail(t, fmt.Sprintf("Not equal (comparing only exported fields): \n"+
"expected: %s\n"+
"actual : %s%s", expected, actual, diff), msgAndArgs...)
}
return true
}
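
A minimal sketch of the new EqualExportedValues assertion introduced in this vendored version; the `server` type and test below are illustrative, not part of the diff:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type server struct {
	Addr  string // exported: compared
	cache string // unexported: ignored by EqualExportedValues
}

func TestEqualExportedValues(t *testing.T) {
	a := server{Addr: ":8080", cache: "warm"}
	b := server{Addr: ":8080", cache: "cold"}

	// Passes: both values are the same struct type and their exported
	// fields match; the differing unexported cache field is stripped by
	// copyExportedFields before comparison.
	assert.EqualExportedValues(t, a, b)
}
```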
// Exactly asserts that two objects are equal in value and type.
//
// assert.Exactly(t, int32(123), int64(123))
// assert.Exactly(t, int32(123), int64(123))
func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -494,7 +609,7 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
// NotNil asserts that the specified object is not nil.
//
// assert.NotNil(t, err)
// assert.NotNil(t, err)
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
if !isNil(object) {
return true
@ -540,7 +655,7 @@ func isNil(object interface{}) bool {
// Nil asserts that the specified object is nil.
//
// assert.Nil(t, err)
// assert.Nil(t, err)
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
if isNil(object) {
return true
@ -583,7 +698,7 @@ func isEmpty(object interface{}) bool {
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// assert.Empty(t, obj)
// assert.Empty(t, obj)
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
pass := isEmpty(object)
if !pass {
@ -600,9 +715,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// if assert.NotEmpty(t, obj) {
// assert.Equal(t, "two", obj[1])
// }
// if assert.NotEmpty(t, obj) {
// assert.Equal(t, "two", obj[1])
// }
func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
pass := !isEmpty(object)
if !pass {
@ -631,7 +746,7 @@ func getLen(x interface{}) (ok bool, length int) {
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept.
//
// assert.Len(t, mySlice, 3)
// assert.Len(t, mySlice, 3)
func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -649,7 +764,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
// True asserts that the specified value is true.
//
// assert.True(t, myBool)
// assert.True(t, myBool)
func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
if !value {
if h, ok := t.(tHelper); ok {
@ -664,7 +779,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
// False asserts that the specified value is false.
//
// assert.False(t, myBool)
// assert.False(t, myBool)
func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
if value {
if h, ok := t.(tHelper); ok {
@ -679,7 +794,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
// NotEqual asserts that the specified values are NOT equal.
//
// assert.NotEqual(t, obj1, obj2)
// assert.NotEqual(t, obj1, obj2)
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
@ -702,7 +817,7 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
// NotEqualValues asserts that two objects are not equal even when converted to the same type
//
// assert.NotEqualValues(t, obj1, obj2)
// assert.NotEqualValues(t, obj1, obj2)
func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -761,9 +876,9 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) {
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
// assert.Contains(t, "Hello World", "World")
// assert.Contains(t, ["Hello", "World"], "World")
// assert.Contains(t, {"Hello": "World"}, "Hello")
// assert.Contains(t, "Hello World", "World")
// assert.Contains(t, ["Hello", "World"], "World")
// assert.Contains(t, {"Hello": "World"}, "Hello")
func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -784,9 +899,9 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
// assert.NotContains(t, "Hello World", "Earth")
// assert.NotContains(t, ["Hello", "World"], "Earth")
// assert.NotContains(t, {"Hello": "World"}, "Earth")
// assert.NotContains(t, "Hello World", "Earth")
// assert.NotContains(t, ["Hello", "World"], "Earth")
// assert.NotContains(t, {"Hello": "World"}, "Earth")
func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -794,10 +909,10 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
ok, found := containsElement(s, contains)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
}
if found {
return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
return Fail(t, fmt.Sprintf("%#v should not contain %#v", s, contains), msgAndArgs...)
}
return true
@ -807,7 +922,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
// Subset asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
//
// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -863,7 +978,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
// NotSubset asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
//
// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1048,7 +1163,7 @@ func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
// assert.Panics(t, func(){ GoCrazy() })
// assert.Panics(t, func(){ GoCrazy() })
func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1064,7 +1179,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1085,7 +1200,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1105,7 +1220,7 @@ func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs .
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
// assert.NotPanics(t, func(){ RemainCalm() })
// assert.NotPanics(t, func(){ RemainCalm() })
func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1120,7 +1235,7 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
// WithinDuration asserts that the two times are within duration delta of each other.
//
// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1136,7 +1251,7 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration,
// WithinRange asserts that a time is within a time range (inclusive).
//
// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1195,7 +1310,7 @@ func toFloat(x interface{}) (float64, bool) {
// InDelta asserts that the two numerals are within delta of each other.
//
// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1368,10 +1483,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
// if assert.NoError(t, err) {
// assert.Equal(t, expectedObj, actualObj)
// }
// actualObj, err := SomeFunction()
// if assert.NoError(t, err) {
// assert.Equal(t, expectedObj, actualObj)
// }
func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err != nil {
if h, ok := t.(tHelper); ok {
@ -1385,10 +1500,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// Error asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
// if assert.Error(t, err) {
// assert.Equal(t, expectedError, err)
// }
// actualObj, err := SomeFunction()
// if assert.Error(t, err) {
// assert.Equal(t, expectedError, err)
// }
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err == nil {
if h, ok := t.(tHelper); ok {
@ -1403,8 +1518,8 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
// assert.EqualError(t, err, expectedErrorString)
// actualObj, err := SomeFunction()
// assert.EqualError(t, err, expectedErrorString)
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1426,8 +1541,8 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte
// ErrorContains asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// assert.ErrorContains(t, err, expectedErrorSubString)
// actualObj, err := SomeFunction()
// assert.ErrorContains(t, err, expectedErrorSubString)
func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1460,8 +1575,8 @@ func matchRegexp(rx interface{}, str interface{}) bool {
// Regexp asserts that a specified regexp matches a string.
//
// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
// assert.Regexp(t, "start...$", "it's not starting")
// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
// assert.Regexp(t, "start...$", "it's not starting")
func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1478,8 +1593,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface
// NotRegexp asserts that a specified regexp does not match a string.
//
// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
// assert.NotRegexp(t, "^start", "it's not starting")
// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
// assert.NotRegexp(t, "^start", "it's not starting")
func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1591,7 +1706,7 @@ func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
// JSONEq asserts that two JSON strings are equivalent.
//
// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1714,7 +1829,7 @@ type tHelper interface {
// Eventually asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -1744,10 +1859,93 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
}
}
// CollectT implements the TestingT interface and collects all errors.
type CollectT struct {
errors []error
}
// Errorf collects the error.
func (c *CollectT) Errorf(format string, args ...interface{}) {
c.errors = append(c.errors, fmt.Errorf(format, args...))
}
// FailNow panics.
func (c *CollectT) FailNow() {
panic("Assertion failed")
}
// Reset clears the collected errors.
func (c *CollectT) Reset() {
c.errors = nil
}
// Copy copies the collected errors to the supplied t.
func (c *CollectT) Copy(t TestingT) {
if tt, ok := t.(tHelper); ok {
tt.Helper()
}
for _, err := range c.errors {
t.Errorf("%v", err)
}
}
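
CollectT is exported, so a condition body (or, as in this hypothetical sketch, a plain test) can run ordinary assertions against it; failures are buffered rather than reported until Copy forwards them to a real TestingT:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestCollectT(t *testing.T) {
	c := new(assert.CollectT)

	// This failure is buffered inside c, not reported to t.
	assert.Equal(c, 1, 2)

	// Reset drops everything collected so far.
	c.Reset()

	// A passing assertion buffers nothing...
	assert.Equal(c, "a", "a")

	// ...so Copy forwards no errors and the test stays green.
	c.Copy(t)
}
```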
// EventuallyWithT asserts that given condition will be met in waitFor time,
// periodically checking target function each tick. In contrast to Eventually,
// it supplies a CollectT to the condition function, so that the condition
// function can use the CollectT to call other assertions.
// The condition is considered "met" if no errors are raised in a tick.
// The supplied CollectT collects all errors from one tick (if there are any).
// If the condition is not met before waitFor, the collected errors of
// the last tick are copied to t.
//
// externalValue := false
// go func() {
// time.Sleep(8*time.Second)
// externalValue = true
// }()
// assert.EventuallyWithT(t, func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
collect := new(CollectT)
ch := make(chan bool, 1)
timer := time.NewTimer(waitFor)
defer timer.Stop()
ticker := time.NewTicker(tick)
defer ticker.Stop()
for tick := ticker.C; ; {
select {
case <-timer.C:
collect.Copy(t)
return Fail(t, "Condition never satisfied", msgAndArgs...)
case <-tick:
tick = nil
collect.Reset()
go func() {
condition(collect)
ch <- len(collect.errors) == 0
}()
case v := <-ch:
if v {
return true
}
tick = ticker.C
}
}
}
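
A hedged usage sketch for EventuallyWithT as added in this testify bump; the polled flag, timings, and test name are illustrative:

```go
package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyWithT(t *testing.T) {
	var ready atomic.Bool
	go func() {
		time.Sleep(50 * time.Millisecond)
		ready.Store(true)
	}()

	// The collector is reset on every tick; the condition counts as met
	// once a tick completes without any collected assertion failures.
	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		assert.True(c, ready.Load(), "expected ready to flip to true")
	}, time.Second, 10*time.Millisecond)
}
```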
// Never asserts that the given condition doesn't satisfy in waitFor time,
// periodically checking the target function each tick.
//
// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()

View File

@ -1,39 +1,40 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
//
// Example Usage
// # Example Usage
//
// The following is a complete example using assert in a standard test function:
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// )
//
// func TestSomething(t *testing.T) {
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// )
//
// var a string = "Hello"
// var b string = "Hello"
// func TestSomething(t *testing.T) {
//
// assert.Equal(t, a, b, "The two words should be the same.")
// var a string = "Hello"
// var b string = "Hello"
//
// }
// assert.Equal(t, a, b, "The two words should be the same.")
//
// }
//
// if you assert many times, use the format below:
//
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// )
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// )
//
// func TestSomething(t *testing.T) {
// assert := assert.New(t)
// func TestSomething(t *testing.T) {
// assert := assert.New(t)
//
// var a string = "Hello"
// var b string = "Hello"
// var a string = "Hello"
// var b string = "Hello"
//
// assert.Equal(a, b, "The two words should be the same.")
// }
// assert.Equal(a, b, "The two words should be the same.")
// }
//
// Assertions
// # Assertions
//
// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
// All assertion functions take, as the first argument, the `*testing.T` object provided by the

View File

@ -23,7 +23,7 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@ -45,7 +45,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@ -67,7 +67,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
// HTTPError asserts that a specified handler returns an error status code.
//
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@ -89,7 +89,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
// HTTPStatusCode asserts that a specified handler returns a specified status code.
//
// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
@ -124,7 +124,7 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@ -144,7 +144,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {

View File

@ -1,24 +1,25 @@
// Package require implements the same assertions as the `assert` package but
// stops test execution when a test fails.
//
// Example Usage
// # Example Usage
//
// The following is a complete example using require in a standard test function:
// import (
// "testing"
// "github.com/stretchr/testify/require"
// )
//
// func TestSomething(t *testing.T) {
// import (
// "testing"
// "github.com/stretchr/testify/require"
// )
//
// var a string = "Hello"
// var b string = "Hello"
// func TestSomething(t *testing.T) {
//
// require.Equal(t, a, b, "The two words should be the same.")
// var a string = "Hello"
// var b string = "Hello"
//
// }
// require.Equal(t, a, b, "The two words should be the same.")
//
// Assertions
// }
//
// # Assertions
//
// The `require` package have same global functions as in the `assert` package,
// but instead of returning a boolean result they call `t.FailNow()`.
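
A short hedged sketch of the difference described above: assert records a failure and continues, while require stops the test via t.FailNow(). The someLookup helper exists only to make the example compile and is not part of the diff:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// someLookup is a stand-in helper used for illustration only.
func someLookup() (string, error) { return "expected", nil }

func TestAssertVersusRequire(t *testing.T) {
	v, err := someLookup()

	// require: a non-nil err fails the test and stops it immediately.
	require.NoError(t, err)

	// assert: a mismatch is recorded, but the test keeps running.
	assert.Equal(t, "expected", v)
}
```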

File diff suppressed because it is too large

File diff suppressed because it is too large

2
vendor/github.com/vishvananda/netns/.golangci.yml generated vendored Normal file
View File

@ -0,0 +1,2 @@
run:
timeout: 5m

View File

@ -23,6 +23,7 @@ import (
"fmt"
"net"
"runtime"
"github.com/vishvananda/netns"
)
@ -48,14 +49,3 @@ func main() {
}
```
## NOTE
The library can be safely used only with Go >= 1.10 due to [golang/go#20676](https://github.com/golang/go/issues/20676).
After locking a goroutine to its current OS thread with `runtime.LockOSThread()`
and changing its network namespace, any new subsequent goroutine won't be
scheduled on that thread while it's locked. Therefore, the new goroutine
will run in a different namespace leading to unexpected results.
See [here](https://www.weave.works/blog/linux-namespaces-golang-followup) for more details.

9
vendor/github.com/vishvananda/netns/doc.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
// Package netns allows ultra-simple network namespace handling. NsHandles
// can be retrieved and set. Note that the current namespace is thread
// local so actions that set and reset namespaces should use LockOSThread
// to make sure the namespace doesn't change due to a goroutine switch.
// It is best to close NsHandles when you are done with them. This can be
// accomplished via a `defer ns.Close()` on the handle. Changing namespaces
// requires elevated privileges, so in most cases this code needs to be run
// as root.
package netns
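
A minimal sketch of the usage pattern doc.go describes: pin the OS thread, switch namespaces, restore, and close handles. Error handling is trimmed, and it assumes a Linux host with sufficient privileges:

```go
package main

import (
	"fmt"
	"runtime"

	"github.com/vishvananda/netns"
)

func main() {
	// The current namespace is per OS thread, so pin the goroutine first.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	origin, _ := netns.Get()
	defer origin.Close()

	// Create a new network namespace and enter it.
	newns, _ := netns.New()
	defer newns.Close()

	fmt.Println("now in namespace:", newns.String())

	// Return to the original namespace before the thread unlocks.
	_ = netns.Set(origin)
}
```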

View File

@ -1,33 +1,31 @@
// +build linux,go1.10
package netns
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"syscall"
"golang.org/x/sys/unix"
)
// Deprecated: use syscall pkg instead (go >= 1.5 needed).
// Deprecated: use golang.org/x/sys/unix pkg instead.
const (
CLONE_NEWUTS = 0x04000000 /* New utsname group? */
CLONE_NEWIPC = 0x08000000 /* New ipcs */
CLONE_NEWUSER = 0x10000000 /* New user namespace */
CLONE_NEWPID = 0x20000000 /* New pid namespace */
CLONE_NEWNET = 0x40000000 /* New network namespace */
CLONE_IO = 0x80000000 /* Get io context */
bindMountPath = "/run/netns" /* Bind mount path for named netns */
CLONE_NEWUTS = unix.CLONE_NEWUTS /* New utsname group? */
CLONE_NEWIPC = unix.CLONE_NEWIPC /* New ipcs */
CLONE_NEWUSER = unix.CLONE_NEWUSER /* New user namespace */
CLONE_NEWPID = unix.CLONE_NEWPID /* New pid namespace */
CLONE_NEWNET = unix.CLONE_NEWNET /* New network namespace */
CLONE_IO = unix.CLONE_IO /* Get io context */
)
// Setns sets namespace using syscall. Note that this should be a method
// in syscall but it has not been added.
const bindMountPath = "/run/netns" /* Bind mount path for named netns */
// Setns sets namespace using golang.org/x/sys/unix.Setns.
//
// Deprecated: Use golang.org/x/sys/unix.Setns instead.
func Setns(ns NsHandle, nstype int) (err error) {
return unix.Setns(int(ns), nstype)
}
@ -35,19 +33,20 @@ func Setns(ns NsHandle, nstype int) (err error) {
// Set sets the current network namespace to the namespace represented
// by NsHandle.
func Set(ns NsHandle) (err error) {
return Setns(ns, CLONE_NEWNET)
return unix.Setns(int(ns), unix.CLONE_NEWNET)
}
// New creates a new network namespace, sets it as current and returns
// a handle to it.
func New() (ns NsHandle, err error) {
if err := unix.Unshare(CLONE_NEWNET); err != nil {
if err := unix.Unshare(unix.CLONE_NEWNET); err != nil {
return -1, err
}
return Get()
}
// NewNamed creates a new named network namespace and returns a handle to it
// NewNamed creates a new named network namespace, sets it as current,
// and returns a handle to it
func NewNamed(name string) (NsHandle, error) {
if _, err := os.Stat(bindMountPath); os.IsNotExist(err) {
err = os.MkdirAll(bindMountPath, 0755)
@ -65,13 +64,15 @@ func NewNamed(name string) (NsHandle, error) {
f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444)
if err != nil {
newNs.Close()
return None(), err
}
f.Close()
nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid())
err = syscall.Mount(nsPath, namedPath, "bind", syscall.MS_BIND, "")
nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
err = unix.Mount(nsPath, namedPath, "bind", unix.MS_BIND, "")
if err != nil {
newNs.Close()
return None(), err
}
@ -82,7 +83,7 @@ func NewNamed(name string) (NsHandle, error) {
func DeleteNamed(name string) error {
namedPath := path.Join(bindMountPath, name)
err := syscall.Unmount(namedPath, syscall.MNT_DETACH)
err := unix.Unmount(namedPath, unix.MNT_DETACH)
if err != nil {
return err
}
@ -108,7 +109,7 @@ func GetFromPath(path string) (NsHandle, error) {
// GetFromName gets a handle to a named network namespace such as one
// created by `ip netns add`.
func GetFromName(name string) (NsHandle, error) {
return GetFromPath(fmt.Sprintf("/var/run/netns/%s", name))
return GetFromPath(filepath.Join(bindMountPath, name))
}
// GetFromPid gets a handle to the network namespace of a given pid.
@ -133,33 +134,38 @@ func GetFromDocker(id string) (NsHandle, error) {
}
// borrowed from docker/utils/utils.go
func findCgroupMountpoint(cgroupType string) (string, error) {
output, err := ioutil.ReadFile("/proc/mounts")
func findCgroupMountpoint(cgroupType string) (int, string, error) {
output, err := os.ReadFile("/proc/mounts")
if err != nil {
return "", err
return -1, "", err
}
// /proc/mounts has 6 fields per line, one mount per line, e.g.
// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, " ")
if len(parts) == 6 && parts[2] == "cgroup" {
for _, opt := range strings.Split(parts[3], ",") {
if opt == cgroupType {
return parts[1], nil
if len(parts) == 6 {
switch parts[2] {
case "cgroup2":
return 2, parts[1], nil
case "cgroup":
for _, opt := range strings.Split(parts[3], ",") {
if opt == cgroupType {
return 1, parts[1], nil
}
}
}
}
}
return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
return -1, "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}
// Returns the relative path to the cgroup docker is running in.
// borrowed from docker/utils/utils.go
// modified to get the docker pid instead of using /proc/self
func getThisCgroup(cgroupType string) (string, error) {
dockerpid, err := ioutil.ReadFile("/var/run/docker.pid")
func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) {
dockerpid, err := os.ReadFile("/var/run/docker.pid")
if err != nil {
return "", err
}
@ -171,14 +177,15 @@ func getThisCgroup(cgroupType string) (string, error) {
if err != nil {
return "", err
}
output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid))
output, err := os.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid))
if err != nil {
return "", err
}
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, ":")
// any type used by docker should work
if parts[1] == cgroupType {
if (cgroupVer == 1 && parts[1] == cgroupType) ||
(cgroupVer == 2 && parts[1] == "") {
return parts[2], nil
}
}
@ -190,46 +197,56 @@ func getThisCgroup(cgroupType string) (string, error) {
// modified to only return the first pid
// modified to glob with id
// modified to search for newer docker containers
// modified to look for cgroups v2
func getPidForContainer(id string) (int, error) {
pid := 0
// memory is chosen randomly, any cgroup used by docker works
cgroupType := "memory"
cgroupRoot, err := findCgroupMountpoint(cgroupType)
cgroupVer, cgroupRoot, err := findCgroupMountpoint(cgroupType)
if err != nil {
return pid, err
}
cgroupThis, err := getThisCgroup(cgroupType)
cgroupDocker, err := getDockerCgroup(cgroupVer, cgroupType)
if err != nil {
return pid, err
}
id += "*"
var pidFile string
if cgroupVer == 1 {
pidFile = "tasks"
} else if cgroupVer == 2 {
pidFile = "cgroup.procs"
} else {
return -1, fmt.Errorf("Invalid cgroup version '%d'", cgroupVer)
}
attempts := []string{
filepath.Join(cgroupRoot, cgroupThis, id, "tasks"),
filepath.Join(cgroupRoot, cgroupDocker, id, pidFile),
// With more recent lxc versions, cgroup will be in lxc/
filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks"),
filepath.Join(cgroupRoot, cgroupDocker, "lxc", id, pidFile),
// With more recent docker, cgroup will be in docker/
filepath.Join(cgroupRoot, cgroupThis, "docker", id, "tasks"),
filepath.Join(cgroupRoot, cgroupDocker, "docker", id, pidFile),
// Even more recent docker versions under systemd use docker-<id>.scope/
filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", "tasks"),
filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", pidFile),
// Even more recent docker versions under cgroup/systemd/docker/<id>/
filepath.Join(cgroupRoot, "..", "systemd", "docker", id, "tasks"),
filepath.Join(cgroupRoot, "..", "systemd", "docker", id, pidFile),
// Kubernetes with docker and CNI is even more different. Works for BestEffort and Burstable QoS
filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, "tasks"),
filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, pidFile),
// Same as above but for Guaranteed QoS
filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, "tasks"),
filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, pidFile),
// Another flavor of containers location in recent kubernetes 1.11+. Works for BestEffort and Burstable QoS
filepath.Join(cgroupRoot, cgroupThis, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", "tasks"),
filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile),
// Same as above but for Guaranteed QoS
filepath.Join(cgroupRoot, cgroupThis, "kubepods.slice", "*", "docker-"+id+".scope", "tasks"),
filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*", "docker-"+id+".scope", pidFile),
// When runs inside of a container with recent kubernetes 1.11+. Works for BestEffort and Burstable QoS
filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", "tasks"),
filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile),
// Same as above but for Guaranteed QoS
filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", "tasks"),
filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", pidFile),
}
var filename string
@ -247,7 +264,7 @@ func getPidForContainer(id string) (int, error) {
return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1])
}
output, err := ioutil.ReadFile(filename)
output, err := os.ReadFile(filename)
if err != nil {
return pid, err
}
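For orientation only (not part of the vendored diff), here is a minimal sketch of how the updated netns API is typically used. The namespace name "demo" is illustrative, and the program assumes it runs with the privileges needed to create network namespaces (normally root):

package main

import (
    "log"
    "runtime"

    "github.com/vishvananda/netns"
)

func main() {
    // Namespace changes apply to the calling OS thread, so pin the goroutine.
    runtime.LockOSThread()
    defer runtime.UnlockOSThread()

    // Remember the original namespace so we can switch back later.
    origin, err := netns.Get()
    if err != nil {
        log.Fatal(err)
    }
    defer origin.Close()

    // NewNamed creates a namespace bind-mounted under /run/netns,
    // sets it as current, and returns a handle to it.
    ns, err := netns.NewNamed("demo")
    if err != nil {
        log.Fatal(err)
    }
    defer netns.DeleteNamed("demo")
    defer ns.Close()

    // ... do work inside the new namespace ...

    // Switch back to the original namespace.
    if err := netns.Set(origin); err != nil {
        log.Fatal(err)
    }
}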

View File

@ -1,3 +1,4 @@
//go:build !linux
// +build !linux
package netns
@ -10,6 +11,14 @@ var (
ErrNotImplemented = errors.New("not implemented")
)
// Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It
// is not implemented on other platforms.
//
// Deprecated: Use golang.org/x/sys/unix.Setns instead.
func Setns(ns NsHandle, nstype int) (err error) {
return ErrNotImplemented
}
func Set(ns NsHandle) (err error) {
return ErrNotImplemented
}
@ -18,6 +27,14 @@ func New() (ns NsHandle, err error) {
return -1, ErrNotImplemented
}
func NewNamed(name string) (NsHandle, error) {
return -1, ErrNotImplemented
}
func DeleteNamed(name string) error {
return ErrNotImplemented
}
func Get() (NsHandle, error) {
return -1, ErrNotImplemented
}

View File

@ -1,11 +1,3 @@
// Package netns allows ultra-simple network namespace handling. NsHandles
// can be retrieved and set. Note that the current namespace is thread
// local so actions that set and reset namespaces should use LockOSThread
// to make sure the namespace doesn't change due to a goroutine switch.
// It is best to close NsHandles when you are done with them. This can be
// accomplished via a `defer ns.Close()` on the handle. Changing namespaces
// requires elevated privileges, so in most cases this code needs to be run
// as root.
package netns
import (
@ -38,7 +30,7 @@ func (ns NsHandle) Equal(other NsHandle) bool {
// String shows the file descriptor number and its dev and inode.
func (ns NsHandle) String() string {
if ns == -1 {
return "NS(None)"
return "NS(none)"
}
var s unix.Stat_t
if err := unix.Fstat(int(ns), &s); err != nil {
@ -71,7 +63,7 @@ func (ns *NsHandle) Close() error {
if err := unix.Close(int(*ns)); err != nil {
return err
}
(*ns) = -1
*ns = -1
return nil
}

45
vendor/github.com/vishvananda/netns/nshandle_others.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
//go:build !linux
// +build !linux
package netns
// NsHandle is a handle to a network namespace. It can only be used on Linux,
// but provides stub methods on other platforms.
type NsHandle int
// Equal determines if two network handles refer to the same network
// namespace. It is only implemented on Linux.
func (ns NsHandle) Equal(_ NsHandle) bool {
return false
}
// String shows the file descriptor number and its dev and inode.
// It is only implemented on Linux, and returns "NS(none)" on other
// platforms.
func (ns NsHandle) String() string {
return "NS(none)"
}
// UniqueId returns a string which uniquely identifies the namespace
// associated with the network handle. It is only implemented on Linux,
// and returns "NS(none)" on other platforms.
func (ns NsHandle) UniqueId() string {
return "NS(none)"
}
// IsOpen returns true if Close() has not been called. It is only implemented
// on Linux and always returns false on other platforms.
func (ns NsHandle) IsOpen() bool {
return false
}
// Close closes the NsHandle and resets its file descriptor to -1.
// It is only implemented on Linux.
func (ns *NsHandle) Close() error {
return nil
}
// None gets an empty (closed) NsHandle.
func None() NsHandle {
return NsHandle(-1)
}

View File

@ -13,7 +13,10 @@
// golang.org/x/crypto/chacha20poly1305).
package cast5 // import "golang.org/x/crypto/cast5"
import "errors"
import (
"errors"
"math/bits"
)
const BlockSize = 8
const KeySize = 16
@ -241,19 +244,19 @@ func (c *Cipher) keySchedule(in []byte) {
// These are the three 'f' functions. See RFC 2144, section 2.2.
func f1(d, m uint32, r uint8) uint32 {
t := m + d
I := (t << r) | (t >> (32 - r))
I := bits.RotateLeft32(t, int(r))
return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
}
func f2(d, m uint32, r uint8) uint32 {
t := m ^ d
I := (t << r) | (t >> (32 - r))
I := bits.RotateLeft32(t, int(r))
return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
}
func f3(d, m uint32, r uint8) uint32 {
t := m - d
I := (t << r) | (t >> (32 - r))
I := bits.RotateLeft32(t, int(r))
return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
}
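As a quick illustration (not part of the change), the following self-contained sketch shows that the hand-written shift-or rotate and bits.RotateLeft32 produce the same value; the sample input is arbitrary:

package main

import (
    "fmt"
    "math/bits"
)

// rotl32 is the hand-written rotate that bits.RotateLeft32 replaces above.
func rotl32(t uint32, r uint8) uint32 {
    return (t << r) | (t >> (32 - r))
}

func main() {
    t, r := uint32(0x12345678), uint8(13)
    // Both expressions yield the same 32-bit left rotation.
    fmt.Printf("%08x\n%08x\n", rotl32(t, r), bits.RotateLeft32(t, int(r)))
}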

View File

@ -156,7 +156,7 @@ func (r *openpgpReader) Read(p []byte) (n int, err error) {
n, err = r.b64Reader.Read(p)
r.currentCRC = crc24(r.currentCRC, p[:n])
if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
if err == io.EOF && r.lReader.crcSet && r.lReader.crc != r.currentCRC&crc24Mask {
return 0, ArmorCorrupt
}

View File

@ -61,7 +61,7 @@ type Key struct {
type KeyRing interface {
// KeysById returns the set of keys that have the given key id.
KeysById(id uint64) []Key
// KeysByIdAndUsage returns the set of keys with the given id
// KeysByIdUsage returns the set of keys with the given id
// that also meet the key usage given by requiredUsage.
// The requiredUsage is expressed as the bitwise-OR of
// packet.KeyFlag* values.
@ -183,7 +183,7 @@ func (el EntityList) KeysById(id uint64) (keys []Key) {
return
}
// KeysByIdAndUsage returns the set of keys with the given id that also meet
// KeysByIdUsage returns the set of keys with the given id that also meet
// the key usage given by requiredUsage. The requiredUsage is expressed as
// the bitwise-OR of packet.KeyFlag* values.
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {

View File

@ -60,7 +60,7 @@ func (c *Compressed) parse(r io.Reader) error {
return err
}
// compressedWriterCloser represents the serialized compression stream
// compressedWriteCloser represents the serialized compression stream
// header and the compressor. Its Close() method ensures that both the
// compressor and serialized stream header are closed. Its Write()
// method writes to the compressor.

78
vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go generated vendored Normal file
View File

@ -0,0 +1,78 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lazyregexp is a thin wrapper over regexp, allowing the use of global
// regexp variables without forcing them to be compiled at init.
package lazyregexp
import (
"os"
"regexp"
"strings"
"sync"
)
// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be
// compiled the first time it is needed.
type Regexp struct {
str string
once sync.Once
rx *regexp.Regexp
}
func (r *Regexp) re() *regexp.Regexp {
r.once.Do(r.build)
return r.rx
}
func (r *Regexp) build() {
r.rx = regexp.MustCompile(r.str)
r.str = ""
}
func (r *Regexp) FindSubmatch(s []byte) [][]byte {
return r.re().FindSubmatch(s)
}
func (r *Regexp) FindStringSubmatch(s string) []string {
return r.re().FindStringSubmatch(s)
}
func (r *Regexp) FindStringSubmatchIndex(s string) []int {
return r.re().FindStringSubmatchIndex(s)
}
func (r *Regexp) ReplaceAllString(src, repl string) string {
return r.re().ReplaceAllString(src, repl)
}
func (r *Regexp) FindString(s string) string {
return r.re().FindString(s)
}
func (r *Regexp) FindAllString(s string, n int) []string {
return r.re().FindAllString(s, n)
}
func (r *Regexp) MatchString(s string) bool {
return r.re().MatchString(s)
}
func (r *Regexp) SubexpNames() []string {
return r.re().SubexpNames()
}
var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
// New creates a new lazy regexp, delaying the compiling work until it is first
// needed. If the code is being run as part of tests, the regexp compiling will
// happen immediately.
func New(str string) *Regexp {
lr := &Regexp{str: str}
if inTest {
// In tests, always compile the regexps early.
lr.re()
}
return lr
}
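Because lazyregexp is an internal package (importable only from within golang.org/x/mod), the following standalone sketch simply reproduces the same lazy-compilation pattern with a locally defined type; all names in it are illustrative:

package main

import (
    "fmt"
    "regexp"
    "sync"
)

// lazyRE mirrors lazyregexp.Regexp: the pattern is compiled on first use.
type lazyRE struct {
    str  string
    once sync.Once
    rx   *regexp.Regexp
}

func (l *lazyRE) re() *regexp.Regexp {
    l.once.Do(func() { l.rx = regexp.MustCompile(l.str) })
    return l.rx
}

// Declared at package level without paying any compilation cost at init time.
var semverRE = &lazyRE{str: `^v\d+\.\d+\.\d+$`}

func main() {
    // The regexp is compiled here, on first use.
    fmt.Println(semverRE.re().MatchString("v1.2.3")) // true
}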

174
vendor/golang.org/x/mod/modfile/print.go generated vendored Normal file
View File

@ -0,0 +1,174 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Module file printer.
package modfile
import (
"bytes"
"fmt"
"strings"
)
// Format returns a go.mod file as a byte slice, formatted in standard style.
func Format(f *FileSyntax) []byte {
pr := &printer{}
pr.file(f)
return pr.Bytes()
}
// A printer collects the state during printing of a file or expression.
type printer struct {
bytes.Buffer // output buffer
comment []Comment // pending end-of-line comments
margin int // left margin (indent), a number of tabs
}
// printf prints to the buffer.
func (p *printer) printf(format string, args ...interface{}) {
fmt.Fprintf(p, format, args...)
}
// indent returns the position on the current line, in bytes, 0-indexed.
func (p *printer) indent() int {
b := p.Bytes()
n := 0
for n < len(b) && b[len(b)-1-n] != '\n' {
n++
}
return n
}
// newline ends the current line, flushing end-of-line comments.
func (p *printer) newline() {
if len(p.comment) > 0 {
p.printf(" ")
for i, com := range p.comment {
if i > 0 {
p.trim()
p.printf("\n")
for i := 0; i < p.margin; i++ {
p.printf("\t")
}
}
p.printf("%s", strings.TrimSpace(com.Token))
}
p.comment = p.comment[:0]
}
p.trim()
p.printf("\n")
for i := 0; i < p.margin; i++ {
p.printf("\t")
}
}
// trim removes trailing spaces and tabs from the current line.
func (p *printer) trim() {
// Remove trailing spaces and tabs from line we're about to end.
b := p.Bytes()
n := len(b)
for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') {
n--
}
p.Truncate(n)
}
// file formats the given file into the print buffer.
func (p *printer) file(f *FileSyntax) {
for _, com := range f.Before {
p.printf("%s", strings.TrimSpace(com.Token))
p.newline()
}
for i, stmt := range f.Stmt {
switch x := stmt.(type) {
case *CommentBlock:
// comments already handled
p.expr(x)
default:
p.expr(x)
p.newline()
}
for _, com := range stmt.Comment().After {
p.printf("%s", strings.TrimSpace(com.Token))
p.newline()
}
if i+1 < len(f.Stmt) {
p.newline()
}
}
}
func (p *printer) expr(x Expr) {
// Emit line-comments preceding this expression.
if before := x.Comment().Before; len(before) > 0 {
// Want to print a line comment.
// Line comments must be at the current margin.
p.trim()
if p.indent() > 0 {
// There's other text on the line. Start a new line.
p.printf("\n")
}
// Re-indent to margin.
for i := 0; i < p.margin; i++ {
p.printf("\t")
}
for _, com := range before {
p.printf("%s", strings.TrimSpace(com.Token))
p.newline()
}
}
switch x := x.(type) {
default:
panic(fmt.Errorf("printer: unexpected type %T", x))
case *CommentBlock:
// done
case *LParen:
p.printf("(")
case *RParen:
p.printf(")")
case *Line:
p.tokens(x.Token)
case *LineBlock:
p.tokens(x.Token)
p.printf(" ")
p.expr(&x.LParen)
p.margin++
for _, l := range x.Line {
p.newline()
p.expr(l)
}
p.margin--
p.newline()
p.expr(&x.RParen)
}
// Queue end-of-line comments for printing when we
// reach the end of the line.
p.comment = append(p.comment, x.Comment().Suffix...)
}
func (p *printer) tokens(tokens []string) {
sep := ""
for _, t := range tokens {
if t == "," || t == ")" || t == "]" || t == "}" {
sep = ""
}
p.printf("%s%s", sep, t)
sep = " "
if t == "(" || t == "[" || t == "{" {
sep = ""
}
}
}
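Format is normally reached through modfile.Parse (defined in rule.go, whose diff is suppressed further below). A small usage sketch, with an illustrative go.mod body, might look like this:

package main

import (
    "fmt"
    "log"

    "golang.org/x/mod/modfile"
)

func main() {
    src := []byte("module example.com/hello\ngo 1.19\nrequire golang.org/x/mod v0.11.0\n")
    // Parse builds the FileSyntax tree that Format prints back out
    // in standard style.
    f, err := modfile.Parse("go.mod", src, nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s", modfile.Format(f.Syntax))
}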

958
vendor/golang.org/x/mod/modfile/read.go generated vendored Normal file
View File

@ -0,0 +1,958 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modfile
import (
"bytes"
"errors"
"fmt"
"os"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// A Position describes an arbitrary source position in a file, including the
// file, line, column, and byte offset.
type Position struct {
Line int // line in input (starting at 1)
LineRune int // rune in line (starting at 1)
Byte int // byte in input (starting at 0)
}
// add returns the position at the end of s, assuming it starts at p.
func (p Position) add(s string) Position {
p.Byte += len(s)
if n := strings.Count(s, "\n"); n > 0 {
p.Line += n
s = s[strings.LastIndex(s, "\n")+1:]
p.LineRune = 1
}
p.LineRune += utf8.RuneCountInString(s)
return p
}
// An Expr represents an input element.
type Expr interface {
// Span returns the start and end position of the expression,
// excluding leading or trailing comments.
Span() (start, end Position)
// Comment returns the comments attached to the expression.
// This method would normally be named 'Comments' but that
// would interfere with embedding a type of the same name.
Comment() *Comments
}
// A Comment represents a single // comment.
type Comment struct {
Start Position
Token string // without trailing newline
Suffix bool // an end of line (not whole line) comment
}
// Comments collects the comments associated with an expression.
type Comments struct {
Before []Comment // whole-line comments before this expression
Suffix []Comment // end-of-line comments after this expression
// For top-level expressions only, After lists whole-line
// comments following the expression.
After []Comment
}
// Comment returns the receiver. This isn't useful by itself, but
// a Comments struct is embedded into all the expression
// implementation types, and this gives each of those a Comment
// method to satisfy the Expr interface.
func (c *Comments) Comment() *Comments {
return c
}
// A FileSyntax represents an entire go.mod file.
type FileSyntax struct {
Name string // file path
Comments
Stmt []Expr
}
func (x *FileSyntax) Span() (start, end Position) {
if len(x.Stmt) == 0 {
return
}
start, _ = x.Stmt[0].Span()
_, end = x.Stmt[len(x.Stmt)-1].Span()
return start, end
}
// addLine adds a line containing the given tokens to the file.
//
// If the first token of the hint matches the first token of the
// line, the new line is added at the end of the block containing hint,
// extracting hint into a new block if it is not yet in one.
//
// If the hint is non-nil but its first token does not match,
// the new line is added after the block containing hint
// (or hint itself, if not in a block).
//
// If no hint is provided, addLine appends the line to the end of
// the last block with a matching first token,
// or to the end of the file if no such block exists.
func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line {
if hint == nil {
// If no hint given, add to the last statement of the given type.
Loop:
for i := len(x.Stmt) - 1; i >= 0; i-- {
stmt := x.Stmt[i]
switch stmt := stmt.(type) {
case *Line:
if stmt.Token != nil && stmt.Token[0] == tokens[0] {
hint = stmt
break Loop
}
case *LineBlock:
if stmt.Token[0] == tokens[0] {
hint = stmt
break Loop
}
}
}
}
newLineAfter := func(i int) *Line {
new := &Line{Token: tokens}
if i == len(x.Stmt) {
x.Stmt = append(x.Stmt, new)
} else {
x.Stmt = append(x.Stmt, nil)
copy(x.Stmt[i+2:], x.Stmt[i+1:])
x.Stmt[i+1] = new
}
return new
}
if hint != nil {
for i, stmt := range x.Stmt {
switch stmt := stmt.(type) {
case *Line:
if stmt == hint {
if stmt.Token == nil || stmt.Token[0] != tokens[0] {
return newLineAfter(i)
}
// Convert line to line block.
stmt.InBlock = true
block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}}
stmt.Token = stmt.Token[1:]
x.Stmt[i] = block
new := &Line{Token: tokens[1:], InBlock: true}
block.Line = append(block.Line, new)
return new
}
case *LineBlock:
if stmt == hint {
if stmt.Token[0] != tokens[0] {
return newLineAfter(i)
}
new := &Line{Token: tokens[1:], InBlock: true}
stmt.Line = append(stmt.Line, new)
return new
}
for j, line := range stmt.Line {
if line == hint {
if stmt.Token[0] != tokens[0] {
return newLineAfter(i)
}
// Add new line after hint within the block.
stmt.Line = append(stmt.Line, nil)
copy(stmt.Line[j+2:], stmt.Line[j+1:])
new := &Line{Token: tokens[1:], InBlock: true}
stmt.Line[j+1] = new
return new
}
}
}
}
}
new := &Line{Token: tokens}
x.Stmt = append(x.Stmt, new)
return new
}
func (x *FileSyntax) updateLine(line *Line, tokens ...string) {
if line.InBlock {
tokens = tokens[1:]
}
line.Token = tokens
}
// markRemoved modifies line so that it (and its end-of-line comment, if any)
// will be dropped by (*FileSyntax).Cleanup.
func (line *Line) markRemoved() {
line.Token = nil
line.Comments.Suffix = nil
}
// Cleanup cleans up the file syntax x after any edit operations.
// To avoid quadratic behavior, (*Line).markRemoved marks the line as dead
// by setting line.Token = nil but does not remove it from the slice
// in which it appears. After edits have all been indicated,
// calling Cleanup cleans out the dead lines.
func (x *FileSyntax) Cleanup() {
w := 0
for _, stmt := range x.Stmt {
switch stmt := stmt.(type) {
case *Line:
if stmt.Token == nil {
continue
}
case *LineBlock:
ww := 0
for _, line := range stmt.Line {
if line.Token != nil {
stmt.Line[ww] = line
ww++
}
}
if ww == 0 {
continue
}
if ww == 1 {
// Collapse block into single line.
line := &Line{
Comments: Comments{
Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
After: commentsAdd(stmt.Line[0].After, stmt.After),
},
Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
}
x.Stmt[w] = line
w++
continue
}
stmt.Line = stmt.Line[:ww]
}
x.Stmt[w] = stmt
w++
}
x.Stmt = x.Stmt[:w]
}
func commentsAdd(x, y []Comment) []Comment {
return append(x[:len(x):len(x)], y...)
}
func stringsAdd(x, y []string) []string {
return append(x[:len(x):len(x)], y...)
}
// A CommentBlock represents a top-level block of comments separate
// from any rule.
type CommentBlock struct {
Comments
Start Position
}
func (x *CommentBlock) Span() (start, end Position) {
return x.Start, x.Start
}
// A Line is a single line of tokens.
type Line struct {
Comments
Start Position
Token []string
InBlock bool
End Position
}
func (x *Line) Span() (start, end Position) {
return x.Start, x.End
}
// A LineBlock is a factored block of lines, like
//
// require (
// "x"
// "y"
// )
type LineBlock struct {
Comments
Start Position
LParen LParen
Token []string
Line []*Line
RParen RParen
}
func (x *LineBlock) Span() (start, end Position) {
return x.Start, x.RParen.Pos.add(")")
}
// An LParen represents the beginning of a parenthesized line block.
// It is a place to store suffix comments.
type LParen struct {
Comments
Pos Position
}
func (x *LParen) Span() (start, end Position) {
return x.Pos, x.Pos.add(")")
}
// An RParen represents the end of a parenthesized line block.
// It is a place to store whole-line (before) comments.
type RParen struct {
Comments
Pos Position
}
func (x *RParen) Span() (start, end Position) {
return x.Pos, x.Pos.add(")")
}
// An input represents a single input file being parsed.
type input struct {
// Lexing state.
filename string // name of input file, for errors
complete []byte // entire input
remaining []byte // remaining input
tokenStart []byte // token being scanned to end of input
token token // next token to be returned by lex, peek
pos Position // current input position
comments []Comment // accumulated comments
// Parser state.
file *FileSyntax // returned top-level syntax tree
parseErrors ErrorList // errors encountered during parsing
// Comment assignment state.
pre []Expr // all expressions, in preorder traversal
post []Expr // all expressions, in postorder traversal
}
func newInput(filename string, data []byte) *input {
return &input{
filename: filename,
complete: data,
remaining: data,
pos: Position{Line: 1, LineRune: 1, Byte: 0},
}
}
// parse parses the input file.
func parse(file string, data []byte) (f *FileSyntax, err error) {
// The parser panics for both routine errors like syntax errors
// and for programmer bugs like array index errors.
// Turn both into error returns. Catching bug panics is
// especially important when processing many files.
in := newInput(file, data)
defer func() {
if e := recover(); e != nil && e != &in.parseErrors {
in.parseErrors = append(in.parseErrors, Error{
Filename: in.filename,
Pos: in.pos,
Err: fmt.Errorf("internal error: %v", e),
})
}
if err == nil && len(in.parseErrors) > 0 {
err = in.parseErrors
}
}()
// Prime the lexer by reading in the first token. It will be available
// in the next peek() or lex() call.
in.readToken()
// Invoke the parser.
in.parseFile()
if len(in.parseErrors) > 0 {
return nil, in.parseErrors
}
in.file.Name = in.filename
// Assign comments to nearby syntax.
in.assignComments()
return in.file, nil
}
// Error is called to report an error.
// Error does not return: it panics.
func (in *input) Error(s string) {
in.parseErrors = append(in.parseErrors, Error{
Filename: in.filename,
Pos: in.pos,
Err: errors.New(s),
})
panic(&in.parseErrors)
}
// eof reports whether the input has reached end of file.
func (in *input) eof() bool {
return len(in.remaining) == 0
}
// peekRune returns the next rune in the input without consuming it.
func (in *input) peekRune() int {
if len(in.remaining) == 0 {
return 0
}
r, _ := utf8.DecodeRune(in.remaining)
return int(r)
}
// peekPrefix reports whether the remaining input begins with the given prefix.
func (in *input) peekPrefix(prefix string) bool {
// This is like bytes.HasPrefix(in.remaining, []byte(prefix))
// but without the allocation of the []byte copy of prefix.
for i := 0; i < len(prefix); i++ {
if i >= len(in.remaining) || in.remaining[i] != prefix[i] {
return false
}
}
return true
}
// readRune consumes and returns the next rune in the input.
func (in *input) readRune() int {
if len(in.remaining) == 0 {
in.Error("internal lexer error: readRune at EOF")
}
r, size := utf8.DecodeRune(in.remaining)
in.remaining = in.remaining[size:]
if r == '\n' {
in.pos.Line++
in.pos.LineRune = 1
} else {
in.pos.LineRune++
}
in.pos.Byte += size
return int(r)
}
type token struct {
kind tokenKind
pos Position
endPos Position
text string
}
type tokenKind int
const (
_EOF tokenKind = -(iota + 1)
_EOLCOMMENT
_IDENT
_STRING
_COMMENT
// newlines and punctuation tokens are allowed as ASCII codes.
)
func (k tokenKind) isComment() bool {
return k == _COMMENT || k == _EOLCOMMENT
}
// isEOL returns whether a token terminates a line.
func (k tokenKind) isEOL() bool {
return k == _EOF || k == _EOLCOMMENT || k == '\n'
}
// startToken marks the beginning of the next input token.
// It must be followed by a call to endToken, once the token's text has
// been consumed using readRune.
func (in *input) startToken() {
in.tokenStart = in.remaining
in.token.text = ""
in.token.pos = in.pos
}
// endToken marks the end of an input token.
// It records the actual token string in tok.text.
// A single trailing newline (LF or CRLF) will be removed from comment tokens.
func (in *input) endToken(kind tokenKind) {
in.token.kind = kind
text := string(in.tokenStart[:len(in.tokenStart)-len(in.remaining)])
if kind.isComment() {
if strings.HasSuffix(text, "\r\n") {
text = text[:len(text)-2]
} else {
text = strings.TrimSuffix(text, "\n")
}
}
in.token.text = text
in.token.endPos = in.pos
}
// peek returns the kind of the next token returned by lex.
func (in *input) peek() tokenKind {
return in.token.kind
}
// lex is called from the parser to obtain the next input token.
func (in *input) lex() token {
tok := in.token
in.readToken()
return tok
}
// readToken lexes the next token from the text and stores it in in.token.
func (in *input) readToken() {
// Skip past spaces, stopping at non-space or EOF.
for !in.eof() {
c := in.peekRune()
if c == ' ' || c == '\t' || c == '\r' {
in.readRune()
continue
}
// Comment runs to end of line.
if in.peekPrefix("//") {
in.startToken()
// Is this comment the only thing on its line?
// Find the last \n before this // and see if it's all
// spaces from there to here.
i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0
in.readRune()
in.readRune()
// Consume comment.
for len(in.remaining) > 0 && in.readRune() != '\n' {
}
// If we are at top level (not in a statement), hand the comment to
// the parser as a _COMMENT token. The grammar is written
// to handle top-level comments itself.
if !suffix {
in.endToken(_COMMENT)
return
}
// Otherwise, save comment for later attachment to syntax tree.
in.endToken(_EOLCOMMENT)
in.comments = append(in.comments, Comment{in.token.pos, in.token.text, suffix})
return
}
if in.peekPrefix("/*") {
in.Error("mod files must use // comments (not /* */ comments)")
}
// Found non-space non-comment.
break
}
// Found the beginning of the next token.
in.startToken()
// End of file.
if in.eof() {
in.endToken(_EOF)
return
}
// Punctuation tokens.
switch c := in.peekRune(); c {
case '\n', '(', ')', '[', ']', '{', '}', ',':
in.readRune()
in.endToken(tokenKind(c))
return
case '"', '`': // quoted string
quote := c
in.readRune()
for {
if in.eof() {
in.pos = in.token.pos
in.Error("unexpected EOF in string")
}
if in.peekRune() == '\n' {
in.Error("unexpected newline in string")
}
c := in.readRune()
if c == quote {
break
}
if c == '\\' && quote != '`' {
if in.eof() {
in.pos = in.token.pos
in.Error("unexpected EOF in string")
}
in.readRune()
}
}
in.endToken(_STRING)
return
}
// Checked all punctuation. Must be identifier token.
if c := in.peekRune(); !isIdent(c) {
in.Error(fmt.Sprintf("unexpected input character %#q", c))
}
// Scan over identifier.
for isIdent(in.peekRune()) {
if in.peekPrefix("//") {
break
}
if in.peekPrefix("/*") {
in.Error("mod files must use // comments (not /* */ comments)")
}
in.readRune()
}
in.endToken(_IDENT)
}
// isIdent reports whether c is an identifier rune.
// We treat most printable runes as identifier runes, except for a handful of
// ASCII punctuation characters.
func isIdent(c int) bool {
switch r := rune(c); r {
case ' ', '(', ')', '[', ']', '{', '}', ',':
return false
default:
return !unicode.IsSpace(r) && unicode.IsPrint(r)
}
}
// Comment assignment.
// We build two lists of all subexpressions, preorder and postorder.
// The preorder list is ordered by start location, with outer expressions first.
// The postorder list is ordered by end location, with outer expressions last.
// We use the preorder list to assign each whole-line comment to the syntax
// immediately following it, and we use the postorder list to assign each
// end-of-line comment to the syntax immediately preceding it.
// order walks the expression adding it and its subexpressions to the
// preorder and postorder lists.
func (in *input) order(x Expr) {
if x != nil {
in.pre = append(in.pre, x)
}
switch x := x.(type) {
default:
panic(fmt.Errorf("order: unexpected type %T", x))
case nil:
// nothing
case *LParen, *RParen:
// nothing
case *CommentBlock:
// nothing
case *Line:
// nothing
case *FileSyntax:
for _, stmt := range x.Stmt {
in.order(stmt)
}
case *LineBlock:
in.order(&x.LParen)
for _, l := range x.Line {
in.order(l)
}
in.order(&x.RParen)
}
if x != nil {
in.post = append(in.post, x)
}
}
// assignComments attaches comments to nearby syntax.
func (in *input) assignComments() {
const debug = false
// Generate preorder and postorder lists.
in.order(in.file)
// Split into whole-line comments and suffix comments.
var line, suffix []Comment
for _, com := range in.comments {
if com.Suffix {
suffix = append(suffix, com)
} else {
line = append(line, com)
}
}
if debug {
for _, c := range line {
fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
}
}
// Assign line comments to syntax immediately following.
for _, x := range in.pre {
start, _ := x.Span()
if debug {
fmt.Fprintf(os.Stderr, "pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte)
}
xcom := x.Comment()
for len(line) > 0 && start.Byte >= line[0].Start.Byte {
if debug {
fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte)
}
xcom.Before = append(xcom.Before, line[0])
line = line[1:]
}
}
// Remaining line comments go at end of file.
in.file.After = append(in.file.After, line...)
if debug {
for _, c := range suffix {
fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
}
}
// Assign suffix comments to syntax immediately before.
for i := len(in.post) - 1; i >= 0; i-- {
x := in.post[i]
start, end := x.Span()
if debug {
fmt.Fprintf(os.Stderr, "post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte)
}
// Do not assign suffix comments to end of line block or whole file.
// Instead assign them to the last element inside.
switch x.(type) {
case *FileSyntax:
continue
}
// Do not assign suffix comments to something that starts
// on an earlier line, so that in
//
// x ( y
// z ) // comment
//
// we assign the comment to z and not to x ( ... ).
if start.Line != end.Line {
continue
}
xcom := x.Comment()
for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
if debug {
fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte)
}
xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
suffix = suffix[:len(suffix)-1]
}
}
// We assigned suffix comments in reverse.
// If multiple suffix comments were appended to the same
// expression node, they are now in reverse. Fix that.
for _, x := range in.post {
reverseComments(x.Comment().Suffix)
}
// Remaining suffix comments go at beginning of file.
in.file.Before = append(in.file.Before, suffix...)
}
// reverseComments reverses the []Comment list.
func reverseComments(list []Comment) {
for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
list[i], list[j] = list[j], list[i]
}
}
func (in *input) parseFile() {
in.file = new(FileSyntax)
var cb *CommentBlock
for {
switch in.peek() {
case '\n':
in.lex()
if cb != nil {
in.file.Stmt = append(in.file.Stmt, cb)
cb = nil
}
case _COMMENT:
tok := in.lex()
if cb == nil {
cb = &CommentBlock{Start: tok.pos}
}
com := cb.Comment()
com.Before = append(com.Before, Comment{Start: tok.pos, Token: tok.text})
case _EOF:
if cb != nil {
in.file.Stmt = append(in.file.Stmt, cb)
}
return
default:
in.parseStmt()
if cb != nil {
in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before
cb = nil
}
}
}
}
func (in *input) parseStmt() {
tok := in.lex()
start := tok.pos
end := tok.endPos
tokens := []string{tok.text}
for {
tok := in.lex()
switch {
case tok.kind.isEOL():
in.file.Stmt = append(in.file.Stmt, &Line{
Start: start,
Token: tokens,
End: end,
})
return
case tok.kind == '(':
if next := in.peek(); next.isEOL() {
// Start of block: no more tokens on this line.
in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, tokens, tok))
return
} else if next == ')' {
rparen := in.lex()
if in.peek().isEOL() {
// Empty block.
in.lex()
in.file.Stmt = append(in.file.Stmt, &LineBlock{
Start: start,
Token: tokens,
LParen: LParen{Pos: tok.pos},
RParen: RParen{Pos: rparen.pos},
})
return
}
// '( )' in the middle of the line, not a block.
tokens = append(tokens, tok.text, rparen.text)
} else {
// '(' in the middle of the line, not a block.
tokens = append(tokens, tok.text)
}
default:
tokens = append(tokens, tok.text)
end = tok.endPos
}
}
}
func (in *input) parseLineBlock(start Position, token []string, lparen token) *LineBlock {
x := &LineBlock{
Start: start,
Token: token,
LParen: LParen{Pos: lparen.pos},
}
var comments []Comment
for {
switch in.peek() {
case _EOLCOMMENT:
// Suffix comment, will be attached later by assignComments.
in.lex()
case '\n':
// Blank line. Add an empty comment to preserve it.
in.lex()
if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" {
comments = append(comments, Comment{})
}
case _COMMENT:
tok := in.lex()
comments = append(comments, Comment{Start: tok.pos, Token: tok.text})
case _EOF:
in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
case ')':
rparen := in.lex()
x.RParen.Before = comments
x.RParen.Pos = rparen.pos
if !in.peek().isEOL() {
in.Error("syntax error (expected newline after closing paren)")
}
in.lex()
return x
default:
l := in.parseLine()
x.Line = append(x.Line, l)
l.Comment().Before = comments
comments = nil
}
}
}
func (in *input) parseLine() *Line {
tok := in.lex()
if tok.kind.isEOL() {
in.Error("internal parse error: parseLine at end of line")
}
start := tok.pos
end := tok.endPos
tokens := []string{tok.text}
for {
tok := in.lex()
if tok.kind.isEOL() {
return &Line{
Start: start,
Token: tokens,
End: end,
InBlock: true,
}
}
tokens = append(tokens, tok.text)
end = tok.endPos
}
}
var (
slashSlash = []byte("//")
moduleStr = []byte("module")
)
// ModulePath returns the module path from the gomod file text.
// If it cannot find a module path, it returns an empty string.
// It is tolerant of unrelated problems in the go.mod file.
func ModulePath(mod []byte) string {
for len(mod) > 0 {
line := mod
mod = nil
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, mod = line[:i], line[i+1:]
}
if i := bytes.Index(line, slashSlash); i >= 0 {
line = line[:i]
}
line = bytes.TrimSpace(line)
if !bytes.HasPrefix(line, moduleStr) {
continue
}
line = line[len(moduleStr):]
n := len(line)
line = bytes.TrimSpace(line)
if len(line) == n || len(line) == 0 {
continue
}
if line[0] == '"' || line[0] == '`' {
p, err := strconv.Unquote(string(line))
if err != nil {
return "" // malformed quoted string or multiline module path
}
return p
}
return string(line)
}
return "" // missing module path
}
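A minimal usage sketch for ModulePath (not part of the vendored file); the go.mod content is illustrative:

package main

import (
    "fmt"

    "golang.org/x/mod/modfile"
)

func main() {
    gomod := []byte("// example\nmodule example.com/hello\n\ngo 1.19\n")
    // ModulePath tolerates unrelated problems in the file and returns ""
    // when no module directive can be found.
    fmt.Println(modfile.ModulePath(gomod)) // example.com/hello
}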

1559
vendor/golang.org/x/mod/modfile/rule.go generated vendored Normal file

File diff suppressed because it is too large

234
vendor/golang.org/x/mod/modfile/work.go generated vendored Normal file
View File

@ -0,0 +1,234 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modfile
import (
"fmt"
"sort"
"strings"
)
// A WorkFile is the parsed, interpreted form of a go.work file.
type WorkFile struct {
Go *Go
Use []*Use
Replace []*Replace
Syntax *FileSyntax
}
// A Use is a single directory statement.
type Use struct {
Path string // Use path of module.
ModulePath string // Module path in the comment.
Syntax *Line
}
// ParseWork parses and returns a go.work file.
//
// file is the name of the file, used in positions and errors.
//
// data is the content of the file.
//
// fix is an optional function that canonicalizes module versions.
// If fix is nil, all module versions must be canonical (module.CanonicalVersion
// must return the same string).
func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) {
fs, err := parse(file, data)
if err != nil {
return nil, err
}
f := &WorkFile{
Syntax: fs,
}
var errs ErrorList
for _, x := range fs.Stmt {
switch x := x.(type) {
case *Line:
f.add(&errs, x, x.Token[0], x.Token[1:], fix)
case *LineBlock:
if len(x.Token) > 1 {
errs = append(errs, Error{
Filename: file,
Pos: x.Start,
Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
})
continue
}
switch x.Token[0] {
default:
errs = append(errs, Error{
Filename: file,
Pos: x.Start,
Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
})
continue
case "use", "replace":
for _, l := range x.Line {
f.add(&errs, l, x.Token[0], l.Token, fix)
}
}
}
}
if len(errs) > 0 {
return nil, errs
}
return f, nil
}
// Cleanup cleans up the file f after any edit operations.
// To avoid quadratic behavior, modifications like DropRequire
// clear the entry but do not remove it from the slice.
// Cleanup cleans out all the cleared entries.
func (f *WorkFile) Cleanup() {
w := 0
for _, r := range f.Use {
if r.Path != "" {
f.Use[w] = r
w++
}
}
f.Use = f.Use[:w]
w = 0
for _, r := range f.Replace {
if r.Old.Path != "" {
f.Replace[w] = r
w++
}
}
f.Replace = f.Replace[:w]
f.Syntax.Cleanup()
}
func (f *WorkFile) AddGoStmt(version string) error {
if !GoVersionRE.MatchString(version) {
return fmt.Errorf("invalid language version string %q", version)
}
if f.Go == nil {
stmt := &Line{Token: []string{"go", version}}
f.Go = &Go{
Version: version,
Syntax: stmt,
}
// Find the first non-comment-only block and add
// the go statement before it. That will keep file comments at the top.
i := 0
for i = 0; i < len(f.Syntax.Stmt); i++ {
if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok {
break
}
}
f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...)
} else {
f.Go.Version = version
f.Syntax.updateLine(f.Go.Syntax, "go", version)
}
return nil
}
func (f *WorkFile) AddUse(diskPath, modulePath string) error {
need := true
for _, d := range f.Use {
if d.Path == diskPath {
if need {
d.ModulePath = modulePath
f.Syntax.updateLine(d.Syntax, "use", AutoQuote(diskPath))
need = false
} else {
d.Syntax.markRemoved()
*d = Use{}
}
}
}
if need {
f.AddNewUse(diskPath, modulePath)
}
return nil
}
func (f *WorkFile) AddNewUse(diskPath, modulePath string) {
line := f.Syntax.addLine(nil, "use", AutoQuote(diskPath))
f.Use = append(f.Use, &Use{Path: diskPath, ModulePath: modulePath, Syntax: line})
}
func (f *WorkFile) SetUse(dirs []*Use) {
need := make(map[string]string)
for _, d := range dirs {
need[d.Path] = d.ModulePath
}
for _, d := range f.Use {
if modulePath, ok := need[d.Path]; ok {
d.ModulePath = modulePath
} else {
d.Syntax.markRemoved()
*d = Use{}
}
}
// TODO(#45713): Add module path to comment.
for diskPath, modulePath := range need {
f.AddNewUse(diskPath, modulePath)
}
f.SortBlocks()
}
func (f *WorkFile) DropUse(path string) error {
for _, d := range f.Use {
if d.Path == path {
d.Syntax.markRemoved()
*d = Use{}
}
}
return nil
}
func (f *WorkFile) AddReplace(oldPath, oldVers, newPath, newVers string) error {
return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers)
}
func (f *WorkFile) DropReplace(oldPath, oldVers string) error {
for _, r := range f.Replace {
if r.Old.Path == oldPath && r.Old.Version == oldVers {
r.Syntax.markRemoved()
*r = Replace{}
}
}
return nil
}
func (f *WorkFile) SortBlocks() {
f.removeDups() // otherwise sorting is unsafe
for _, stmt := range f.Syntax.Stmt {
block, ok := stmt.(*LineBlock)
if !ok {
continue
}
sort.SliceStable(block.Line, func(i, j int) bool {
return lineLess(block.Line[i], block.Line[j])
})
}
}
// removeDups removes duplicate replace directives.
//
// Later replace directives take priority.
//
// require directives are not de-duplicated. That's left up to higher-level
// logic (MVS).
//
// retract directives are not de-duplicated since comments are
// meaningful, and versions may be retracted multiple times.
func (f *WorkFile) removeDups() {
removeDups(f.Syntax, nil, &f.Replace)
}
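A minimal usage sketch for ParseWork (not part of the vendored file); the go.work content and directory names are illustrative:

package main

import (
    "fmt"
    "log"

    "golang.org/x/mod/modfile"
)

func main() {
    data := []byte("go 1.19\n\nuse (\n\t./tools\n\t./app\n)\n")
    // ParseWork interprets a go.work file; fix is nil because no module
    // versions need canonicalizing in this example.
    wf, err := modfile.ParseWork("go.work", data, nil)
    if err != nil {
        log.Fatal(err)
    }
    for _, u := range wf.Use {
        fmt.Println(u.Path) // ./tools, ./app
    }
}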

841
vendor/golang.org/x/mod/module/module.go generated vendored Normal file
View File

@ -0,0 +1,841 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package module defines the module.Version type along with support code.
//
// The module.Version type is a simple Path, Version pair:
//
// type Version struct {
// Path string
// Version string
// }
//
// There are no restrictions imposed directly by use of this structure,
// but additional checking functions, most notably Check, verify that
// a particular path, version pair is valid.
//
// # Escaped Paths
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the escaped form be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe escaped form that
// leaves most paths unaltered.
//
// The safe escaped form is to replace every uppercase letter
// with an exclamation mark followed by the letter's lowercase equivalent.
//
// For example,
//
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the escaped form is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to escape a literal !.
//
// # Unicode Restrictions
//
// Today, paths are disallowed from using Unicode.
//
// Although paths are currently disallowed from using Unicode,
// we would like at some point to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention for escaping them in the file system.
// But there are at least two subtle considerations.
//
// First, note that not all case-fold equivalent distinct runes
// form an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are three distinct runes that case-fold to each other.
// When we do add Unicode letters, we must not assume that upper/lower
// are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would escape as "!!k", or perhaps as "(212A)".
//
// Second, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
package module
// IMPORTANT NOTE
//
// This file essentially defines the set of valid import paths for the go command.
// There are many subtle considerations, including Unicode ambiguity,
// security, network, and file system representations.
//
// This file also defines the set of valid module path and version combinations,
// another topic with many subtle considerations.
//
// Changes to the semantics in this file require approval from rsc.
import (
"errors"
"fmt"
"path"
"sort"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/mod/semver"
)
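As a small aside (not part of the vendored file), the escaping convention described in the package comment above can be exercised with EscapePath, which is defined further down in module.go beyond the portion shown here; the input path is illustrative:

package main

import (
    "fmt"
    "log"

    "golang.org/x/mod/module"
)

func main() {
    // Uppercase letters are escaped as '!' plus the lowercase letter,
    // per the package comment above.
    esc, err := module.EscapePath("github.com/Azure/azure-sdk-for-go")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(esc) // github.com/!azure/azure-sdk-for-go
}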
// A Version (for clients, a module.Version) is defined by a module path and version pair.
// These are stored in their plain (unescaped) form.
type Version struct {
// Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2".
Path string
// Version is usually a semantic version in canonical form.
// There are three exceptions to this general rule.
// First, the top-level target of a build has no specific version
// and uses Version = "".
// Second, during MVS calculations the version "none" is used
// to represent the decision to take no version of a given module.
// Third, filesystem paths found in "replace" directives are
// represented by a path with an empty version.
Version string `json:",omitempty"`
}
// String returns a representation of the Version suitable for logging
// (Path@Version, or just Path if Version is empty).
func (m Version) String() string {
if m.Version == "" {
return m.Path
}
return m.Path + "@" + m.Version
}
// A ModuleError indicates an error specific to a module.
type ModuleError struct {
Path string
Version string
Err error
}
// VersionError returns a ModuleError derived from a Version and error,
// or err itself if it is already such an error.
func VersionError(v Version, err error) error {
var mErr *ModuleError
if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version {
return err
}
return &ModuleError{
Path: v.Path,
Version: v.Version,
Err: err,
}
}
func (e *ModuleError) Error() string {
if v, ok := e.Err.(*InvalidVersionError); ok {
return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err)
}
if e.Version != "" {
return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err)
}
return fmt.Sprintf("module %s: %v", e.Path, e.Err)
}
func (e *ModuleError) Unwrap() error { return e.Err }
// An InvalidVersionError indicates an error specific to a version, with the
// module path unknown or specified externally.
//
// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError
// must not wrap a ModuleError.
type InvalidVersionError struct {
Version string
Pseudo bool
Err error
}
// noun returns either "version" or "pseudo-version", depending on whether
// e.Version is a pseudo-version.
func (e *InvalidVersionError) noun() string {
if e.Pseudo {
return "pseudo-version"
}
return "version"
}
func (e *InvalidVersionError) Error() string {
return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err)
}
func (e *InvalidVersionError) Unwrap() error { return e.Err }
// An InvalidPathError indicates a module, import, or file path doesn't
// satisfy all naming constraints. See CheckPath, CheckImportPath,
// and CheckFilePath for specific restrictions.
type InvalidPathError struct {
Kind string // "module", "import", or "file"
Path string
Err error
}
func (e *InvalidPathError) Error() string {
return fmt.Sprintf("malformed %s path %q: %v", e.Kind, e.Path, e.Err)
}
func (e *InvalidPathError) Unwrap() error { return e.Err }
// Check checks that a given module path, version pair is valid.
// In addition to the path being a valid module path
// and the version being a valid semantic version,
// the two must correspond.
// For example, the path "yaml/v2" only corresponds to
// semantic versions beginning with "v2.".
func Check(path, version string) error {
if err := CheckPath(path); err != nil {
return err
}
if !semver.IsValid(version) {
return &ModuleError{
Path: path,
Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")},
}
}
_, pathMajor, _ := SplitPathVersion(path)
if err := CheckPathMajor(version, pathMajor); err != nil {
return &ModuleError{Path: path, Err: err}
}
return nil
}
// firstPathOK reports whether r can appear in the first element of a module path.
// The first element of the path must be an LDH domain name, at least for now.
// To avoid case ambiguity, the domain name must be entirely lower case.
func firstPathOK(r rune) bool {
return r == '-' || r == '.' ||
'0' <= r && r <= '9' ||
'a' <= r && r <= 'z'
}
// modPathOK reports whether r can appear in a module path element.
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~.
//
// This matches what "go get" has historically recognized in import paths,
// and avoids confusing sequences like '%20' or '+' that would change meaning
// if used in a URL.
//
// TODO(rsc): We would like to allow Unicode letters, but that requires additional
// care in the safe encoding (see "escaped paths" above).
func modPathOK(r rune) bool {
if r < utf8.RuneSelf {
return r == '-' || r == '.' || r == '_' || r == '~' ||
'0' <= r && r <= '9' ||
'A' <= r && r <= 'Z' ||
'a' <= r && r <= 'z'
}
return false
}
// importPathOK reports whether r can appear in a package import path element.
//
// Import paths are intermediate between module paths and file paths: we
// disallow characters that would be confusing or ambiguous as arguments to
// 'go get' (such as '@' and ' ' ), but allow certain characters that are
// otherwise-unambiguous on the command line and historically used for some
// binary names (such as '++' as a suffix for compiler binaries and wrappers).
func importPathOK(r rune) bool {
return modPathOK(r) || r == '+'
}
// fileNameOK reports whether r can appear in a file name.
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
// If we expand the set of allowed characters here, we have to
// work harder at detecting potential case-folding and normalization collisions.
// See note about "escaped paths" above.
func fileNameOK(r rune) bool {
if r < utf8.RuneSelf {
// Entire set of ASCII punctuation, from which we remove characters:
// ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
// We disallow some shell special characters: " ' * < > ? ` |
// (Note that some of those are disallowed by the Windows file system as well.)
// We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
// We allow spaces (U+0020) in file names.
const allowed = "!#$%&()+,-.=@[]^_{}~ "
if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
return true
}
return strings.ContainsRune(allowed, r)
}
// It may be OK to add more ASCII punctuation here, but only carefully.
// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
return unicode.IsLetter(r)
}
// CheckPath checks that a module path is valid.
// A valid module path is a valid import path, as checked by CheckImportPath,
// with three additional constraints.
// First, the leading path element (up to the first slash, if any),
// by convention a domain name, must contain only lower-case ASCII letters,
// ASCII digits, dots (U+002E), and dashes (U+002D);
// it must contain at least one dot and cannot start with a dash.
// Second, a final path element of the form /vN, where N looks numeric
// (ASCII digits and dots), must not begin with a leading zero, must not be /v1,
// and must not contain any dots. For paths beginning with "gopkg.in/",
// this second requirement is replaced by a requirement that the path
// follow the gopkg.in server's conventions.
// Third, no path element may begin with a dot.
func CheckPath(path string) (err error) {
defer func() {
if err != nil {
err = &InvalidPathError{Kind: "module", Path: path, Err: err}
}
}()
if err := checkPath(path, modulePath); err != nil {
return err
}
i := strings.Index(path, "/")
if i < 0 {
i = len(path)
}
if i == 0 {
return fmt.Errorf("leading slash")
}
if !strings.Contains(path[:i], ".") {
return fmt.Errorf("missing dot in first path element")
}
if path[0] == '-' {
return fmt.Errorf("leading dash in first path element")
}
for _, r := range path[:i] {
if !firstPathOK(r) {
return fmt.Errorf("invalid char %q in first path element", r)
}
}
if _, _, ok := SplitPathVersion(path); !ok {
return fmt.Errorf("invalid version")
}
return nil
}
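A brief usage sketch for Check and CheckPath (not part of the vendored file); the module paths and versions are illustrative:

package main

import (
    "fmt"

    "golang.org/x/mod/module"
)

func main() {
    // The path and version must agree on the major version suffix.
    fmt.Println(module.Check("github.com/user/repo/v2", "v2.1.0")) // <nil>
    fmt.Println(module.Check("github.com/user/repo", "v2.1.0"))    // error: major version v2 needs a /v2 suffix
    // CheckPath validates the path on its own; uppercase letters are
    // rejected in the leading (domain-like) element.
    fmt.Println(module.CheckPath("Example.com/pkg")) // error
}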
// CheckImportPath checks that an import path is valid.
//
// A valid import path consists of one or more valid path elements
// separated by slashes (U+002F). (It must not begin with nor end in a slash.)
//
// A valid path element is a non-empty string made up of
// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~.
// It must not end with a dot (U+002E), nor contain two dots in a row.
//
// The element prefix up to the first dot must not be a reserved file name
// on Windows, regardless of case (CON, com1, NuL, and so on). The element
// must not have a suffix of a tilde followed by one or more ASCII digits
// (to exclude path elements that look like Windows short-names).
//
// CheckImportPath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckImportPath(path string) error {
if err := checkPath(path, importPath); err != nil {
return &InvalidPathError{Kind: "import", Path: path, Err: err}
}
return nil
}
// pathKind indicates what kind of path we're checking. Module paths,
// import paths, and file paths have different restrictions.
type pathKind int
const (
modulePath pathKind = iota
importPath
filePath
)
// checkPath checks that a general path is valid. kind indicates what
// specific constraints should be applied.
//
// checkPath returns an error describing why the path is not valid.
// Because these checks apply to module, import, and file paths,
// and because other checks may be applied, the caller is expected to wrap
// this error with InvalidPathError.
func checkPath(path string, kind pathKind) error {
if !utf8.ValidString(path) {
return fmt.Errorf("invalid UTF-8")
}
if path == "" {
return fmt.Errorf("empty string")
}
if path[0] == '-' && kind != filePath {
return fmt.Errorf("leading dash")
}
if strings.Contains(path, "//") {
return fmt.Errorf("double slash")
}
if path[len(path)-1] == '/' {
return fmt.Errorf("trailing slash")
}
elemStart := 0
for i, r := range path {
if r == '/' {
if err := checkElem(path[elemStart:i], kind); err != nil {
return err
}
elemStart = i + 1
}
}
if err := checkElem(path[elemStart:], kind); err != nil {
return err
}
return nil
}
// checkElem checks whether an individual path element is valid.
func checkElem(elem string, kind pathKind) error {
if elem == "" {
return fmt.Errorf("empty path element")
}
if strings.Count(elem, ".") == len(elem) {
return fmt.Errorf("invalid path element %q", elem)
}
if elem[0] == '.' && kind == modulePath {
return fmt.Errorf("leading dot in path element")
}
if elem[len(elem)-1] == '.' {
return fmt.Errorf("trailing dot in path element")
}
for _, r := range elem {
ok := false
switch kind {
case modulePath:
ok = modPathOK(r)
case importPath:
ok = importPathOK(r)
case filePath:
ok = fileNameOK(r)
default:
panic(fmt.Sprintf("internal error: invalid kind %v", kind))
}
if !ok {
return fmt.Errorf("invalid char %q", r)
}
}
// Windows disallows a bunch of path elements, sadly.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
short := elem
if i := strings.Index(short, "."); i >= 0 {
short = short[:i]
}
for _, bad := range badWindowsNames {
if strings.EqualFold(bad, short) {
return fmt.Errorf("%q disallowed as path element component on Windows", short)
}
}
if kind == filePath {
// don't check for Windows short-names in file names. They're
// only an issue for import paths.
return nil
}
// Reject path components that look like Windows short-names.
// Those usually end in a tilde followed by one or more ASCII digits.
if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 {
suffix := short[tilde+1:]
suffixIsDigits := true
for _, r := range suffix {
if r < '0' || r > '9' {
suffixIsDigits = false
break
}
}
if suffixIsDigits {
return fmt.Errorf("trailing tilde and digits in path element")
}
}
return nil
}
// CheckFilePath checks that a slash-separated file path is valid.
// The definition of a valid file path is the same as the definition
// of a valid import path except that the set of allowed characters is larger:
// all Unicode letters, ASCII digits, the ASCII space character (U+0020),
// and the ASCII punctuation characters
// “!#$%&()+,-.=@[]^_{}~”.
// (The excluded punctuation characters, " * < > ? ` ' | / \ and :,
// have special meanings in certain shells or operating systems.)
//
// CheckFilePath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckFilePath(path string) error {
if err := checkPath(path, filePath); err != nil {
return &InvalidPathError{Kind: "file", Path: path, Err: err}
}
return nil
}
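// Illustrative sketch, not part of the vendored file: the same string checked as an
// import path and as a file path, showing the wider character set that CheckFilePath
// accepts (here, a space). The function name is made up for this sketch.
func sketchImportVsFilePath() {
	const p = "example.com/dir/hello world.txt"
	fmt.Println("import:", CheckImportPath(p)) // error: space is not a valid import path character
	fmt.Println("file:", CheckFilePath(p))     // <nil>: spaces are allowed in file paths
}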
// badWindowsNames are the reserved file path elements on Windows.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
var badWindowsNames = []string{
"CON",
"PRN",
"AUX",
"NUL",
"COM1",
"COM2",
"COM3",
"COM4",
"COM5",
"COM6",
"COM7",
"COM8",
"COM9",
"LPT1",
"LPT2",
"LPT3",
"LPT4",
"LPT5",
"LPT6",
"LPT7",
"LPT8",
"LPT9",
}
// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
// and pathMajor is either empty or "/vN" for N >= 2.
// As a special case, gopkg.in paths are recognized directly;
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
// SplitPathVersion returns with ok = false when presented with
// a path whose last path element does not satisfy the constraints
// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
if strings.HasPrefix(path, "gopkg.in/") {
return splitGopkgIn(path)
}
i := len(path)
dot := false
for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
if path[i-1] == '.' {
dot = true
}
i--
}
if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
return path, "", true
}
prefix, pathMajor = path[:i-2], path[i-2:]
if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
return path, "", false
}
return prefix, pathMajor, true
}
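// Illustrative sketch, not part of the vendored file: SplitPathVersion on a handful
// of paths, including the gopkg.in special case. The function name is made up.
func sketchSplitPathVersion() {
	for _, p := range []string{
		"example.com/mod",    // → ("example.com/mod", "", true)
		"example.com/mod/v2", // → ("example.com/mod", "/v2", true)
		"example.com/mod/v1", // → ("example.com/mod/v1", "", false): /v1 is rejected
		"gopkg.in/yaml.v3",   // → ("gopkg.in/yaml", ".v3", true): gopkg.in uses ".vN"
	} {
		prefix, pathMajor, ok := SplitPathVersion(p)
		fmt.Printf("%s → %q %q %v\n", p, prefix, pathMajor, ok)
	}
}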
// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
if !strings.HasPrefix(path, "gopkg.in/") {
return path, "", false
}
i := len(path)
if strings.HasSuffix(path, "-unstable") {
i -= len("-unstable")
}
for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
i--
}
if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
// All gopkg.in paths must end in vN for some N.
return path, "", false
}
prefix, pathMajor = path[:i-2], path[i-2:]
if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
return path, "", false
}
return prefix, pathMajor, true
}
// MatchPathMajor reports whether the semantic version v
// matches the path major version pathMajor.
//
// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
func MatchPathMajor(v, pathMajor string) bool {
return CheckPathMajor(v, pathMajor) == nil
}
// CheckPathMajor returns a non-nil error if the semantic version v
// does not match the path major version pathMajor.
func CheckPathMajor(v, pathMajor string) error {
// TODO(jayconrod): return errors or panic for invalid inputs. This function
// (and others) was covered by integration tests for cmd/go, and surrounding
// code protected against invalid inputs like non-canonical versions.
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
}
if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
// Allow an old bug in pseudo-versions that generated a v0.0.0- pseudo-version for gopkg.in ".v1" modules.
// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
return nil
}
m := semver.Major(v)
if pathMajor == "" {
if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" {
return nil
}
pathMajor = "v0 or v1"
} else if pathMajor[0] == '/' || pathMajor[0] == '.' {
if m == pathMajor[1:] {
return nil
}
pathMajor = pathMajor[1:]
}
return &InvalidVersionError{
Version: v,
Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)),
}
}
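// Illustrative sketch, not part of the vendored file: CheckPathMajor pairing a
// semantic version with a module path's major-version suffix. The function name
// is made up for this sketch.
func sketchCheckPathMajor() {
	fmt.Println(CheckPathMajor("v1.2.3", ""))              // <nil>: v0 and v1 match an empty suffix
	fmt.Println(CheckPathMajor("v2.0.0", ""))              // error: v2 requires a /v2 suffix
	fmt.Println(CheckPathMajor("v2.0.0", "/v2"))           // <nil>
	fmt.Println(CheckPathMajor("v2.0.0+incompatible", "")) // <nil>: +incompatible opts out of the suffix rule
}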
// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
// An empty PathMajorPrefix allows either v0 or v1.
//
// Note that MatchPathMajor may accept some versions that do not actually begin
// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
// pathMajor, even though that pathMajor implies 'v1' tagging.
func PathMajorPrefix(pathMajor string) string {
if pathMajor == "" {
return ""
}
if pathMajor[0] != '/' && pathMajor[0] != '.' {
panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator")
}
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
}
m := pathMajor[1:]
if m != semver.Major(m) {
panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version")
}
return m
}
// CanonicalVersion returns the canonical form of the version string v.
// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
func CanonicalVersion(v string) string {
cv := semver.Canonical(v)
if semver.Build(v) == "+incompatible" {
cv += "+incompatible"
}
return cv
}
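// Illustrative sketch, not part of the vendored file: unlike semver.Canonical,
// CanonicalVersion keeps the "+incompatible" build suffix.
func sketchCanonicalVersion() {
	fmt.Println(CanonicalVersion("v1.2"))                // v1.2.0
	fmt.Println(semver.Canonical("v2.0.0+incompatible")) // v2.0.0 (suffix dropped)
	fmt.Println(CanonicalVersion("v2.0.0+incompatible")) // v2.0.0+incompatible (suffix preserved)
}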
// Sort sorts the list by Path, breaking ties by comparing Version fields.
// The Version fields are interpreted as semantic versions (using semver.Compare)
// optionally followed by a tie-breaking suffix introduced by a slash character,
// like in "v0.0.1/go.mod".
func Sort(list []Version) {
sort.Slice(list, func(i, j int) bool {
mi := list[i]
mj := list[j]
if mi.Path != mj.Path {
return mi.Path < mj.Path
}
// To help go.sum formatting, allow version/file.
// Compare semver prefix by semver rules,
// file by string order.
vi := mi.Version
vj := mj.Version
var fi, fj string
if k := strings.Index(vi, "/"); k >= 0 {
vi, fi = vi[:k], vi[k:]
}
if k := strings.Index(vj, "/"); k >= 0 {
vj, fj = vj[:k], vj[k:]
}
if vi != vj {
return semver.Compare(vi, vj) < 0
}
return fi < fj
})
}
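// Illustrative sketch, not part of the vendored file: sorting a small list, including
// the "version/file" form used for go.sum lines. The function name is made up.
func sketchSort() {
	list := []Version{
		{Path: "example.com/b", Version: "v1.0.0"},
		{Path: "example.com/a", Version: "v1.10.0"},
		{Path: "example.com/a", Version: "v1.2.0/go.mod"},
		{Path: "example.com/a", Version: "v1.2.0"},
	}
	Sort(list)
	for _, m := range list {
		fmt.Println(m.Path, m.Version)
	}
	// Resulting order: a v1.2.0, a v1.2.0/go.mod, a v1.10.0, b v1.0.0
	// (semver comparison puts v1.2.0 before v1.10.0; the "/go.mod" suffix breaks the tie).
}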
// EscapePath returns the escaped form of the given module path.
// It fails if the module path is invalid.
func EscapePath(path string) (escaped string, err error) {
if err := CheckPath(path); err != nil {
return "", err
}
return escapeString(path)
}
// EscapeVersion returns the escaped form of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func EscapeVersion(v string) (escaped string, err error) {
if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") {
return "", &InvalidVersionError{
Version: v,
Err: fmt.Errorf("disallowed version string"),
}
}
return escapeString(v)
}
func escapeString(s string) (escaped string, err error) {
haveUpper := false
for _, r := range s {
if r == '!' || r >= utf8.RuneSelf {
// This should be disallowed by CheckPath, but diagnose anyway.
// The correctness of the escaping loop below depends on it.
return "", fmt.Errorf("internal error: inconsistency in EscapePath")
}
if 'A' <= r && r <= 'Z' {
haveUpper = true
}
}
if !haveUpper {
return s, nil
}
var buf []byte
for _, r := range s {
if 'A' <= r && r <= 'Z' {
buf = append(buf, '!', byte(r+'a'-'A'))
} else {
buf = append(buf, byte(r))
}
}
return string(buf), nil
}
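// Illustrative sketch, not part of the vendored file: the case-escaping applied by
// EscapePath, where every upper-case letter becomes '!' followed by its lower-case form.
func sketchEscapePath() {
	e, err := EscapePath("github.com/Azure/AzureSDK")
	fmt.Println(e, err) // github.com/!azure/!azure!s!d!k <nil>
}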
// UnescapePath returns the module path for the given escaped path.
// It fails if the escaped path is invalid or describes an invalid path.
func UnescapePath(escaped string) (path string, err error) {
path, ok := unescapeString(escaped)
if !ok {
return "", fmt.Errorf("invalid escaped module path %q", escaped)
}
if err := CheckPath(path); err != nil {
return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err)
}
return path, nil
}
// UnescapeVersion returns the version string for the given escaped version.
// It fails if the escaped form is invalid or describes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func UnescapeVersion(escaped string) (v string, err error) {
v, ok := unescapeString(escaped)
if !ok {
return "", fmt.Errorf("invalid escaped version %q", escaped)
}
if err := checkElem(v, filePath); err != nil {
return "", fmt.Errorf("invalid escaped version %q: %v", v, err)
}
return v, nil
}
func unescapeString(escaped string) (string, bool) {
var buf []byte
bang := false
for _, r := range escaped {
if r >= utf8.RuneSelf {
return "", false
}
if bang {
bang = false
if r < 'a' || 'z' < r {
return "", false
}
buf = append(buf, byte(r+'A'-'a'))
continue
}
if r == '!' {
bang = true
continue
}
if 'A' <= r && r <= 'Z' {
return "", false
}
buf = append(buf, byte(r))
}
if bang {
return "", false
}
return string(buf), true
}
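// Illustrative sketch, not part of the vendored file: unescaping round-trips the
// escaped form, and a literal upper-case letter in the escaped form is rejected.
func sketchUnescapePath() {
	p, err := UnescapePath("github.com/!burnt!sushi/toml")
	fmt.Println(p, err) // github.com/BurntSushi/toml <nil>
	_, err = UnescapePath("github.com/BurntSushi/toml")
	fmt.Println(err) // error: invalid escaped module path
}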
// MatchPrefixPatterns reports whether any path prefix of target matches one of
// the glob patterns (as defined by path.Match) in the comma-separated globs
// list. This implements the algorithm used when matching a module path to the
// GOPRIVATE environment variable, as described by 'go help module-private'.
//
// It ignores any empty or malformed patterns in the list.
// Trailing slashes on patterns are ignored.
func MatchPrefixPatterns(globs, target string) bool {
for globs != "" {
// Extract next non-empty glob in comma-separated list.
var glob string
if i := strings.Index(globs, ","); i >= 0 {
glob, globs = globs[:i], globs[i+1:]
} else {
glob, globs = globs, ""
}
glob = strings.TrimSuffix(glob, "/")
if glob == "" {
continue
}
// A glob with N+1 path elements (N slashes) needs to be matched
// against the first N+1 path elements of target,
// which end just before the N+1'th slash.
n := strings.Count(glob, "/")
prefix := target
// Walk target, counting slashes, truncating at the N+1'th slash.
for i := 0; i < len(target); i++ {
if target[i] == '/' {
if n == 0 {
prefix = target[:i]
break
}
n--
}
}
if n > 0 {
// Not enough prefix elements.
continue
}
matched, _ := path.Match(glob, prefix)
if matched {
return true
}
}
return false
}
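// Illustrative sketch, not part of the vendored file: matching module paths against a
// GOPRIVATE-style pattern list. The pattern values are made up for this sketch.
func sketchMatchPrefixPatterns() {
	const globs = "*.corp.example.com,github.com/internal"
	fmt.Println(MatchPrefixPatterns(globs, "git.corp.example.com/team/mod")) // true
	fmt.Println(MatchPrefixPatterns(globs, "github.com/internal/tools"))     // true
	fmt.Println(MatchPrefixPatterns(globs, "github.com/public/mod"))         // false
}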

250
vendor/golang.org/x/mod/module/pseudo.go generated vendored Normal file
View File

@ -0,0 +1,250 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Pseudo-versions
//
// Code authors are expected to tag the revisions they want users to use,
// including prereleases. However, not all authors tag versions at all,
// and not all commits a user might want to try will have tags.
// A pseudo-version is a version with a special form that allows us to
// address an untagged commit and order that version with respect to
// other versions we might encounter.
//
// A pseudo-version takes one of the general forms:
//
// (1) vX.0.0-yyyymmddhhmmss-abcdef123456
// (2) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456
// (3) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible
// (4) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456
// (5) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible
//
// If there is no recently tagged version with the right major version vX,
// then form (1) is used, creating a space of pseudo-versions at the bottom
// of the vX version range, less than any tagged version, including the unlikely v0.0.0.
//
// If the most recent tagged version before the target commit is vX.Y.Z or vX.Y.Z+incompatible,
// then the pseudo-version uses form (2) or (3), making it a prerelease for the next
// possible semantic version after vX.Y.Z. The leading 0 segment in the prerelease string
// ensures that the pseudo-version compares less than possible future explicit prereleases
// like vX.Y.(Z+1)-rc1 or vX.Y.(Z+1)-1.
//
// If the most recent tagged version before the target commit is vX.Y.Z-pre or vX.Y.Z-pre+incompatible,
// then the pseudo-version uses form (4) or (5), making it a slightly later prerelease.
package module
import (
"errors"
"fmt"
"strings"
"time"
"golang.org/x/mod/internal/lazyregexp"
"golang.org/x/mod/semver"
)
var pseudoVersionRE = lazyregexp.New(`^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$`)
const PseudoVersionTimestampFormat = "20060102150405"
// PseudoVersion returns a pseudo-version for the given major version ("v1"),
// preexisting older tagged version ("" or "v1.2.3" or "v1.2.3-pre"), revision time,
// and revision identifier (usually a 12-byte commit hash prefix).
func PseudoVersion(major, older string, t time.Time, rev string) string {
if major == "" {
major = "v0"
}
segment := fmt.Sprintf("%s-%s", t.UTC().Format(PseudoVersionTimestampFormat), rev)
build := semver.Build(older)
older = semver.Canonical(older)
if older == "" {
return major + ".0.0-" + segment // form (1)
}
if semver.Prerelease(older) != "" {
return older + ".0." + segment + build // form (4), (5)
}
// Form (2), (3).
// Extract patch from vMAJOR.MINOR.PATCH
i := strings.LastIndex(older, ".") + 1
v, patch := older[:i], older[i:]
// Reassemble.
return v + incDecimal(patch) + "-0." + segment + build
}
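// Illustrative sketch, not part of the vendored file: the pseudo-version forms produced
// for different base versions. The timestamp and revision below are made up.
func sketchPseudoVersion() {
	t := time.Date(2023, 7, 17, 12, 0, 0, 0, time.UTC)
	fmt.Println(PseudoVersion("v0", "", t, "abcdef123456"))           // v0.0.0-20230717120000-abcdef123456 (form 1)
	fmt.Println(PseudoVersion("v1", "v1.2.3", t, "abcdef123456"))     // v1.2.4-0.20230717120000-abcdef123456 (form 2)
	fmt.Println(PseudoVersion("v1", "v1.2.3-pre", t, "abcdef123456")) // v1.2.3-pre.0.20230717120000-abcdef123456 (form 4)
}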
// ZeroPseudoVersion returns a pseudo-version with a zero timestamp and
// revision, which may be used as a placeholder.
func ZeroPseudoVersion(major string) string {
return PseudoVersion(major, "", time.Time{}, "000000000000")
}
// incDecimal returns the decimal string incremented by 1.
func incDecimal(decimal string) string {
// Scan right to left turning 9s to 0s until you find a digit to increment.
digits := []byte(decimal)
i := len(digits) - 1
for ; i >= 0 && digits[i] == '9'; i-- {
digits[i] = '0'
}
if i >= 0 {
digits[i]++
} else {
// digits is all zeros
digits[0] = '1'
digits = append(digits, '0')
}
return string(digits)
}
// decDecimal returns the decimal string decremented by 1, or the empty string
// if the decimal is all zeroes.
func decDecimal(decimal string) string {
// Scan right to left turning 0s to 9s until you find a digit to decrement.
digits := []byte(decimal)
i := len(digits) - 1
for ; i >= 0 && digits[i] == '0'; i-- {
digits[i] = '9'
}
if i < 0 {
// decimal is all zeros
return ""
}
if i == 0 && digits[i] == '1' && len(digits) > 1 {
digits = digits[1:]
} else {
digits[i]--
}
return string(digits)
}
// IsPseudoVersion reports whether v is a pseudo-version.
func IsPseudoVersion(v string) bool {
return strings.Count(v, "-") >= 2 && semver.IsValid(v) && pseudoVersionRE.MatchString(v)
}
// IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base,
// timestamp, and revision, as returned by ZeroPseudoVersion.
func IsZeroPseudoVersion(v string) bool {
return v == ZeroPseudoVersion(semver.Major(v))
}
// PseudoVersionTime returns the time stamp of the pseudo-version v.
// It returns an error if v is not a pseudo-version or if the time stamp
// embedded in the pseudo-version is not a valid time.
func PseudoVersionTime(v string) (time.Time, error) {
_, timestamp, _, _, err := parsePseudoVersion(v)
if err != nil {
return time.Time{}, err
}
t, err := time.Parse("20060102150405", timestamp)
if err != nil {
return time.Time{}, &InvalidVersionError{
Version: v,
Pseudo: true,
Err: fmt.Errorf("malformed time %q", timestamp),
}
}
return t, nil
}
// PseudoVersionRev returns the revision identifier of the pseudo-version v.
// It returns an error if v is not a pseudo-version.
func PseudoVersionRev(v string) (rev string, err error) {
_, _, rev, _, err = parsePseudoVersion(v)
return
}
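// Illustrative sketch, not part of the vendored file: pulling the timestamp and revision
// back out of a pseudo-version. The version string is made up for this sketch.
func sketchPseudoVersionParts() {
	const v = "v1.2.4-0.20230717120000-abcdef123456"
	t, _ := PseudoVersionTime(v)
	rev, _ := PseudoVersionRev(v)
	fmt.Println(t.UTC(), rev) // 2023-07-17 12:00:00 +0000 UTC abcdef123456
}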
// PseudoVersionBase returns the canonical parent version, if any, upon which
// the pseudo-version v is based.
//
// If v has no parent version (that is, if it is "vX.0.0-[…]"),
// PseudoVersionBase returns the empty string and a nil error.
func PseudoVersionBase(v string) (string, error) {
base, _, _, build, err := parsePseudoVersion(v)
if err != nil {
return "", err
}
switch pre := semver.Prerelease(base); pre {
case "":
// vX.0.0-yyyymmddhhmmss-abcdef123456 → ""
if build != "" {
// Pseudo-versions of the form vX.0.0-yyyymmddhhmmss-abcdef123456+incompatible
// are nonsensical: the "vX.0.0-" prefix implies that there is no parent tag,
// but the "+incompatible" suffix implies that the major version of
// the parent tag is not compatible with the module's import path.
//
// There are a few such entries in the index generated by proxy.golang.org,
// but we believe those entries were generated by the proxy itself.
return "", &InvalidVersionError{
Version: v,
Pseudo: true,
Err: fmt.Errorf("lacks base version, but has build metadata %q", build),
}
}
return "", nil
case "-0":
// vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z
// vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z+incompatible
base = strings.TrimSuffix(base, pre)
i := strings.LastIndexByte(base, '.')
if i < 0 {
panic("base from parsePseudoVersion missing patch number: " + base)
}
patch := decDecimal(base[i+1:])
if patch == "" {
// vX.0.0-0 is invalid, but has been observed in the wild in the index
// generated by requests to proxy.golang.org.
//
// NOTE(bcmills): I cannot find a historical bug that accounts for
// pseudo-versions of this form, nor have I seen such versions in any
// actual go.mod files. If we find actual examples of this form and a
// reasonable theory of how they came into existence, it seems fine to
// treat them as equivalent to vX.0.0 (especially since the invalid
// pseudo-versions have lower precedence than the real ones). For now, we
// reject them.
return "", &InvalidVersionError{
Version: v,
Pseudo: true,
Err: fmt.Errorf("version before %s would have negative patch number", base),
}
}
return base[:i+1] + patch + build, nil
default:
// vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z-pre
// vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z-pre+incompatible
if !strings.HasSuffix(base, ".0") {
panic(`base from parsePseudoVersion missing ".0" before date: ` + base)
}
return strings.TrimSuffix(base, ".0") + build, nil
}
}
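// Illustrative sketch, not part of the vendored file: recovering the base version that
// each pseudo-version was derived from. The version strings are made up for this sketch.
func sketchPseudoVersionBase() {
	for _, v := range []string{
		"v0.0.0-20230717120000-abcdef123456",       // no parent tag → ""
		"v1.2.4-0.20230717120000-abcdef123456",     // derived from v1.2.3
		"v1.2.3-pre.0.20230717120000-abcdef123456", // derived from v1.2.3-pre
	} {
		base, err := PseudoVersionBase(v)
		fmt.Printf("%s → %q %v\n", v, base, err)
	}
}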
var errPseudoSyntax = errors.New("syntax error")
func parsePseudoVersion(v string) (base, timestamp, rev, build string, err error) {
if !IsPseudoVersion(v) {
return "", "", "", "", &InvalidVersionError{
Version: v,
Pseudo: true,
Err: errPseudoSyntax,
}
}
build = semver.Build(v)
v = strings.TrimSuffix(v, build)
j := strings.LastIndex(v, "-")
v, rev = v[:j], v[j+1:]
i := strings.LastIndex(v, "-")
if j := strings.LastIndex(v, "."); j > i {
base = v[:j] // "vX.Y.Z-pre.0" or "vX.Y.(Z+1)-0"
timestamp = v[j+1:]
} else {
base = v[:i] // "vX.0.0"
timestamp = v[i+1:]
}
return base, timestamp, rev, build, nil
}

View File

@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
p.c.L = &p.mu
}
defer p.c.Signal()
if p.err != nil {
if p.err != nil || p.breakErr != nil {
return 0, errClosedPipeWrite
}
if p.breakErr != nil {
p.unread += len(d)
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
}

View File

@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error {
}
if len(data) > 0 {
st.bodyBytes += int64(len(data))
wrote, err := st.body.Write(data)
if err != nil {
// The handler has closed the request body.
// Return the connection-level flow control for the discarded data,
// but not the stream-level flow control.
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
return nil
}
if wrote != len(data) {
panic("internal error: bad Writer")
}
st.bodyBytes += int64(len(data))
}
// Return any padded flow control now, since we won't

View File

@ -560,10 +560,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
traceGotConn(req, cc, reused)
res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
roundTripErr := err
if req, err = shouldRetryRequest(req, err); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
}
backoff := float64(uint(1) << (uint(retry) - 1))
@ -572,7 +573,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
timer := backoffNewTimer(d)
select {
case <-timer.C:
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
timer.Stop()
@ -1265,6 +1266,27 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return res, nil
}
cancelRequest := func(cs *clientStream, err error) error {
cs.cc.mu.Lock()
defer cs.cc.mu.Unlock()
cs.abortStreamLocked(err)
if cs.ID != 0 {
// This request may have failed because of a problem with the connection,
// or for some unrelated reason. (For example, the user might have canceled
// the request without waiting for a response.) Mark the connection as
// not reusable, since trying to reuse a dead connection is worse than
// unnecessarily creating a new one.
//
// If cs.ID is 0, then the request was never allocated a stream ID and
// whatever went wrong was unrelated to the connection. We might have
// timed out waiting for a stream slot when StrictMaxConcurrentStreams
// is set, for example, in which case retrying on a different connection
// will not help.
cs.cc.doNotReuse = true
}
return err
}
for {
select {
case <-cs.respHeaderRecv:
@ -1279,15 +1301,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return handleResponseHeaders()
default:
waitDone()
return nil, cs.abortErr
return nil, cancelRequest(cs, cs.abortErr)
}
case <-ctx.Done():
err := ctx.Err()
cs.abortStream(err)
return nil, err
return nil, cancelRequest(cs, ctx.Err())
case <-cs.reqCancel:
cs.abortStream(errRequestCanceled)
return nil, errRequestCanceled
return nil, cancelRequest(cs, errRequestCanceled)
}
}
}
@ -2555,6 +2574,9 @@ func (b transportResponseBody) Close() error {
cs := b.cs
cc := cs.cc
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
unread := cs.bufPipe.Len()
if unread > 0 {
cc.mu.Lock()
@ -2573,9 +2595,6 @@ func (b transportResponseBody) Close() error {
cc.wmu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
select {
case <-cs.donec:
case <-cs.ctx.Done():

View File

@ -289,7 +289,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter,
case AuthMethodNotRequired:
return nil
case AuthMethodUsernamePassword:
if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 {
if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) > 255 {
return errors.New("invalid username/password")
}
b := []byte{authUsernamePasswordVersion}

View File

@ -20,7 +20,7 @@ type token struct{}
// A zero Group is valid, has no limit on the number of active goroutines,
// and does not cancel on error.
type Group struct {
cancel func()
cancel func(error)
wg sync.WaitGroup
@ -43,7 +43,7 @@ func (g *Group) done() {
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func WithContext(ctx context.Context) (*Group, context.Context) {
ctx, cancel := context.WithCancel(ctx)
ctx, cancel := withCancelCause(ctx)
return &Group{cancel: cancel}, ctx
}
@ -52,7 +52,7 @@ func WithContext(ctx context.Context) (*Group, context.Context) {
func (g *Group) Wait() error {
g.wg.Wait()
if g.cancel != nil {
g.cancel()
g.cancel(g.err)
}
return g.err
}
@ -76,7 +76,7 @@ func (g *Group) Go(f func() error) {
g.errOnce.Do(func() {
g.err = err
if g.cancel != nil {
g.cancel()
g.cancel(g.err)
}
})
}
@ -105,7 +105,7 @@ func (g *Group) TryGo(f func() error) bool {
g.errOnce.Do(func() {
g.err = err
if g.cancel != nil {
g.cancel()
g.cancel(g.err)
}
})
}

14
vendor/golang.org/x/sync/errgroup/go120.go generated vendored Normal file
View File

@ -0,0 +1,14 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.20
// +build go1.20
package errgroup
import "context"
func withCancelCause(parent context.Context) (context.Context, func(error)) {
return context.WithCancelCause(parent)
}

15
vendor/golang.org/x/sync/errgroup/pre_go120.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.20
// +build !go1.20
package errgroup
import "context"
func withCancelCause(parent context.Context) (context.Context, func(error)) {
ctx, cancel := context.WithCancel(parent)
return ctx, func(error) { cancel() }
}
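// Illustrative sketch, not part of the vendored files: with the go1.20 variant of
// withCancelCause above, the first error returned inside an errgroup also becomes the
// derived context's cancellation cause. This snippet is caller-side code and assumes
// imports of context, errors, fmt, and golang.org/x/sync/errgroup.
func sketchErrgroupCause() {
	g, ctx := errgroup.WithContext(context.Background())
	errBoom := errors.New("boom")
	g.Go(func() error { return errBoom })
	_ = g.Wait()
	// On Go 1.20 and later, the cause is the first error rather than plain context.Canceled.
	fmt.Println(context.Cause(ctx) == errBoom) // true
}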

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm
// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh wasm
package cpu

View File

@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && /bin/pwd):/build generate:$GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit
fi

Some files were not shown because too many files have changed in this diff.