Merge pull request #5731 from alakesh/opentelemetry-basic

[otel-tracing] Initial opentelemetry support

Commit: 5b76d08f4c

.github/workflows/ci.yml (4 changed lines, vendored)
@@ -235,10 +235,6 @@ jobs:
      matrix:
        os: [ubuntu-18.04, macos-10.15, windows-2019]
        go-version: ['1.16.6']
        include:
          # Go 1.13.x is still used by Docker/Moby
          - go-version: '1.13.x'
            os: ubuntu-18.04

    steps:
      - uses: actions/setup-go@v2
@@ -34,10 +34,12 @@ import (
    "github.com/containerd/containerd/services/server"
    srvconfig "github.com/containerd/containerd/services/server/config"
    "github.com/containerd/containerd/sys"
    "github.com/containerd/containerd/tracing"
    "github.com/containerd/containerd/version"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/urfave/cli"
    "go.opentelemetry.io/otel"
    "google.golang.org/grpc/grpclog"
)
@@ -130,6 +132,20 @@ can be used and modified as necessary as a custom configuration.`
            return err
        }

        // Initialize OpenTelemetry tracing
        shutdown, err := tracing.InitOpenTelemetry(config)
        if err != nil {
            errors.Wrap(err, "failed to initialize OpenTelemetry tracing")
        }
        if shutdown != nil {
            defer shutdown()
        }

        // Get a tracer
        ctrdTracer := otel.Tracer("containerd")
        ctx, mainCtrdSpan := ctrdTracer.Start(ctx, "containerd-exporter")
        defer mainCtrdSpan.End()

        // Make sure top-level directories are created early.
        if err := server.CreateTopLevelDirectories(config); err != nil {
            return err
@@ -243,6 +259,9 @@ can be used and modified as necessary as a custom configuration.`
func serve(ctx gocontext.Context, l net.Listener, serveFunc func(net.Listener) error) {
    path := l.Addr().String()
    log.G(ctx).WithField("address", path).Info("serving...")
    serveSpan, ctx := tracing.StartSpan(ctx, l.Addr().String())
    defer tracing.StopSpan(serveSpan)

    go func() {
        defer l.Close()
        if err := serveFunc(l); err != nil {
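Taken together, the hunks above wire tracing into daemon startup: initialize the provider from config, open a root span, and wrap serve() in a span. Below is a minimal, self-contained sketch of that pattern, not the PR's exact code: it reuses the helpers the PR adds, stubs out config loading, and logs the init error instead of discarding it (note that the `errors.Wrap(err, ...)` result in the first hunk above is currently dropped).

```go
// Sketch only: the startup wiring shown above, in a standalone form.
// tracing.InitOpenTelemetry and the "containerd" tracer name come from this
// PR; logging on init failure is an assumption, not what the hunk does.
package main

import (
	"context"

	srvconfig "github.com/containerd/containerd/services/server/config"
	"github.com/containerd/containerd/tracing"
	"github.com/sirupsen/logrus"
	"go.opentelemetry.io/otel"
)

func setupTracing(ctx context.Context, config *srvconfig.Config) (context.Context, func()) {
	// Install the global tracer provider from the [otel] config section.
	shutdown, err := tracing.InitOpenTelemetry(config)
	if err != nil {
		logrus.WithError(err).Warn("failed to initialize OpenTelemetry tracing")
	}

	// Open a root span that covers daemon startup.
	tracer := otel.Tracer("containerd")
	ctx, span := tracer.Start(ctx, "containerd-exporter")

	return ctx, func() {
		span.End()
		if shutdown != nil {
			shutdown() // flush remaining spans and stop the exporter
		}
	}
}
```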
go.mod (10 changed lines)

@@ -30,8 +30,9 @@ require (
    github.com/fsnotify/fsnotify v1.4.9
    github.com/gogo/googleapis v1.4.0
    github.com/gogo/protobuf v1.3.2
    github.com/google/go-cmp v0.5.5
    github.com/google/go-cmp v0.5.6
    github.com/google/uuid v1.2.0
    github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
    github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
    github.com/hashicorp/go-multierror v1.0.0
    github.com/imdario/mergo v0.3.12
@@ -51,10 +52,15 @@ require (
    github.com/prometheus/procfs v0.6.0 // indirect; temporarily force v0.6.0, which was previously defined in imgcrypt as explicit version
    github.com/satori/go.uuid v1.2.0 // indirect
    github.com/sirupsen/logrus v1.8.1
    github.com/stretchr/testify v1.6.1
    github.com/stretchr/testify v1.7.0
    github.com/tchap/go-patricia v2.2.6+incompatible
    github.com/urfave/cli v1.22.2
    go.etcd.io/bbolt v1.3.5
    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0
    go.opentelemetry.io/otel v1.0.0-RC1
    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC1
    go.opentelemetry.io/otel/sdk v1.0.0-RC1
    go.opentelemetry.io/otel/trace v1.0.0-RC1
    golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
    golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
    golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
go.sum (44 changed lines)
@ -9,6 +9,7 @@ cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
@ -55,6 +56,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
@ -66,6 +68,8 @@ github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjL
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
@ -236,8 +240,9 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@ -247,8 +252,9 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@ -272,11 +278,14 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
@ -420,6 +429,7 @@ github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3x
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@ -462,8 +472,9 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck=
|
||||
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
|
||||
@ -492,8 +503,28 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opentelemetry.io/contrib v0.21.0 h1:RMJ6GlUVzLYp/zmItxTTdAmr1gnpO/HHMFmvjAhvJQM=
|
||||
go.opentelemetry.io/contrib v0.21.0/go.mod h1:EH4yDYeNoaTqn/8yCWQmfNB78VHfGX2Jt2bvnvzBlGM=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0 h1:68WZYF6CrnsXIVDYc51cR9VmTX2IM7y0svo7s4lu5kQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0/go.mod h1:Vm5u/mtkj1OMhtao0v+BGo2LUoLCgHYXvRmj0jWITlE=
|
||||
go.opentelemetry.io/otel v1.0.0-RC1 h1:4CeoX93DNTWt8awGK9JmNXzF9j7TyOu9upscEdtcdXc=
|
||||
go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.0-RC1 h1:GHKxjc4EDldz8ScMDpiNwX4BAub6wGFUUo5Axm2BimU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.0-RC1/go.mod h1:FliQjImlo7emZVjixV8nbDMAa4iAkcWTE9zzSEOiEPw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC1 h1:ZOQXuxKJ9evGspu3LvbZxx3KOOQvKAPBJVMOfGf1cOM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC1/go.mod h1:cDwRc2Jrh5Gku1peGK8p9rRuX/Uq2OtVmLicjlw2WYU=
|
||||
go.opentelemetry.io/otel/oteltest v1.0.0-RC1 h1:G685iP3XiskCwk/z0eIabL55XUl2gk0cljhGk9sB0Yk=
|
||||
go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4=
|
||||
go.opentelemetry.io/otel/sdk v1.0.0-RC1 h1:Sy2VLOOg24bipyC29PhuMXYNJrLsxkie8hyI7kUlG9Q=
|
||||
go.opentelemetry.io/otel/sdk v1.0.0-RC1/go.mod h1:kj6yPn7Pgt5ByRuwesbaWcRLA+V7BSDg3Hf8xRvsvf8=
|
||||
go.opentelemetry.io/otel/trace v1.0.0-RC1 h1:jrjqKJZEibFrDz+umEASeU3LvdVyWKlnTh7XEfwrT58=
|
||||
go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg=
|
||||
go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4=
|
||||
go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
@ -526,6 +557,7 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
@ -559,6 +591,7 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
@ -665,6 +698,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
@ -681,6 +715,7 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@ -714,7 +749,9 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
@ -750,6 +787,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
@ -57,6 +57,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
@ -67,6 +68,7 @@ github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjL
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
@ -224,8 +226,9 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@ -235,8 +238,9 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
@ -263,6 +267,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
@ -393,6 +398,7 @@ github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3x
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
|
||||
@ -429,8 +435,9 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
@ -455,8 +462,18 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opentelemetry.io/contrib v0.21.0/go.mod h1:EH4yDYeNoaTqn/8yCWQmfNB78VHfGX2Jt2bvnvzBlGM=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0/go.mod h1:Vm5u/mtkj1OMhtao0v+BGo2LUoLCgHYXvRmj0jWITlE=
|
||||
go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.0-RC1/go.mod h1:FliQjImlo7emZVjixV8nbDMAa4iAkcWTE9zzSEOiEPw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC1/go.mod h1:cDwRc2Jrh5Gku1peGK8p9rRuX/Uq2OtVmLicjlw2WYU=
|
||||
go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4=
|
||||
go.opentelemetry.io/otel/sdk v1.0.0-RC1/go.mod h1:kj6yPn7Pgt5ByRuwesbaWcRLA+V7BSDg3Hf8xRvsvf8=
|
||||
go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg=
|
||||
go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
@ -521,6 +538,7 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
@ -625,6 +643,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
@ -674,7 +693,9 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
@ -707,6 +728,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
@@ -67,6 +67,8 @@ type Config struct {
    Timeouts map[string]string `toml:"timeouts"`
    // Imports are additional file path list to config files that can overwrite main config file fields
    Imports []string `toml:"imports"`
    // OpenTelemetry configuration
    OpenTelemetry OpenTelemetryConfig `toml:"otel"`

    StreamProcessors map[string]StreamProcessor `toml:"stream_processors"`
}
@@ -165,6 +167,14 @@ type ProxyPlugin struct {
    Address string `toml:"address"`
}

// OpenTelemetryConfig provides open telemetry configuration
type OpenTelemetryConfig struct {
    ServiceName        string  `toml:"service_name"`
    ExporterName       string  `toml:"exporter_name"`
    ExporterEndpoint   string  `toml:"exporter_endpoint"`
    TraceSamplingRatio float64 `toml:"trace_sampling_ratio"`
}

// BoltConfig defines the configuration values for the bolt plugin, which is
// loaded here, rather than back registered in the metadata package.
type BoltConfig struct {
@@ -203,6 +213,24 @@ func (bc *BoltConfig) Validate() error {
    }
}

const (
    // ExporterTypeOTLP represents the open telemetry exporter OTLP
    ExporterTypeOTLP = "otlp"
)

// Validate OpenTelemetry config
func (cfg *OpenTelemetryConfig) Validate() error {
    switch cfg.ExporterName {
    case ExporterTypeOTLP:
        if cfg.ServiceName == "" {
            return errors.Wrapf(errdefs.ErrInvalidArgument, "missing service name in config %+v", cfg)
        }
        return nil
    default:
        return errors.Wrapf(errdefs.ErrInvalidArgument, "unsupported exporter: %+v", cfg)
    }
}

// Decode unmarshals a plugin specific configuration by plugin id
func (c *Config) Decode(p *plugin.Registration) (interface{}, error) {
    id := p.URI()
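The exporter switch above only accepts "otlp", and a service name is required whenever the section is set. As a small sketch of how a caller might build and validate this config (the literal values below are illustrative, not defaults from the PR):

```go
// Sketch: exercising OpenTelemetryConfig.Validate from the hunk above.
// Field names come from the struct; the values are made up for illustration.
package main

import (
	"fmt"

	srvconfig "github.com/containerd/containerd/services/server/config"
)

func main() {
	cfg := srvconfig.OpenTelemetryConfig{
		ServiceName:        "containerd",
		ExporterName:       "otlp", // only "otlp" passes validation
		ExporterEndpoint:   "0.0.0.0:4317",
		TraceSamplingRatio: 1.0,
	}
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid otel config:", err)
		return
	}
	fmt.Println("otel config OK")
}
```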
@@ -51,9 +51,11 @@ import (
    "github.com/containerd/containerd/sys"
    "github.com/containerd/ttrpc"
    metrics "github.com/docker/go-metrics"
    grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
    grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
    "github.com/pkg/errors"
    bolt "go.etcd.io/bbolt"
    "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
    "google.golang.org/grpc"
    "google.golang.org/grpc/backoff"
    "google.golang.org/grpc/credentials"
@@ -98,8 +100,14 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) {
    }

    serverOpts := []grpc.ServerOption{
        grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
        grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
        grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
            otelgrpc.StreamServerInterceptor(),
            grpc.StreamServerInterceptor(grpc_prometheus.StreamServerInterceptor),
        )),
        grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
            otelgrpc.UnaryServerInterceptor(),
            grpc.UnaryServerInterceptor(grpc_prometheus.UnaryServerInterceptor),
        )),
    }
    if config.GRPC.MaxRecvMsgSize > 0 {
        serverOpts = append(serverOpts, grpc.MaxRecvMsgSize(config.GRPC.MaxRecvMsgSize))
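The hunk above swaps the single Prometheus interceptors for chained ones so otelgrpc can open a server-side span for every RPC while the Prometheus metrics keep working. A standalone sketch of the same chaining follows; it is not containerd code, only the libraries the hunk already imports:

```go
// Sketch of chaining otelgrpc with grpc_prometheus via go-grpc-middleware,
// mirroring the server options added above.
package main

import (
	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
)

func newServer() *grpc.Server {
	return grpc.NewServer(
		// Stream RPCs: first record an OpenTelemetry span, then Prometheus metrics.
		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
			otelgrpc.StreamServerInterceptor(),
			grpc_prometheus.StreamServerInterceptor,
		)),
		// Unary RPCs: same ordering.
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
			otelgrpc.UnaryServerInterceptor(),
			grpc_prometheus.UnaryServerInterceptor,
		)),
	)
}
```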
tracing/tracing.go (new file, 109 lines)

@@ -0,0 +1,109 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package tracing

import (
    "context"

    srvconfig "github.com/containerd/containerd/services/server/config"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
    "go.opentelemetry.io/otel/propagation"
    "go.opentelemetry.io/otel/sdk/resource"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
    semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
    "go.opentelemetry.io/otel/trace"
    "google.golang.org/grpc"
)

// InitOpenTelemetry reads the config and initializes the otel middleware; it sets the
// exporter, propagator and global tracer provider.
func InitOpenTelemetry(config *srvconfig.Config) (func(), error) {
    ctx := context.Background()

    // Check if tracing is configured
    if config.OpenTelemetry == (srvconfig.OpenTelemetryConfig{}) {
        logrus.Info("OpenTelemetry configuration not found, tracing is disabled")
        return nil, nil
    }

    // Validate configuration
    if err := config.OpenTelemetry.Validate(); err != nil {
        return nil, errors.Wrap(err, "invalid open telemetry configuration")
    }

    res, err := resource.New(ctx,
        resource.WithAttributes(
            // Service name used to display traces in backends
            semconv.ServiceNameKey.String(config.OpenTelemetry.ServiceName),
        ),
    )
    if err != nil {
        return nil, errors.Wrap(err, "failed to create resource")
    }

    // Configure the OTLP trace exporter and set it up to connect to an OpenTelemetry
    // collector running on the local host.
    ctrdTraceExporter, err := otlptracegrpc.New(ctx,
        otlptracegrpc.WithEndpoint(config.OpenTelemetry.ExporterEndpoint),
        otlptracegrpc.WithDialOption(grpc.WithBlock()),
    )
    if err != nil {
        return nil, errors.Wrap(err, "failed to create trace exporter")
    }

    // Register the trace exporter with a TracerProvider, using a batch span
    // processor to aggregate spans before export.
    ctrdBatchSpanProcessor := sdktrace.NewBatchSpanProcessor(ctrdTraceExporter)
    ctrdTracerProvider := sdktrace.NewTracerProvider(
        // We use TraceIDRatioBased sampling. The ratio read from config is interpreted
        // as follows: a ratio < 0 is treated as 0; a ratio >= 1 always samples.
        sdktrace.WithSampler(sdktrace.TraceIDRatioBased(config.OpenTelemetry.TraceSamplingRatio)),
        sdktrace.WithResource(res),
        sdktrace.WithSpanProcessor(ctrdBatchSpanProcessor),
    )
    otel.SetTracerProvider(ctrdTracerProvider)

    // set global propagator to tracecontext
    otel.SetTextMapPropagator(propagation.TraceContext{})

    return func() {
        // Shutdown will flush any remaining spans and shut down the exporter.
        err := ctrdTracerProvider.Shutdown(ctx)
        if err != nil {
            logrus.WithError(err).Errorf("failed to shutdown TracerProvider")
        }
    }, nil
}

// StartSpan starts a child span in a context.
func StartSpan(ctx context.Context, opName string, opts ...trace.SpanStartOption) (trace.Span, context.Context) {
    parentSpan := trace.SpanFromContext(ctx)
    tracer := trace.NewNoopTracerProvider().Tracer("")
    if parentSpan.SpanContext().IsValid() {
        tracer = parentSpan.TracerProvider().Tracer("")
    }
    ctx, span := tracer.Start(ctx, opName, opts...)
    return span, ctx
}

// StopSpan ends the specified span.
func StopSpan(span trace.Span) {
    span.End()
}
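A short usage sketch for the two helpers at the bottom of the new file: StartSpan only produces a recording span when the incoming context already carries a valid parent span, otherwise it falls back to a no-op tracer. The caller function below is hypothetical, not part of the PR:

```go
// Sketch: calling the new tracing helpers from hypothetical caller code.
package main

import (
	"context"

	"github.com/containerd/containerd/tracing"
)

func doWork(ctx context.Context) {
	// StartSpan returns the span first, then the derived context.
	span, ctx := tracing.StartSpan(ctx, "do-work")
	defer tracing.StopSpan(span)

	// ... perform the operation with ctx so any child spans nest under "do-work".
	_ = ctx
}
```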
vendor/github.com/cenkalti/backoff/v4/.gitignore (new file, 25 lines, generated, vendored)

@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

# IDEs
.idea/
vendor/github.com/cenkalti/backoff/v4/.travis.yml (new file, 10 lines, generated, vendored)

@@ -0,0 +1,10 @@
language: go
go:
  - 1.13
  - 1.x
  - tip
before_install:
  - go get github.com/mattn/goveralls
  - go get golang.org/x/tools/cmd/cover
script:
  - $HOME/gopath/bin/goveralls -service=travis-ci
vendor/github.com/cenkalti/backoff/v4/LICENSE (new file, 20 lines, generated, vendored)

@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2014 Cenk Altı

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/cenkalti/backoff/v4/README.md (new file, 32 lines, generated, vendored)

@@ -0,0 +1,32 @@
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]

This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].

[Exponential backoff][exponential backoff wiki]
is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
in order to gradually find an acceptable rate.
The retries exponentially increase and stop increasing when a certain threshold is met.

## Usage

Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.

Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.

## Contributing

* I would like to keep this library as small as possible.
* Please don't send a PR without opening an issue and discussing it first.
* If proposed change is not a common use case, I will probably not accept it.

[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
[travis]: https://travis-ci.org/cenkalti/backoff
[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master

[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff

[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
vendor/github.com/cenkalti/backoff/v4/backoff.go (new file, 66 lines, generated, vendored)

@@ -0,0 +1,66 @@
|
||||
// Package backoff implements backoff algorithms for retrying operations.
|
||||
//
|
||||
// Use Retry function for retrying operations that may fail.
|
||||
// If Retry does not meet your needs,
|
||||
// copy/paste the function into your project and modify as you wish.
|
||||
//
|
||||
// There is also Ticker type similar to time.Ticker.
|
||||
// You can use it if you need to work with channels.
|
||||
//
|
||||
// See Examples section below for usage examples.
|
||||
package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
// BackOff is a backoff policy for retrying an operation.
|
||||
type BackOff interface {
|
||||
// NextBackOff returns the duration to wait before retrying the operation,
|
||||
// or backoff. Stop to indicate that no more retries should be made.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// duration := backoff.NextBackOff();
|
||||
// if (duration == backoff.Stop) {
|
||||
// // Do not retry operation.
|
||||
// } else {
|
||||
// // Sleep for duration and retry operation.
|
||||
// }
|
||||
//
|
||||
NextBackOff() time.Duration
|
||||
|
||||
// Reset to initial state.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// Stop indicates that no more retries should be made for use in NextBackOff().
|
||||
const Stop time.Duration = -1
|
||||
|
||||
// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
|
||||
// meaning that the operation is retried immediately without waiting, indefinitely.
|
||||
type ZeroBackOff struct{}
|
||||
|
||||
func (b *ZeroBackOff) Reset() {}
|
||||
|
||||
func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
|
||||
|
||||
// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
|
||||
// NextBackOff(), meaning that the operation should never be retried.
|
||||
type StopBackOff struct{}
|
||||
|
||||
func (b *StopBackOff) Reset() {}
|
||||
|
||||
func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
|
||||
|
||||
// ConstantBackOff is a backoff policy that always returns the same backoff delay.
|
||||
// This is in contrast to an exponential backoff policy,
|
||||
// which returns a delay that grows longer as you call NextBackOff() over and over again.
|
||||
type ConstantBackOff struct {
|
||||
Interval time.Duration
|
||||
}
|
||||
|
||||
func (b *ConstantBackOff) Reset() {}
|
||||
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
|
||||
|
||||
func NewConstantBackOff(d time.Duration) *ConstantBackOff {
|
||||
return &ConstantBackOff{Interval: d}
|
||||
}
|
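The BackOff interface above is the extension point of the vendored library: any policy that can report the next delay and reset itself qualifies. As an illustration only (not part of the vendored code), a capped-attempts policy could look like this:

```go
// Sketch: a custom BackOff that retries a fixed number of times with a fixed
// delay, then reports Stop. The vendored package already ships ZeroBackOff,
// StopBackOff and ConstantBackOff; this type is hypothetical.
package main

import (
	"time"

	backoff "github.com/cenkalti/backoff/v4"
)

type limitedBackOff struct {
	delay    time.Duration
	max      int
	attempts int
}

func (b *limitedBackOff) Reset() { b.attempts = 0 }

func (b *limitedBackOff) NextBackOff() time.Duration {
	if b.attempts >= b.max {
		return backoff.Stop // signal the caller to give up
	}
	b.attempts++
	return b.delay
}

// Compile-time check that limitedBackOff satisfies the BackOff interface.
var _ backoff.BackOff = (*limitedBackOff)(nil)
```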
vendor/github.com/cenkalti/backoff/v4/context.go (new file, 62 lines, generated, vendored)

@@ -0,0 +1,62 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackOffContext is a backoff policy that stops retrying after the context
|
||||
// is canceled.
|
||||
type BackOffContext interface { // nolint: golint
|
||||
BackOff
|
||||
Context() context.Context
|
||||
}
|
||||
|
||||
type backOffContext struct {
|
||||
BackOff
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// WithContext returns a BackOffContext with context ctx
|
||||
//
|
||||
// ctx must not be nil
|
||||
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
|
||||
if ctx == nil {
|
||||
panic("nil context")
|
||||
}
|
||||
|
||||
if b, ok := b.(*backOffContext); ok {
|
||||
return &backOffContext{
|
||||
BackOff: b.BackOff,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
return &backOffContext{
|
||||
BackOff: b,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
func getContext(b BackOff) context.Context {
|
||||
if cb, ok := b.(BackOffContext); ok {
|
||||
return cb.Context()
|
||||
}
|
||||
if tb, ok := b.(*backOffTries); ok {
|
||||
return getContext(tb.delegate)
|
||||
}
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (b *backOffContext) Context() context.Context {
|
||||
return b.ctx
|
||||
}
|
||||
|
||||
func (b *backOffContext) NextBackOff() time.Duration {
|
||||
select {
|
||||
case <-b.ctx.Done():
|
||||
return Stop
|
||||
default:
|
||||
return b.BackOff.NextBackOff()
|
||||
}
|
||||
}
|
vendor/github.com/cenkalti/backoff/v4/exponential.go (new file, 158 lines, generated, vendored)

@@ -0,0 +1,158 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
ExponentialBackOff is a backoff implementation that increases the backoff
|
||||
period for each retry attempt using a randomization function that grows exponentially.
|
||||
|
||||
NextBackOff() is calculated using the following formula:
|
||||
|
||||
randomized interval =
|
||||
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
|
||||
|
||||
In other words NextBackOff() will range between the randomization factor
|
||||
percentage below and above the retry interval.
|
||||
|
||||
For example, given the following parameters:
|
||||
|
||||
RetryInterval = 2
|
||||
RandomizationFactor = 0.5
|
||||
Multiplier = 2
|
||||
|
||||
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
|
||||
multiplied by the exponential, that is, between 2 and 6 seconds.
|
||||
|
||||
Note: MaxInterval caps the RetryInterval and not the randomized interval.
|
||||
|
||||
If the time elapsed since an ExponentialBackOff instance is created goes past the
|
||||
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
|
||||
|
||||
The elapsed time can be reset by calling Reset().
|
||||
|
||||
Example: Given the following default arguments, for 10 tries the sequence will be,
|
||||
and assuming we go over the MaxElapsedTime on the 10th try:
|
||||
|
||||
Request # RetryInterval (seconds) Randomized Interval (seconds)
|
||||
|
||||
1 0.5 [0.25, 0.75]
|
||||
2 0.75 [0.375, 1.125]
|
||||
3 1.125 [0.562, 1.687]
|
||||
4 1.687 [0.8435, 2.53]
|
||||
5 2.53 [1.265, 3.795]
|
||||
6 3.795 [1.897, 5.692]
|
||||
7 5.692 [2.846, 8.538]
|
||||
8 8.538 [4.269, 12.807]
|
||||
9 12.807 [6.403, 19.210]
|
||||
10 19.210 backoff.Stop
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
type ExponentialBackOff struct {
|
||||
InitialInterval time.Duration
|
||||
RandomizationFactor float64
|
||||
Multiplier float64
|
||||
MaxInterval time.Duration
|
||||
// After MaxElapsedTime the ExponentialBackOff returns Stop.
|
||||
// It never stops if MaxElapsedTime == 0.
|
||||
MaxElapsedTime time.Duration
|
||||
Stop time.Duration
|
||||
Clock Clock
|
||||
|
||||
currentInterval time.Duration
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
// Clock is an interface that returns current time for BackOff.
|
||||
type Clock interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
// Default values for ExponentialBackOff.
|
||||
const (
|
||||
DefaultInitialInterval = 500 * time.Millisecond
|
||||
DefaultRandomizationFactor = 0.5
|
||||
DefaultMultiplier = 1.5
|
||||
DefaultMaxInterval = 60 * time.Second
|
||||
DefaultMaxElapsedTime = 15 * time.Minute
|
||||
)
|
||||
|
||||
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||
func NewExponentialBackOff() *ExponentialBackOff {
|
||||
b := &ExponentialBackOff{
|
||||
InitialInterval: DefaultInitialInterval,
|
||||
RandomizationFactor: DefaultRandomizationFactor,
|
||||
Multiplier: DefaultMultiplier,
|
||||
MaxInterval: DefaultMaxInterval,
|
||||
MaxElapsedTime: DefaultMaxElapsedTime,
|
||||
Stop: Stop,
|
||||
Clock: SystemClock,
|
||||
}
|
||||
b.Reset()
|
||||
return b
|
||||
}
|
||||
|
||||
type systemClock struct{}
|
||||
|
||||
func (t systemClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// SystemClock implements Clock interface that uses time.Now().
|
||||
var SystemClock = systemClock{}
|
||||
|
||||
// Reset the interval back to the initial retry interval and restarts the timer.
|
||||
// Reset must be called before using b.
|
||||
func (b *ExponentialBackOff) Reset() {
|
||||
b.currentInterval = b.InitialInterval
|
||||
b.startTime = b.Clock.Now()
|
||||
}
|
||||
|
||||
// NextBackOff calculates the next backoff interval using the formula:
|
||||
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
|
||||
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||
// Make sure we have not gone over the maximum elapsed time.
|
||||
elapsed := b.GetElapsedTime()
|
||||
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
|
||||
b.incrementCurrentInterval()
|
||||
if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
|
||||
return b.Stop
|
||||
}
|
||||
return next
|
||||
}
|
||||
|
||||
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
|
||||
// is created and is reset when Reset() is called.
|
||||
//
|
||||
// The elapsed time is computed using time.Now().UnixNano(). It is
|
||||
// safe to call even while the backoff policy is used by a running
|
||||
// ticker.
|
||||
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
|
||||
return b.Clock.Now().Sub(b.startTime)
|
||||
}
|
||||
|
||||
// Increments the current interval by multiplying it with the multiplier.
|
||||
func (b *ExponentialBackOff) incrementCurrentInterval() {
|
||||
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
|
||||
b.currentInterval = b.MaxInterval
|
||||
} else {
|
||||
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a random value from the following interval:
|
||||
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
|
||||
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
|
||||
var delta = randomizationFactor * float64(currentInterval)
|
||||
var minInterval = float64(currentInterval) - delta
|
||||
var maxInterval = float64(currentInterval) + delta
|
||||
|
||||
// Get a random value from the range [minInterval, maxInterval].
|
||||
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
|
||||
// we want a 33% chance for selecting either 1, 2 or 3.
|
||||
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
|
||||
}
|
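To make the randomized-interval formula from the doc comment above concrete: with currentInterval = 2s and RandomizationFactor = 0.5, delta = 1s, so the next delay is drawn from roughly [1s, 3s]; the multiplier then raises currentInterval for the following attempt. A small standalone sketch of that step (it mirrors getRandomValueFromInterval rather than importing it):

```go
// Sketch: the randomization step of the exponential backoff, reimplemented
// standalone for illustration; numbers are examples, not library defaults.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func randomizedInterval(factor, random float64, current time.Duration) time.Duration {
	delta := factor * float64(current)
	min := float64(current) - delta
	max := float64(current) + delta
	// random is in [0, 1); the +1 matches the vendored implementation so the
	// upper bound of the range stays reachable.
	return time.Duration(min + random*(max-min+1))
}

func main() {
	// With a 2s interval and factor 0.5, prints a value between ~1s and ~3s.
	fmt.Println(randomizedInterval(0.5, rand.Float64(), 2*time.Second))
}
```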
vendor/github.com/cenkalti/backoff/v4/go.mod (new file, 3 lines, generated, vendored)

@@ -0,0 +1,3 @@
module github.com/cenkalti/backoff/v4

go 1.13
vendor/github.com/cenkalti/backoff/v4/retry.go (new file, 112 lines, generated, vendored)

@@ -0,0 +1,112 @@
package backoff

import (
    "errors"
    "time"
)

// An Operation is executed by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error

// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy stated to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)

// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error {
    return RetryNotify(o, b, nil)
}

// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
    return RetryNotifyWithTimer(operation, b, notify, nil)
}

// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
    var err error
    var next time.Duration
    if t == nil {
        t = &defaultTimer{}
    }

    defer func() {
        t.Stop()
    }()

    ctx := getContext(b)

    b.Reset()
    for {
        if err = operation(); err == nil {
            return nil
        }

        var permanent *PermanentError
        if errors.As(err, &permanent) {
            return permanent.Err
        }

        if next = b.NextBackOff(); next == Stop {
            if cerr := ctx.Err(); cerr != nil {
                return cerr
            }

            return err
        }

        if notify != nil {
            notify(err, next)
        }

        t.Start(next)

        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-t.C():
        }
    }
}

// PermanentError signals that the operation should not be retried.
type PermanentError struct {
    Err error
}

func (e *PermanentError) Error() string {
    return e.Err.Error()
}

func (e *PermanentError) Unwrap() error {
    return e.Err
}

func (e *PermanentError) Is(target error) bool {
    _, ok := target.(*PermanentError)
    return ok
}

// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
    if err == nil {
        return nil
    }
    return &PermanentError{
        Err: err,
    }
}
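As a reading aid for the Retry/Permanent pair above, here is a short, hypothetical caller that is not part of this change; the transient failure and the attempt count are invented purely for illustration.

// Illustrative only: retries a flaky operation with an exponential policy.
// A *PermanentError returned from op would end the loop immediately,
// unwrapped. Not part of the diff.
package main

import (
    "errors"
    "log"

    "github.com/cenkalti/backoff/v4"
)

func main() {
    attempts := 0
    op := func() error {
        attempts++
        if attempts < 3 {
            return errors.New("transient failure") // retried after the next backoff interval
        }
        return nil // success ends the Retry loop
    }

    if err := backoff.Retry(op, backoff.NewExponentialBackOff()); err != nil {
        log.Fatal(err)
    }
    log.Printf("succeeded after %d attempts", attempts)
}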
97
vendor/github.com/cenkalti/backoff/v4/ticker.go
generated
vendored
Normal file
97
vendor/github.com/cenkalti/backoff/v4/ticker.go
generated
vendored
Normal file
@ -0,0 +1,97 @@
package backoff

import (
    "context"
    "sync"
    "time"
)

// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Ticks will continue to arrive when the previous operation is still running,
// so operations that take a while to fail could run in quick succession.
type Ticker struct {
    C        <-chan time.Time
    c        chan time.Time
    b        BackOff
    ctx      context.Context
    timer    Timer
    stop     chan struct{}
    stopOnce sync.Once
}

// NewTicker returns a new Ticker containing a channel that will send
// the time at times specified by the BackOff argument. Ticker is
// guaranteed to tick at least once. The channel is closed when Stop
// method is called or BackOff stops. It is not safe to manipulate the
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
    return NewTickerWithTimer(b, &defaultTimer{})
}

// NewTickerWithTimer returns a new Ticker with a custom timer.
// A default timer that uses system timer is used when nil is passed.
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
    if timer == nil {
        timer = &defaultTimer{}
    }
    c := make(chan time.Time)
    t := &Ticker{
        C:     c,
        c:     c,
        b:     b,
        ctx:   getContext(b),
        timer: timer,
        stop:  make(chan struct{}),
    }
    t.b.Reset()
    go t.run()
    return t
}

// Stop turns off a ticker. After Stop, no more ticks will be sent.
func (t *Ticker) Stop() {
    t.stopOnce.Do(func() { close(t.stop) })
}

func (t *Ticker) run() {
    c := t.c
    defer close(c)

    // Ticker is guaranteed to tick at least once.
    afterC := t.send(time.Now())

    for {
        if afterC == nil {
            return
        }

        select {
        case tick := <-afterC:
            afterC = t.send(tick)
        case <-t.stop:
            t.c = nil // Prevent future ticks from being sent to the channel.
            return
        case <-t.ctx.Done():
            return
        }
    }
}

func (t *Ticker) send(tick time.Time) <-chan time.Time {
    select {
    case t.c <- tick:
    case <-t.stop:
        return nil
    }

    next := t.b.NextBackOff()
    if next == Stop {
        t.Stop()
        return nil
    }

    t.timer.Start(next)
    return t.timer.C()
}
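The Ticker above pairs a BackOff with a channel; a hypothetical consumer, not part of this change, might look like the sketch below, where doWork stands in for whatever operation is being retried.

// Illustrative only: ranges over Ticker.C until the work succeeds or the
// backoff policy gives up (which closes the channel). Not part of the diff.
package main

import (
    "errors"
    "log"

    "github.com/cenkalti/backoff/v4"
)

func doWork() error { return errors.New("stand-in operation, always failing here") }

func main() {
    ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
    defer ticker.Stop()

    var err error
    for range ticker.C {
        if err = doWork(); err == nil {
            break // success; stop consuming ticks
        }
        log.Printf("retrying after failure: %v", err)
    }
    if err != nil {
        log.Fatalf("backoff policy gave up: %v", err)
    }
}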
35
vendor/github.com/cenkalti/backoff/v4/timer.go
generated
vendored
Normal file
35
vendor/github.com/cenkalti/backoff/v4/timer.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
package backoff

import "time"

type Timer interface {
    Start(duration time.Duration)
    Stop()
    C() <-chan time.Time
}

// defaultTimer implements Timer interface using time.Timer
type defaultTimer struct {
    timer *time.Timer
}

// C returns the timer's channel which receives the current time when the timer fires.
func (t *defaultTimer) C() <-chan time.Time {
    return t.timer.C
}

// Start starts the timer to fire after the given duration
func (t *defaultTimer) Start(duration time.Duration) {
    if t.timer == nil {
        t.timer = time.NewTimer(duration)
    } else {
        t.timer.Reset(duration)
    }
}

// Stop is called when the timer is not used anymore and resources may be freed.
func (t *defaultTimer) Stop() {
    if t.timer != nil {
        t.timer.Stop()
    }
}
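Because RetryNotifyWithTimer accepts any Timer, callers can substitute a fake that fires immediately, which is handy in tests. The sketch below is hypothetical and not part of this change; it assumes each tick is consumed before the next Start call, which matches the retry loop defined earlier in this package.

// Illustrative only: a fake Timer that never sleeps, wired into
// RetryNotifyWithTimer. Not part of the diff.
package main

import (
    "errors"
    "log"
    "time"

    "github.com/cenkalti/backoff/v4"
)

// instantTimer satisfies the Timer interface but delivers ticks immediately.
type instantTimer struct{ c chan time.Time }

func (t *instantTimer) Start(time.Duration) { t.c <- time.Now() } // buffered send; drained by the retry loop
func (t *instantTimer) Stop()               {}
func (t *instantTimer) C() <-chan time.Time { return t.c }

func main() {
    attempts := 0
    op := func() error {
        attempts++
        if attempts < 3 {
            return errors.New("transient")
        }
        return nil
    }

    t := &instantTimer{c: make(chan time.Time, 1)}
    if err := backoff.RetryNotifyWithTimer(op, backoff.NewExponentialBackOff(), nil, t); err != nil {
        log.Fatal(err)
    }
    log.Printf("finished after %d attempts without real sleeping", attempts)
}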
38
vendor/github.com/cenkalti/backoff/v4/tries.go
generated
vendored
Normal file
38
vendor/github.com/cenkalti/backoff/v4/tries.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
package backoff

import "time"

/*
WithMaxRetries creates a wrapper around another BackOff, which will
return Stop if NextBackOff() has been called too many times since
the last time Reset() was called

Note: Implementation is not thread-safe.
*/
func WithMaxRetries(b BackOff, max uint64) BackOff {
    return &backOffTries{delegate: b, maxTries: max}
}

type backOffTries struct {
    delegate BackOff
    maxTries uint64
    numTries uint64
}

func (b *backOffTries) NextBackOff() time.Duration {
    if b.maxTries == 0 {
        return Stop
    }
    if b.maxTries > 0 {
        if b.maxTries <= b.numTries {
            return Stop
        }
        b.numTries++
    }
    return b.delegate.NextBackOff()
}

func (b *backOffTries) Reset() {
    b.numTries = 0
    b.delegate.Reset()
}
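WithMaxRetries composes with any policy; the hypothetical sketch below, not part of this change, caps an exponential policy at four retries, i.e. five attempts in total.

// Illustrative only: bounds the number of retries regardless of elapsed
// time. Not part of the diff.
package main

import (
    "errors"
    "log"

    "github.com/cenkalti/backoff/v4"
)

func main() {
    policy := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 4)

    err := backoff.Retry(func() error {
        return errors.New("still failing") // never succeeds in this sketch
    }, policy)
    log.Printf("gave up with: %v", err) // the last operation error is returned
}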
180
vendor/github.com/golang/protobuf/descriptor/descriptor.go
generated
vendored
Normal file
180
vendor/github.com/golang/protobuf/descriptor/descriptor.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package descriptor provides functions for obtaining the protocol buffer
|
||||
// descriptors of generated Go types.
|
||||
//
|
||||
// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package
|
||||
// for how to obtain an EnumDescriptor or MessageDescriptor in order to
|
||||
// programmatically interact with the protobuf type system.
|
||||
package descriptor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protodesc"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
|
||||
descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||
)
|
||||
|
||||
// Message is proto.Message with a method to return its descriptor.
|
||||
//
|
||||
// Deprecated: The Descriptor method may not be generated by future
|
||||
// versions of protoc-gen-go, meaning that this interface may not
|
||||
// be implemented by many concrete message types.
|
||||
type Message interface {
|
||||
proto.Message
|
||||
Descriptor() ([]byte, []int)
|
||||
}
|
||||
|
||||
// ForMessage returns the file descriptor proto containing
|
||||
// the message and the message descriptor proto for the message itself.
|
||||
// The returned proto messages must not be mutated.
|
||||
//
|
||||
// Deprecated: Not all concrete message types satisfy the Message interface.
|
||||
// Use MessageDescriptorProto instead. If possible, the calling code should
|
||||
// be rewritten to use protobuf reflection instead.
|
||||
// See package "google.golang.org/protobuf/reflect/protoreflect" for details.
|
||||
func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
|
||||
return MessageDescriptorProto(m)
|
||||
}
|
||||
|
||||
type rawDesc struct {
|
||||
fileDesc []byte
|
||||
indexes []int
|
||||
}
|
||||
|
||||
var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc
|
||||
|
||||
func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) {
|
||||
// Fast-path: check whether raw descriptors are already cached.
|
||||
origDesc := d
|
||||
if v, ok := rawDescCache.Load(origDesc); ok {
|
||||
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
|
||||
}
|
||||
|
||||
// Slow-path: derive the raw descriptor from the v2 descriptor.
|
||||
|
||||
// Start with the leaf (a given enum or message declaration) and
|
||||
// ascend upwards until we hit the parent file descriptor.
|
||||
var idxs []int
|
||||
for {
|
||||
idxs = append(idxs, d.Index())
|
||||
d = d.Parent()
|
||||
if d == nil {
|
||||
// TODO: We could construct a FileDescriptor stub for standalone
|
||||
// descriptors to satisfy the API.
|
||||
return nil, nil
|
||||
}
|
||||
if _, ok := d.(protoreflect.FileDescriptor); ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Obtain the raw file descriptor.
|
||||
fd := d.(protoreflect.FileDescriptor)
|
||||
b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd))
|
||||
file := protoimpl.X.CompressGZIP(b)
|
||||
|
||||
// Reverse the indexes, since we populated it in reverse.
|
||||
for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 {
|
||||
idxs[i], idxs[j] = idxs[j], idxs[i]
|
||||
}
|
||||
|
||||
if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok {
|
||||
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
|
||||
}
|
||||
return file, idxs
|
||||
}
|
||||
|
||||
// EnumRawDescriptor returns the GZIP'd raw file descriptor representing
|
||||
// the enum and the index path to reach the enum declaration.
|
||||
// The returned slices must not be mutated.
|
||||
func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) {
|
||||
if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok {
|
||||
return ev.EnumDescriptor()
|
||||
}
|
||||
ed := protoimpl.X.EnumTypeOf(e)
|
||||
return deriveRawDescriptor(ed.Descriptor())
|
||||
}
|
||||
|
||||
// MessageRawDescriptor returns the GZIP'd raw file descriptor representing
|
||||
// the message and the index path to reach the message declaration.
|
||||
// The returned slices must not be mutated.
|
||||
func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) {
|
||||
if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok {
|
||||
return mv.Descriptor()
|
||||
}
|
||||
md := protoimpl.X.MessageTypeOf(m)
|
||||
return deriveRawDescriptor(md.Descriptor())
|
||||
}
|
||||
|
||||
var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto
|
||||
|
||||
func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto {
|
||||
// Fast-path: check whether descriptor protos are already cached.
|
||||
if v, ok := fileDescCache.Load(&rawDesc[0]); ok {
|
||||
return v.(*descriptorpb.FileDescriptorProto)
|
||||
}
|
||||
|
||||
// Slow-path: derive the descriptor proto from the GZIP'd message.
|
||||
zr, err := gzip.NewReader(bytes.NewReader(rawDesc))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
b, err := ioutil.ReadAll(zr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fd := new(descriptorpb.FileDescriptorProto)
|
||||
if err := proto.Unmarshal(b, fd); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok {
|
||||
return v.(*descriptorpb.FileDescriptorProto)
|
||||
}
|
||||
return fd
|
||||
}
|
||||
|
||||
// EnumDescriptorProto returns the file descriptor proto representing
|
||||
// the enum and the enum descriptor proto for the enum itself.
|
||||
// The returned proto messages must not be mutated.
|
||||
func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) {
|
||||
rawDesc, idxs := EnumRawDescriptor(e)
|
||||
if rawDesc == nil || idxs == nil {
|
||||
return nil, nil
|
||||
}
|
||||
fd := deriveFileDescriptor(rawDesc)
|
||||
if len(idxs) == 1 {
|
||||
return fd, fd.EnumType[idxs[0]]
|
||||
}
|
||||
md := fd.MessageType[idxs[0]]
|
||||
for _, i := range idxs[1 : len(idxs)-1] {
|
||||
md = md.NestedType[i]
|
||||
}
|
||||
ed := md.EnumType[idxs[len(idxs)-1]]
|
||||
return fd, ed
|
||||
}
|
||||
|
||||
// MessageDescriptorProto returns the file descriptor proto representing
|
||||
// the message and the message descriptor proto for the message itself.
|
||||
// The returned proto messages must not be mutated.
|
||||
func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
|
||||
rawDesc, idxs := MessageRawDescriptor(m)
|
||||
if rawDesc == nil || idxs == nil {
|
||||
return nil, nil
|
||||
}
|
||||
fd := deriveFileDescriptor(rawDesc)
|
||||
md := fd.MessageType[idxs[0]]
|
||||
for _, i := range idxs[1:] {
|
||||
md = md.NestedType[i]
|
||||
}
|
||||
return fd, md
|
||||
}
|
524
vendor/github.com/golang/protobuf/jsonpb/decode.go
generated
vendored
Normal file
524
vendor/github.com/golang/protobuf/jsonpb/decode.go
generated
vendored
Normal file
@ -0,0 +1,524 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package jsonpb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapJSONUnmarshalV2 = false
|
||||
|
||||
// UnmarshalNext unmarshals the next JSON object from d into m.
|
||||
func UnmarshalNext(d *json.Decoder, m proto.Message) error {
|
||||
return new(Unmarshaler).UnmarshalNext(d, m)
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals a JSON object from r into m.
|
||||
func Unmarshal(r io.Reader, m proto.Message) error {
|
||||
return new(Unmarshaler).Unmarshal(r, m)
|
||||
}
|
||||
|
||||
// UnmarshalString unmarshals a JSON object from s into m.
|
||||
func UnmarshalString(s string, m proto.Message) error {
|
||||
return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
|
||||
}
|
||||
|
||||
// Unmarshaler is a configurable object for converting from a JSON
|
||||
// representation to a protocol buffer object.
|
||||
type Unmarshaler struct {
|
||||
// AllowUnknownFields specifies whether to allow messages to contain
|
||||
// unknown JSON fields, as opposed to failing to unmarshal.
|
||||
AllowUnknownFields bool
|
||||
|
||||
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
|
||||
// If unset, the global registry is used by default.
|
||||
AnyResolver AnyResolver
|
||||
}
|
||||
|
||||
// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
|
||||
// they are unmarshaled from JSON. Messages that implement this should also
|
||||
// implement JSONPBMarshaler so that the custom format can be produced.
|
||||
//
|
||||
// The JSON unmarshaling must follow the JSON to proto specification:
|
||||
// https://developers.google.com/protocol-buffers/docs/proto3#json
|
||||
//
|
||||
// Deprecated: Custom types should implement protobuf reflection instead.
|
||||
type JSONPBUnmarshaler interface {
|
||||
UnmarshalJSONPB(*Unmarshaler, []byte) error
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals a JSON object from r into m.
|
||||
func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
|
||||
return u.UnmarshalNext(json.NewDecoder(r), m)
|
||||
}
|
||||
|
||||
// UnmarshalNext unmarshals the next JSON object from d into m.
|
||||
func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
|
||||
if m == nil {
|
||||
return errors.New("invalid nil message")
|
||||
}
|
||||
|
||||
// Parse the next JSON object from the stream.
|
||||
raw := json.RawMessage{}
|
||||
if err := d.Decode(&raw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for custom unmarshalers first since they may not properly
|
||||
// implement protobuf reflection that the logic below relies on.
|
||||
if jsu, ok := m.(JSONPBUnmarshaler); ok {
|
||||
return jsu.UnmarshalJSONPB(u, raw)
|
||||
}
|
||||
|
||||
mr := proto.MessageReflect(m)
|
||||
|
||||
// NOTE: For historical reasons, a top-level null is treated as a noop.
|
||||
// This is incorrect, but kept for compatibility.
|
||||
if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if wrapJSONUnmarshalV2 {
|
||||
// NOTE: If input message is non-empty, we need to preserve merge semantics
|
||||
// of the old jsonpb implementation. These semantics are not supported by
|
||||
// the protobuf JSON specification.
|
||||
isEmpty := true
|
||||
mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
|
||||
isEmpty = false // at least one iteration implies non-empty
|
||||
return false
|
||||
})
|
||||
if !isEmpty {
|
||||
// Perform unmarshaling into a newly allocated, empty message.
|
||||
mr = mr.New()
|
||||
|
||||
// Use a defer to copy all unmarshaled fields into the original message.
|
||||
dst := proto.MessageReflect(m)
|
||||
defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
dst.Set(fd, v)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// Unmarshal using the v2 JSON unmarshaler.
|
||||
opts := protojson.UnmarshalOptions{
|
||||
DiscardUnknown: u.AllowUnknownFields,
|
||||
}
|
||||
if u.AnyResolver != nil {
|
||||
opts.Resolver = anyResolver{u.AnyResolver}
|
||||
}
|
||||
return opts.Unmarshal(raw, mr.Interface())
|
||||
} else {
|
||||
if err := u.unmarshalMessage(mr, raw); err != nil {
|
||||
return err
|
||||
}
|
||||
return protoV2.CheckInitialized(mr.Interface())
|
||||
}
|
||||
}
|
||||
|
||||
func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
|
||||
md := m.Descriptor()
|
||||
fds := md.Fields()
|
||||
|
||||
if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
|
||||
return jsu.UnmarshalJSONPB(u, in)
|
||||
}
|
||||
|
||||
if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch wellKnownType(md.FullName()) {
|
||||
case "Any":
|
||||
var jsonObject map[string]json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rawTypeURL, ok := jsonObject["@type"]
|
||||
if !ok {
|
||||
return errors.New("Any JSON doesn't have '@type'")
|
||||
}
|
||||
typeURL, err := unquoteString(string(rawTypeURL))
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
|
||||
}
|
||||
m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
|
||||
|
||||
var m2 protoreflect.Message
|
||||
if u.AnyResolver != nil {
|
||||
mi, err := u.AnyResolver.Resolve(typeURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m2 = proto.MessageReflect(mi)
|
||||
} else {
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
|
||||
if err != nil {
|
||||
if err == protoregistry.NotFound {
|
||||
return fmt.Errorf("could not resolve Any message type: %v", typeURL)
|
||||
}
|
||||
return err
|
||||
}
|
||||
m2 = mt.New()
|
||||
}
|
||||
|
||||
if wellKnownType(m2.Descriptor().FullName()) != "" {
|
||||
rawValue, ok := jsonObject["value"]
|
||||
if !ok {
|
||||
return errors.New("Any JSON doesn't have 'value'")
|
||||
}
|
||||
if err := u.unmarshalMessage(m2, rawValue); err != nil {
|
||||
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
|
||||
}
|
||||
} else {
|
||||
delete(jsonObject, "@type")
|
||||
rawJSON, err := json.Marshal(jsonObject)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
|
||||
}
|
||||
if err = u.unmarshalMessage(m2, rawJSON); err != nil {
|
||||
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
|
||||
}
|
||||
}
|
||||
|
||||
rawWire, err := protoV2.Marshal(m2.Interface())
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
|
||||
}
|
||||
m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
|
||||
return nil
|
||||
case "BoolValue", "BytesValue", "StringValue",
|
||||
"Int32Value", "UInt32Value", "FloatValue",
|
||||
"Int64Value", "UInt64Value", "DoubleValue":
|
||||
fd := fds.ByNumber(1)
|
||||
v, err := u.unmarshalValue(m.NewField(fd), in, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
return nil
|
||||
case "Duration":
|
||||
v, err := unquoteString(string(in))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d, err := time.ParseDuration(v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Duration: %v", err)
|
||||
}
|
||||
|
||||
sec := d.Nanoseconds() / 1e9
|
||||
nsec := d.Nanoseconds() % 1e9
|
||||
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
|
||||
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
|
||||
return nil
|
||||
case "Timestamp":
|
||||
v, err := unquoteString(string(in))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t, err := time.Parse(time.RFC3339Nano, v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Timestamp: %v", err)
|
||||
}
|
||||
|
||||
sec := t.Unix()
|
||||
nsec := t.Nanosecond()
|
||||
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
|
||||
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
|
||||
return nil
|
||||
case "Value":
|
||||
switch {
|
||||
case string(in) == "null":
|
||||
m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
|
||||
case string(in) == "true":
|
||||
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
|
||||
case string(in) == "false":
|
||||
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
|
||||
case hasPrefixAndSuffix('"', in, '"'):
|
||||
s, err := unquoteString(string(in))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unrecognized type for Value %q", in)
|
||||
}
|
||||
m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
|
||||
case hasPrefixAndSuffix('[', in, ']'):
|
||||
v := m.Mutable(fds.ByNumber(6))
|
||||
return u.unmarshalMessage(v.Message(), in)
|
||||
case hasPrefixAndSuffix('{', in, '}'):
|
||||
v := m.Mutable(fds.ByNumber(5))
|
||||
return u.unmarshalMessage(v.Message(), in)
|
||||
default:
|
||||
f, err := strconv.ParseFloat(string(in), 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unrecognized type for Value %q", in)
|
||||
}
|
||||
m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
|
||||
}
|
||||
return nil
|
||||
case "ListValue":
|
||||
var jsonArray []json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonArray); err != nil {
|
||||
return fmt.Errorf("bad ListValue: %v", err)
|
||||
}
|
||||
|
||||
lv := m.Mutable(fds.ByNumber(1)).List()
|
||||
for _, raw := range jsonArray {
|
||||
ve := lv.NewElement()
|
||||
if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
|
||||
return err
|
||||
}
|
||||
lv.Append(ve)
|
||||
}
|
||||
return nil
|
||||
case "Struct":
|
||||
var jsonObject map[string]json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||
return fmt.Errorf("bad StructValue: %v", err)
|
||||
}
|
||||
|
||||
mv := m.Mutable(fds.ByNumber(1)).Map()
|
||||
for key, raw := range jsonObject {
|
||||
kv := protoreflect.ValueOf(key).MapKey()
|
||||
vv := mv.NewValue()
|
||||
if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
|
||||
return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
|
||||
}
|
||||
mv.Set(kv, vv)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var jsonObject map[string]json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle known fields.
|
||||
for i := 0; i < fds.Len(); i++ {
|
||||
fd := fds.Get(i)
|
||||
if fd.IsWeak() && fd.Message().IsPlaceholder() {
|
||||
continue // weak reference is not linked in
|
||||
}
|
||||
|
||||
// Search for any raw JSON value associated with this field.
|
||||
var raw json.RawMessage
|
||||
name := string(fd.Name())
|
||||
if fd.Kind() == protoreflect.GroupKind {
|
||||
name = string(fd.Message().Name())
|
||||
}
|
||||
if v, ok := jsonObject[name]; ok {
|
||||
delete(jsonObject, name)
|
||||
raw = v
|
||||
}
|
||||
name = string(fd.JSONName())
|
||||
if v, ok := jsonObject[name]; ok {
|
||||
delete(jsonObject, name)
|
||||
raw = v
|
||||
}
|
||||
|
||||
field := m.NewField(fd)
|
||||
// Unmarshal the field value.
|
||||
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
|
||||
continue
|
||||
}
|
||||
v, err := u.unmarshalValue(field, raw, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
}
|
||||
|
||||
// Handle extension fields.
|
||||
for name, raw := range jsonObject {
|
||||
if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Resolve the extension field by name.
|
||||
xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
|
||||
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
||||
if xt == nil && isMessageSet(md) {
|
||||
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
||||
}
|
||||
if xt == nil {
|
||||
continue
|
||||
}
|
||||
delete(jsonObject, name)
|
||||
fd := xt.TypeDescriptor()
|
||||
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
||||
return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
|
||||
}
|
||||
|
||||
field := m.NewField(fd)
|
||||
// Unmarshal the field value.
|
||||
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
|
||||
continue
|
||||
}
|
||||
v, err := u.unmarshalValue(field, raw, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
}
|
||||
|
||||
if !u.AllowUnknownFields && len(jsonObject) > 0 {
|
||||
for name := range jsonObject {
|
||||
return fmt.Errorf("unknown field %q in %v", name, md.FullName())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
|
||||
if md := fd.Message(); md != nil {
|
||||
return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
|
||||
if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
|
||||
_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
|
||||
return ok
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
switch {
|
||||
case fd.IsList():
|
||||
var jsonArray []json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonArray); err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv := v.List()
|
||||
for _, raw := range jsonArray {
|
||||
ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(ve)
|
||||
}
|
||||
return v, nil
|
||||
case fd.IsMap():
|
||||
var jsonObject map[string]json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||
return v, err
|
||||
}
|
||||
kfd := fd.MapKey()
|
||||
vfd := fd.MapValue()
|
||||
mv := v.Map()
|
||||
for key, raw := range jsonObject {
|
||||
var kv protoreflect.MapKey
|
||||
if kfd.Kind() == protoreflect.StringKind {
|
||||
kv = protoreflect.ValueOf(key).MapKey()
|
||||
} else {
|
||||
v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
kv = v.MapKey()
|
||||
}
|
||||
|
||||
vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
mv.Set(kv, vv)
|
||||
}
|
||||
return v, nil
|
||||
default:
|
||||
return u.unmarshalSingularValue(v, in, fd)
|
||||
}
|
||||
}
|
||||
|
||||
var nonFinite = map[string]float64{
|
||||
`"NaN"`: math.NaN(),
|
||||
`"Infinity"`: math.Inf(+1),
|
||||
`"-Infinity"`: math.Inf(-1),
|
||||
}
|
||||
|
||||
func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
switch fd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
return unmarshalValue(in, new(bool))
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||
return unmarshalValue(trimQuote(in), new(int32))
|
||||
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
return unmarshalValue(trimQuote(in), new(int64))
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||
return unmarshalValue(trimQuote(in), new(uint32))
|
||||
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
return unmarshalValue(trimQuote(in), new(uint64))
|
||||
case protoreflect.FloatKind:
|
||||
if f, ok := nonFinite[string(in)]; ok {
|
||||
return protoreflect.ValueOfFloat32(float32(f)), nil
|
||||
}
|
||||
return unmarshalValue(trimQuote(in), new(float32))
|
||||
case protoreflect.DoubleKind:
|
||||
if f, ok := nonFinite[string(in)]; ok {
|
||||
return protoreflect.ValueOfFloat64(float64(f)), nil
|
||||
}
|
||||
return unmarshalValue(trimQuote(in), new(float64))
|
||||
case protoreflect.StringKind:
|
||||
return unmarshalValue(in, new(string))
|
||||
case protoreflect.BytesKind:
|
||||
return unmarshalValue(in, new([]byte))
|
||||
case protoreflect.EnumKind:
|
||||
if hasPrefixAndSuffix('"', in, '"') {
|
||||
vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
|
||||
if vd == nil {
|
||||
return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
|
||||
}
|
||||
return protoreflect.ValueOfEnum(vd.Number()), nil
|
||||
}
|
||||
return unmarshalValue(in, new(protoreflect.EnumNumber))
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
err := u.unmarshalMessage(v.Message(), in)
|
||||
return v, err
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
|
||||
err := json.Unmarshal(in, v)
|
||||
return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
|
||||
}
|
||||
|
||||
func unquoteString(in string) (out string, err error) {
|
||||
err = json.Unmarshal([]byte(in), &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
|
||||
if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// trimQuote is like unquoteString but simply strips surrounding quotes.
|
||||
// This is incorrect, but is behavior done by the legacy implementation.
|
||||
func trimQuote(in []byte) []byte {
|
||||
if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
|
||||
in = in[1 : len(in)-1]
|
||||
}
|
||||
return in
|
||||
}
|
559
vendor/github.com/golang/protobuf/jsonpb/encode.go
generated
vendored
Normal file
559
vendor/github.com/golang/protobuf/jsonpb/encode.go
generated
vendored
Normal file
@ -0,0 +1,559 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package jsonpb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapJSONMarshalV2 = false
|
||||
|
||||
// Marshaler is a configurable object for marshaling protocol buffer messages
|
||||
// to the specified JSON representation.
|
||||
type Marshaler struct {
|
||||
// OrigName specifies whether to use the original protobuf name for fields.
|
||||
OrigName bool
|
||||
|
||||
// EnumsAsInts specifies whether to render enum values as integers,
|
||||
// as opposed to string values.
|
||||
EnumsAsInts bool
|
||||
|
||||
// EmitDefaults specifies whether to render fields with zero values.
|
||||
EmitDefaults bool
|
||||
|
||||
// Indent controls whether the output is compact or not.
|
||||
// If empty, the output is compact JSON. Otherwise, every JSON object
|
||||
// entry and JSON array value will be on its own line.
|
||||
// Each line will be preceded by repeated copies of Indent, where the
|
||||
// number of copies is the current indentation depth.
|
||||
Indent string
|
||||
|
||||
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
|
||||
// If unset, the global registry is used by default.
|
||||
AnyResolver AnyResolver
|
||||
}
|
||||
|
||||
// JSONPBMarshaler is implemented by protobuf messages that customize the
|
||||
// way they are marshaled to JSON. Messages that implement this should also
|
||||
// implement JSONPBUnmarshaler so that the custom format can be parsed.
|
||||
//
|
||||
// The JSON marshaling must follow the proto to JSON specification:
|
||||
// https://developers.google.com/protocol-buffers/docs/proto3#json
|
||||
//
|
||||
// Deprecated: Custom types should implement protobuf reflection instead.
|
||||
type JSONPBMarshaler interface {
|
||||
MarshalJSONPB(*Marshaler) ([]byte, error)
|
||||
}
|
||||
|
||||
// Marshal serializes a protobuf message as JSON into w.
|
||||
func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
|
||||
b, err := jm.marshal(m)
|
||||
if len(b) > 0 {
|
||||
if _, err := w.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalToString serializes a protobuf message as JSON in string form.
|
||||
func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
|
||||
b, err := jm.marshal(m)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
|
||||
v := reflect.ValueOf(m)
|
||||
if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
|
||||
return nil, errors.New("Marshal called with nil")
|
||||
}
|
||||
|
||||
// Check for custom marshalers first since they may not properly
|
||||
// implement protobuf reflection that the logic below relies on.
|
||||
if jsm, ok := m.(JSONPBMarshaler); ok {
|
||||
return jsm.MarshalJSONPB(jm)
|
||||
}
|
||||
|
||||
if wrapJSONMarshalV2 {
|
||||
opts := protojson.MarshalOptions{
|
||||
UseProtoNames: jm.OrigName,
|
||||
UseEnumNumbers: jm.EnumsAsInts,
|
||||
EmitUnpopulated: jm.EmitDefaults,
|
||||
Indent: jm.Indent,
|
||||
}
|
||||
if jm.AnyResolver != nil {
|
||||
opts.Resolver = anyResolver{jm.AnyResolver}
|
||||
}
|
||||
return opts.Marshal(proto.MessageReflect(m).Interface())
|
||||
} else {
|
||||
// Check for unpopulated required fields first.
|
||||
m2 := proto.MessageReflect(m)
|
||||
if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := jsonWriter{Marshaler: jm}
|
||||
err := w.marshalMessage(m2, "", "")
|
||||
return w.buf, err
|
||||
}
|
||||
}
|
||||
|
||||
type jsonWriter struct {
|
||||
*Marshaler
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (w *jsonWriter) write(s string) {
|
||||
w.buf = append(w.buf, s...)
|
||||
}
|
||||
|
||||
func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
|
||||
if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
|
||||
b, err := jsm.MarshalJSONPB(w.Marshaler)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if typeURL != "" {
|
||||
// we are marshaling this object to an Any type
|
||||
var js map[string]*json.RawMessage
|
||||
if err = json.Unmarshal(b, &js); err != nil {
|
||||
return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
|
||||
}
|
||||
turl, err := json.Marshal(typeURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
|
||||
}
|
||||
js["@type"] = (*json.RawMessage)(&turl)
|
||||
if b, err = json.Marshal(js); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.write(string(b))
|
||||
return nil
|
||||
}
|
||||
|
||||
md := m.Descriptor()
|
||||
fds := md.Fields()
|
||||
|
||||
// Handle well-known types.
|
||||
const secondInNanos = int64(time.Second / time.Nanosecond)
|
||||
switch wellKnownType(md.FullName()) {
|
||||
case "Any":
|
||||
return w.marshalAny(m, indent)
|
||||
case "BoolValue", "BytesValue", "StringValue",
|
||||
"Int32Value", "UInt32Value", "FloatValue",
|
||||
"Int64Value", "UInt64Value", "DoubleValue":
|
||||
fd := fds.ByNumber(1)
|
||||
return w.marshalValue(fd, m.Get(fd), indent)
|
||||
case "Duration":
|
||||
const maxSecondsInDuration = 315576000000
|
||||
// "Generated output always contains 0, 3, 6, or 9 fractional digits,
|
||||
// depending on required precision."
|
||||
s := m.Get(fds.ByNumber(1)).Int()
|
||||
ns := m.Get(fds.ByNumber(2)).Int()
|
||||
if s < -maxSecondsInDuration || s > maxSecondsInDuration {
|
||||
return fmt.Errorf("seconds out of range %v", s)
|
||||
}
|
||||
if ns <= -secondInNanos || ns >= secondInNanos {
|
||||
return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
|
||||
}
|
||||
if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
|
||||
return errors.New("signs of seconds and nanos do not match")
|
||||
}
|
||||
var sign string
|
||||
if s < 0 || ns < 0 {
|
||||
sign, s, ns = "-", -1*s, -1*ns
|
||||
}
|
||||
x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
|
||||
x = strings.TrimSuffix(x, "000")
|
||||
x = strings.TrimSuffix(x, "000")
|
||||
x = strings.TrimSuffix(x, ".000")
|
||||
w.write(fmt.Sprintf(`"%vs"`, x))
|
||||
return nil
|
||||
case "Timestamp":
|
||||
// "RFC 3339, where generated output will always be Z-normalized
|
||||
// and uses 0, 3, 6 or 9 fractional digits."
|
||||
s := m.Get(fds.ByNumber(1)).Int()
|
||||
ns := m.Get(fds.ByNumber(2)).Int()
|
||||
if ns < 0 || ns >= secondInNanos {
|
||||
return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
|
||||
}
|
||||
t := time.Unix(s, ns).UTC()
|
||||
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
|
||||
x := t.Format("2006-01-02T15:04:05.000000000")
|
||||
x = strings.TrimSuffix(x, "000")
|
||||
x = strings.TrimSuffix(x, "000")
|
||||
x = strings.TrimSuffix(x, ".000")
|
||||
w.write(fmt.Sprintf(`"%vZ"`, x))
|
||||
return nil
|
||||
case "Value":
|
||||
// JSON value; which is a null, number, string, bool, object, or array.
|
||||
od := md.Oneofs().Get(0)
|
||||
fd := m.WhichOneof(od)
|
||||
if fd == nil {
|
||||
return errors.New("nil Value")
|
||||
}
|
||||
return w.marshalValue(fd, m.Get(fd), indent)
|
||||
case "Struct", "ListValue":
|
||||
// JSON object or array.
|
||||
fd := fds.ByNumber(1)
|
||||
return w.marshalValue(fd, m.Get(fd), indent)
|
||||
}
|
||||
|
||||
w.write("{")
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
}
|
||||
|
||||
firstField := true
|
||||
if typeURL != "" {
|
||||
if err := w.marshalTypeURL(indent, typeURL); err != nil {
|
||||
return err
|
||||
}
|
||||
firstField = false
|
||||
}
|
||||
|
||||
for i := 0; i < fds.Len(); {
|
||||
fd := fds.Get(i)
|
||||
if od := fd.ContainingOneof(); od != nil {
|
||||
fd = m.WhichOneof(od)
|
||||
i += od.Fields().Len()
|
||||
if fd == nil {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
|
||||
v := m.Get(fd)
|
||||
|
||||
if !m.Has(fd) {
|
||||
if !w.EmitDefaults || fd.ContainingOneof() != nil {
|
||||
continue
|
||||
}
|
||||
if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
|
||||
v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
|
||||
}
|
||||
}
|
||||
|
||||
if !firstField {
|
||||
w.writeComma()
|
||||
}
|
||||
if err := w.marshalField(fd, v, indent); err != nil {
|
||||
return err
|
||||
}
|
||||
firstField = false
|
||||
}
|
||||
|
||||
// Handle proto2 extensions.
|
||||
if md.ExtensionRanges().Len() > 0 {
|
||||
// Collect a sorted list of all extension descriptor and values.
|
||||
type ext struct {
|
||||
desc protoreflect.FieldDescriptor
|
||||
val protoreflect.Value
|
||||
}
|
||||
var exts []ext
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
exts = append(exts, ext{fd, v})
|
||||
}
|
||||
return true
|
||||
})
|
||||
sort.Slice(exts, func(i, j int) bool {
|
||||
return exts[i].desc.Number() < exts[j].desc.Number()
|
||||
})
|
||||
|
||||
for _, ext := range exts {
|
||||
if !firstField {
|
||||
w.writeComma()
|
||||
}
|
||||
if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
|
||||
return err
|
||||
}
|
||||
firstField = false
|
||||
}
|
||||
}
|
||||
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
}
|
||||
w.write("}")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *jsonWriter) writeComma() {
|
||||
if w.Indent != "" {
|
||||
w.write(",\n")
|
||||
} else {
|
||||
w.write(",")
|
||||
}
|
||||
}
|
||||
|
||||
func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
|
||||
// "If the Any contains a value that has a special JSON mapping,
|
||||
// it will be converted as follows: {"@type": xxx, "value": yyy}.
|
||||
// Otherwise, the value will be converted into a JSON object,
|
||||
// and the "@type" field will be inserted to indicate the actual data type."
|
||||
md := m.Descriptor()
|
||||
typeURL := m.Get(md.Fields().ByNumber(1)).String()
|
||||
rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
|
||||
|
||||
var m2 protoreflect.Message
|
||||
if w.AnyResolver != nil {
|
||||
mi, err := w.AnyResolver.Resolve(typeURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m2 = proto.MessageReflect(mi)
|
||||
} else {
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m2 = mt.New()
|
||||
}
|
||||
|
||||
if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if wellKnownType(m2.Descriptor().FullName()) == "" {
|
||||
return w.marshalMessage(m2, indent, typeURL)
|
||||
}
|
||||
|
||||
w.write("{")
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
}
|
||||
if err := w.marshalTypeURL(indent, typeURL); err != nil {
|
||||
return err
|
||||
}
|
||||
w.writeComma()
|
||||
if w.Indent != "" {
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
w.write(`"value": `)
|
||||
} else {
|
||||
w.write(`"value":`)
|
||||
}
|
||||
if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
}
|
||||
w.write("}")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
|
||||
if w.Indent != "" {
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
w.write(`"@type":`)
|
||||
if w.Indent != "" {
|
||||
w.write(" ")
|
||||
}
|
||||
b, err := json.Marshal(typeURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.write(string(b))
|
||||
return nil
|
||||
}
|
||||
|
||||
// marshalField writes field description and value to the Writer.
|
||||
func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
|
||||
if w.Indent != "" {
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
w.write(`"`)
|
||||
switch {
|
||||
case fd.IsExtension():
|
||||
// For message set, use the fname of the message as the extension name.
|
||||
name := string(fd.FullName())
|
||||
if isMessageSet(fd.ContainingMessage()) {
|
||||
name = strings.TrimSuffix(name, ".message_set_extension")
|
||||
}
|
||||
|
||||
w.write("[" + name + "]")
|
||||
case w.OrigName:
|
||||
name := string(fd.Name())
|
||||
if fd.Kind() == protoreflect.GroupKind {
|
||||
name = string(fd.Message().Name())
|
||||
}
|
||||
w.write(name)
|
||||
default:
|
||||
w.write(string(fd.JSONName()))
|
||||
}
|
||||
w.write(`":`)
|
||||
if w.Indent != "" {
|
||||
w.write(" ")
|
||||
}
|
||||
return w.marshalValue(fd, v, indent)
|
||||
}
|
||||
|
||||
func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
|
||||
switch {
|
||||
case fd.IsList():
|
||||
w.write("[")
|
||||
comma := ""
|
||||
lv := v.List()
|
||||
for i := 0; i < lv.Len(); i++ {
|
||||
w.write(comma)
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
|
||||
return err
|
||||
}
|
||||
comma = ","
|
||||
}
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
w.write("]")
|
||||
return nil
|
||||
case fd.IsMap():
|
||||
kfd := fd.MapKey()
|
||||
vfd := fd.MapValue()
|
||||
mv := v.Map()
|
||||
|
||||
// Collect a sorted list of all map keys and values.
|
||||
type entry struct{ key, val protoreflect.Value }
|
||||
var entries []entry
|
||||
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
entries = append(entries, entry{k.Value(), v})
|
||||
return true
|
||||
})
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
switch kfd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
return !entries[i].key.Bool() && entries[j].key.Bool()
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
return entries[i].key.Int() < entries[j].key.Int()
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
return entries[i].key.Uint() < entries[j].key.Uint()
|
||||
case protoreflect.StringKind:
|
||||
return entries[i].key.String() < entries[j].key.String()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
})
|
||||
|
||||
w.write(`{`)
|
||||
comma := ""
|
||||
for _, entry := range entries {
|
||||
w.write(comma)
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
|
||||
s := fmt.Sprint(entry.key.Interface())
|
||||
b, err := json.Marshal(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.write(string(b))
|
||||
|
||||
w.write(`:`)
|
||||
if w.Indent != "" {
|
||||
w.write(` `)
|
||||
}
|
||||
|
||||
if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
|
||||
return err
|
||||
}
|
||||
comma = ","
|
||||
}
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
w.write(`}`)
|
||||
return nil
|
||||
default:
|
||||
return w.marshalSingularValue(fd, v, indent)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
|
||||
switch {
|
||||
case !v.IsValid():
|
||||
w.write("null")
|
||||
return nil
|
||||
case fd.Message() != nil:
|
||||
return w.marshalMessage(v.Message(), indent+w.Indent, "")
|
||||
case fd.Enum() != nil:
|
||||
if fd.Enum().FullName() == "google.protobuf.NullValue" {
|
||||
w.write("null")
|
||||
return nil
|
||||
}
|
||||
|
||||
vd := fd.Enum().Values().ByNumber(v.Enum())
|
||||
if vd == nil || w.EnumsAsInts {
|
||||
w.write(strconv.Itoa(int(v.Enum())))
|
||||
} else {
|
||||
w.write(`"` + string(vd.Name()) + `"`)
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
switch v.Interface().(type) {
|
||||
case float32, float64:
|
||||
switch {
|
||||
case math.IsInf(v.Float(), +1):
|
||||
w.write(`"Infinity"`)
|
||||
return nil
|
||||
case math.IsInf(v.Float(), -1):
|
||||
w.write(`"-Infinity"`)
|
||||
return nil
|
||||
case math.IsNaN(v.Float()):
|
||||
w.write(`"NaN"`)
|
||||
return nil
|
||||
}
|
||||
case int64, uint64:
|
||||
w.write(fmt.Sprintf(`"%d"`, v.Interface()))
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err := json.Marshal(v.Interface())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.write(string(b))
|
||||
return nil
|
||||
}
|
||||
}
|
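For context on the deprecated jsonpb entry points vendored above, a hypothetical caller, not part of this change, would look roughly like the sketch below; it assumes the ptypes/wrappers well-known-type bindings from the same module.

// Illustrative only: marshals a well-known wrapper type with the legacy
// jsonpb Marshaler. Not part of the diff.
package main

import (
    "fmt"
    "log"

    "github.com/golang/protobuf/jsonpb"
    "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
    m := &jsonpb.Marshaler{OrigName: true}
    out, err := m.MarshalToString(&wrappers.StringValue{Value: "hello"})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out) // well-known wrappers marshal to their bare value: "hello"
}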
69
vendor/github.com/golang/protobuf/jsonpb/json.go
generated
vendored
Normal file
69
vendor/github.com/golang/protobuf/jsonpb/json.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package jsonpb provides functionality to marshal and unmarshal between a
|
||||
// protocol buffer message and JSON. It follows the specification at
|
||||
// https://developers.google.com/protocol-buffers/docs/proto3#json.
|
||||
//
|
||||
// Do not rely on the default behavior of the standard encoding/json package
|
||||
// when called on generated message types as it does not operate correctly.
|
||||
//
|
||||
// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
|
||||
// package instead.
|
||||
package jsonpb
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
// AnyResolver takes a type URL, present in an Any message,
|
||||
// and resolves it into an instance of the associated message.
|
||||
type AnyResolver interface {
|
||||
Resolve(typeURL string) (proto.Message, error)
|
||||
}
|
||||
|
||||
type anyResolver struct{ AnyResolver }
|
||||
|
||||
func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
|
||||
return r.FindMessageByURL(string(message))
|
||||
}
|
||||
|
||||
func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
|
||||
m, err := r.Resolve(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return protoimpl.X.MessageTypeOf(m), nil
|
||||
}
|
||||
|
||||
func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
|
||||
return protoregistry.GlobalTypes.FindExtensionByName(field)
|
||||
}
|
||||
|
||||
func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
|
||||
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
|
||||
}
|
||||
|
||||
func wellKnownType(s protoreflect.FullName) string {
|
||||
if s.Parent() == "google.protobuf" {
|
||||
switch s.Name() {
|
||||
case "Empty", "Any",
|
||||
"BoolValue", "BytesValue", "StringValue",
|
||||
"Int32Value", "UInt32Value", "FloatValue",
|
||||
"Int64Value", "UInt64Value", "DoubleValue",
|
||||
"Duration", "Timestamp",
|
||||
"NullValue", "Struct", "Value", "ListValue":
|
||||
return string(s.Name())
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func isMessageSet(md protoreflect.MessageDescriptor) bool {
|
||||
ms, ok := md.(interface{ IsMessageSet() bool })
|
||||
return ok && ms.IsMessageSet()
|
||||
}
|
200
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
generated
vendored
Normal file
200
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
generated
vendored
Normal file
@ -0,0 +1,200 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
|
||||
|
||||
package descriptor
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/descriptor.proto.
|
||||
|
||||
type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
|
||||
|
||||
const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
|
||||
const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
|
||||
const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
|
||||
const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
|
||||
const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
|
||||
const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
|
||||
const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
|
||||
const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
|
||||
const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
|
||||
const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
|
||||
const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
|
||||
const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
|
||||
const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
|
||||
const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
|
||||
const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
|
||||
const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
|
||||
const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
|
||||
const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
|
||||
|
||||
var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
|
||||
var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
|
||||
|
||||
type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
|
||||
|
||||
const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
|
||||
const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
|
||||
const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
|
||||
|
||||
var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
|
||||
var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
|
||||
|
||||
type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
|
||||
|
||||
const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
|
||||
const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
|
||||
const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
|
||||
|
||||
var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
|
||||
var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
|
||||
|
||||
type FieldOptions_CType = descriptorpb.FieldOptions_CType
|
||||
|
||||
const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
|
||||
const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
|
||||
const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
|
||||
|
||||
var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
|
||||
var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
|
||||
|
||||
type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
|
||||
|
||||
const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
|
||||
const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
|
||||
const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
|
||||
|
||||
var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
|
||||
var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
|
||||
|
||||
type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
|
||||
|
||||
const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
|
||||
const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
|
||||
const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
|
||||
|
||||
var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
|
||||
var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
|
||||
|
||||
type FileDescriptorSet = descriptorpb.FileDescriptorSet
|
||||
type FileDescriptorProto = descriptorpb.FileDescriptorProto
|
||||
type DescriptorProto = descriptorpb.DescriptorProto
|
||||
type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
|
||||
type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
|
||||
type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
|
||||
type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
|
||||
type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
|
||||
type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
|
||||
type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
|
||||
|
||||
const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
|
||||
const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
|
||||
|
||||
type FileOptions = descriptorpb.FileOptions
|
||||
|
||||
const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
|
||||
const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
|
||||
const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
|
||||
const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
|
||||
const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
|
||||
const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
|
||||
const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices
|
||||
const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
|
||||
const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
|
||||
|
||||
type MessageOptions = descriptorpb.MessageOptions
|
||||
|
||||
const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
|
||||
const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
|
||||
const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
|
||||
|
||||
type FieldOptions = descriptorpb.FieldOptions
|
||||
|
||||
const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
|
||||
const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
|
||||
const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
|
||||
const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
|
||||
const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
|
||||
|
||||
type OneofOptions = descriptorpb.OneofOptions
|
||||
type EnumOptions = descriptorpb.EnumOptions
|
||||
|
||||
const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
|
||||
|
||||
type EnumValueOptions = descriptorpb.EnumValueOptions
|
||||
|
||||
const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
|
||||
|
||||
type ServiceOptions = descriptorpb.ServiceOptions
|
||||
|
||||
const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
|
||||
|
||||
type MethodOptions = descriptorpb.MethodOptions
|
||||
|
||||
const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
|
||||
const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
|
||||
|
||||
type UninterpretedOption = descriptorpb.UninterpretedOption
|
||||
type SourceCodeInfo = descriptorpb.SourceCodeInfo
|
||||
type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
|
||||
type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
|
||||
type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
|
||||
type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
|
||||
type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
|
||||
type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
|
||||
type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
|
||||
|
||||
var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
|
||||
0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
|
||||
0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
|
||||
0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
|
||||
0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
|
||||
0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x32,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
|
||||
func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
|
||||
if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
|
||||
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
|
||||
}
|
71
vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
|
||||
|
||||
package wrappers
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/wrappers.proto.
|
||||
|
||||
type DoubleValue = wrapperspb.DoubleValue
|
||||
type FloatValue = wrapperspb.FloatValue
|
||||
type Int64Value = wrapperspb.Int64Value
|
||||
type UInt64Value = wrapperspb.UInt64Value
|
||||
type Int32Value = wrapperspb.Int32Value
|
||||
type UInt32Value = wrapperspb.UInt32Value
|
||||
type BoolValue = wrapperspb.BoolValue
|
||||
type StringValue = wrapperspb.StringValue
|
||||
type BytesValue = wrapperspb.BytesValue
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
|
||||
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
|
||||
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
|
||||
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
|
||||
0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
|
||||
0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
|
||||
}
|
2
vendor/github.com/google/go-cmp/cmp/path.go
generated
vendored
@ -315,7 +315,7 @@ func (tf Transform) Option() Option { return tf.trans }
 // pops the address from the stack. Thus, when traversing into a pointer from
 // reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
 // by checking whether the pointer has already been visited. The cycle detection
-// uses a seperate stack for the x and y values.
+// uses a separate stack for the x and y values.
 //
 // If a cycle is detected we need to determine whether the two pointers
 // should be considered equal. The definition of equality chosen by Equal
202
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
@ -7,6 +7,7 @@ package cmp
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -96,15 +97,16 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
}
|
||||
|
||||
// Auto-detect the type of the data.
|
||||
var isLinedText, isText, isBinary bool
|
||||
var sx, sy string
|
||||
var ssx, ssy []string
|
||||
var isString, isMostlyText, isPureLinedText, isBinary bool
|
||||
switch {
|
||||
case t.Kind() == reflect.String:
|
||||
sx, sy = vx.String(), vy.String()
|
||||
isText = true // Initial estimate, verify later
|
||||
isString = true
|
||||
case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
|
||||
sx, sy = string(vx.Bytes()), string(vy.Bytes())
|
||||
isBinary = true // Initial estimate, verify later
|
||||
isString = true
|
||||
case t.Kind() == reflect.Array:
|
||||
// Arrays need to be addressable for slice operations to work.
|
||||
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
|
||||
@ -112,13 +114,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
vy2.Set(vy)
|
||||
vx, vy = vx2, vy2
|
||||
}
|
||||
if isText || isBinary {
|
||||
var numLines, lastLineIdx, maxLineLen int
|
||||
isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy)
|
||||
if isString {
|
||||
var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
|
||||
for i, r := range sx + sy {
|
||||
if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
|
||||
isBinary = true
|
||||
break
|
||||
numTotalRunes++
|
||||
if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
|
||||
numValidRunes++
|
||||
}
|
||||
if r == '\n' {
|
||||
if maxLineLen < i-lastLineIdx {
|
||||
@ -128,8 +129,26 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
numLines++
|
||||
}
|
||||
}
|
||||
isText = !isBinary
|
||||
isLinedText = isText && numLines >= 4 && maxLineLen <= 1024
|
||||
isPureText := numValidRunes == numTotalRunes
|
||||
isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
|
||||
isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
|
||||
isBinary = !isMostlyText
|
||||
|
||||
// Avoid diffing by lines if it produces a significantly more complex
|
||||
// edit script than diffing by bytes.
|
||||
if isPureLinedText {
|
||||
ssx = strings.Split(sx, "\n")
|
||||
ssy = strings.Split(sy, "\n")
|
||||
esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(ssx[ix] == ssy[iy])
|
||||
})
|
||||
esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(sx[ix] == sy[iy])
|
||||
})
|
||||
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
|
||||
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
|
||||
isPureLinedText = efficiencyLines < 4*efficiencyBytes
|
||||
}
|
||||
}
|
||||
|
||||
// Format the string into printable records.
|
||||
@ -138,9 +157,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
switch {
|
||||
// If the text appears to be multi-lined text,
|
||||
// then perform differencing across individual lines.
|
||||
case isLinedText:
|
||||
ssx := strings.Split(sx, "\n")
|
||||
ssy := strings.Split(sy, "\n")
|
||||
case isPureLinedText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
@ -229,7 +246,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
// If the text appears to be single-lined text,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is printed as quoted strings.
|
||||
case isText:
|
||||
case isMostlyText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
@ -237,7 +254,6 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
delim = ""
|
||||
|
||||
// If the text appears to be binary data,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
@ -299,7 +315,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
|
||||
// Wrap the output with appropriate type information.
|
||||
var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if !isText {
|
||||
if !isMostlyText {
|
||||
// The "{...}" byte-sequence literal is not valid Go syntax for strings.
|
||||
// Emit the type for extra clarity (e.g. "string{...}").
|
||||
if t.Kind() == reflect.String {
|
||||
@ -338,8 +354,11 @@ func (opts formatOptions) formatDiffSlice(
|
||||
vx, vy reflect.Value, chunkSize int, name string,
|
||||
makeRec func(reflect.Value, diffMode) textRecord,
|
||||
) (list textList) {
|
||||
es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result {
|
||||
return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface())
|
||||
eq := func(ix, iy int) bool {
|
||||
return vx.Index(ix).Interface() == vy.Index(iy).Interface()
|
||||
}
|
||||
es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(eq(ix, iy))
|
||||
})
|
||||
|
||||
appendChunks := func(v reflect.Value, d diffMode) int {
|
||||
@ -364,6 +383,7 @@ func (opts formatOptions) formatDiffSlice(
|
||||
|
||||
groups := coalesceAdjacentEdits(name, es)
|
||||
groups = coalesceInterveningIdentical(groups, chunkSize/4)
|
||||
groups = cleanupSurroundingIdentical(groups, eq)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
@ -416,25 +436,36 @@ func (opts formatOptions) formatDiffSlice(
|
||||
|
||||
// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
|
||||
// equal or unequal counts.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: "..XXY...Y"
|
||||
// Output: [
|
||||
// {NumIdentical: 2},
|
||||
// {NumRemoved: 2, NumInserted 1},
|
||||
// {NumIdentical: 3},
|
||||
// {NumInserted: 1},
|
||||
// ]
|
||||
//
|
||||
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
|
||||
var prevCase int // Arbitrary index into which case last occurred
|
||||
lastStats := func(i int) *diffStats {
|
||||
if prevCase != i {
|
||||
var prevMode byte
|
||||
lastStats := func(mode byte) *diffStats {
|
||||
if prevMode != mode {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevCase = i
|
||||
prevMode = mode
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case diff.Identity:
|
||||
lastStats(1).NumIdentical++
|
||||
lastStats('=').NumIdentical++
|
||||
case diff.UniqueX:
|
||||
lastStats(2).NumRemoved++
|
||||
lastStats('!').NumRemoved++
|
||||
case diff.UniqueY:
|
||||
lastStats(2).NumInserted++
|
||||
lastStats('!').NumInserted++
|
||||
case diff.Modified:
|
||||
lastStats(2).NumModified++
|
||||
lastStats('!').NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
@ -444,6 +475,35 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats)
|
||||
// equal groups into adjacent unequal groups that currently result in a
|
||||
// dual inserted/removed printout. This acts as a high-pass filter to smooth
|
||||
// out high-frequency changes within the windowSize.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// WindowSize: 16,
|
||||
// Input: [
|
||||
// {NumIdentical: 61}, // group 0
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 1
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 9}, // └── coalesce
|
||||
// {NumIdentical: 64}, // group 2
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 3
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 7}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 2}, // └── coalesce
|
||||
// {NumIdentical: 63}, // group 4
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 64},
|
||||
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 63},
|
||||
// ]
|
||||
//
|
||||
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
|
||||
groups, groupsOrig := groups[:0], groups
|
||||
for i, ds := range groupsOrig {
|
||||
@ -463,3 +523,91 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// cleanupSurroundingIdentical scans through all unequal groups, and
|
||||
// moves any leading sequence of equal elements to the preceding equal group and
|
||||
// moves and trailing sequence of equal elements to the succeeding equal group.
|
||||
//
|
||||
// This is necessary since coalesceInterveningIdentical may coalesce edit groups
|
||||
// together such that leading/trailing spans of equal elements becomes possible.
|
||||
// Note that this can occur even with an optimal diffing algorithm.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
|
||||
// {NumIdentical: 67},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
|
||||
// {NumIdentical: 54},
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 64}, // incremented by 3
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 67},
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 64}, // incremented by 10
|
||||
// ]
|
||||
//
|
||||
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
|
||||
var ix, iy int // indexes into sequence x and y
|
||||
for i, ds := range groups {
|
||||
// Handle equal group.
|
||||
if ds.NumDiff() == 0 {
|
||||
ix += ds.NumIdentical
|
||||
iy += ds.NumIdentical
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal group.
|
||||
nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
|
||||
ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
|
||||
var numLeadingIdentical, numTrailingIdentical int
|
||||
for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ {
|
||||
numLeadingIdentical++
|
||||
}
|
||||
for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ {
|
||||
numTrailingIdentical++
|
||||
}
|
||||
if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
|
||||
if numLeadingIdentical > 0 {
|
||||
// Remove leading identical span from this group and
|
||||
// insert it into the preceding group.
|
||||
if i-1 >= 0 {
|
||||
groups[i-1].NumIdentical += numLeadingIdentical
|
||||
} else {
|
||||
// No preceding group exists, so prepend a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
|
||||
}()
|
||||
}
|
||||
// Increment indexes since the preceding group would have handled this.
|
||||
ix += numLeadingIdentical
|
||||
iy += numLeadingIdentical
|
||||
}
|
||||
if numTrailingIdentical > 0 {
|
||||
// Remove trailing identical span from this group and
|
||||
// insert it into the succeeding group.
|
||||
if i+1 < len(groups) {
|
||||
groups[i+1].NumIdentical += numTrailingIdentical
|
||||
} else {
|
||||
// No succeeding group exists, so append a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
|
||||
}()
|
||||
}
|
||||
// Do not increment indexes since the succeeding group will handle this.
|
||||
}
|
||||
|
||||
// Update this group since some identical elements were removed.
|
||||
nx -= numIdentical
|
||||
ny -= numIdentical
|
||||
groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
|
||||
}
|
||||
ix += nx
|
||||
iy += ny
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
202
vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
# Created by .ignore support plugin (hsz.mobi)
|
||||
### Go template
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
### Windows template
|
||||
# Windows image file caches
|
||||
Thumbs.db
|
||||
ehthumbs.db
|
||||
|
||||
# Folder config file
|
||||
Desktop.ini
|
||||
|
||||
# Recycle Bin used on file shares
|
||||
$RECYCLE.BIN/
|
||||
|
||||
# Windows Installer files
|
||||
*.cab
|
||||
*.msi
|
||||
*.msm
|
||||
*.msp
|
||||
|
||||
# Windows shortcuts
|
||||
*.lnk
|
||||
### Kate template
|
||||
# Swap Files #
|
||||
.*.kate-swp
|
||||
.swp.*
|
||||
### SublimeText template
|
||||
# cache files for sublime text
|
||||
*.tmlanguage.cache
|
||||
*.tmPreferences.cache
|
||||
*.stTheme.cache
|
||||
|
||||
# workspace files are user-specific
|
||||
*.sublime-workspace
|
||||
|
||||
# project files should be checked into the repository, unless a significant
|
||||
# proportion of contributors will probably not be using SublimeText
|
||||
# *.sublime-project
|
||||
|
||||
# sftp configuration file
|
||||
sftp-config.json
|
||||
### Linux template
|
||||
*~
|
||||
|
||||
# temporary files which can be created if a process still has a handle open of a deleted file
|
||||
.fuse_hidden*
|
||||
|
||||
# KDE directory preferences
|
||||
.directory
|
||||
|
||||
# Linux trash folder which might appear on any partition or disk
|
||||
.Trash-*
|
||||
### JetBrains template
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
|
||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||
|
||||
# User-specific stuff:
|
||||
.idea
|
||||
.idea/tasks.xml
|
||||
.idea/dictionaries
|
||||
.idea/vcs.xml
|
||||
.idea/jsLibraryMappings.xml
|
||||
|
||||
# Sensitive or high-churn files:
|
||||
.idea/dataSources.ids
|
||||
.idea/dataSources.xml
|
||||
.idea/dataSources.local.xml
|
||||
.idea/sqlDataSources.xml
|
||||
.idea/dynamic.xml
|
||||
.idea/uiDesigner.xml
|
||||
|
||||
# Gradle:
|
||||
.idea/gradle.xml
|
||||
.idea/libraries
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
.idea/mongoSettings.xml
|
||||
|
||||
## File-based project format:
|
||||
*.iws
|
||||
|
||||
## Plugin-specific files:
|
||||
|
||||
# IntelliJ
|
||||
/out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
### Xcode template
|
||||
# Xcode
|
||||
#
|
||||
# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
|
||||
|
||||
## Build generated
|
||||
build/
|
||||
DerivedData/
|
||||
|
||||
## Various settings
|
||||
*.pbxuser
|
||||
!default.pbxuser
|
||||
*.mode1v3
|
||||
!default.mode1v3
|
||||
*.mode2v3
|
||||
!default.mode2v3
|
||||
*.perspectivev3
|
||||
!default.perspectivev3
|
||||
xcuserdata/
|
||||
|
||||
## Other
|
||||
*.moved-aside
|
||||
*.xccheckout
|
||||
*.xcscmblueprint
|
||||
### Eclipse template
|
||||
|
||||
.metadata
|
||||
bin/
|
||||
tmp/
|
||||
*.tmp
|
||||
*.bak
|
||||
*.swp
|
||||
*~.nib
|
||||
local.properties
|
||||
.settings/
|
||||
.loadpath
|
||||
.recommenders
|
||||
|
||||
# Eclipse Core
|
||||
.project
|
||||
|
||||
# External tool builders
|
||||
.externalToolBuilders/
|
||||
|
||||
# Locally stored "Eclipse launch configurations"
|
||||
*.launch
|
||||
|
||||
# PyDev specific (Python IDE for Eclipse)
|
||||
*.pydevproject
|
||||
|
||||
# CDT-specific (C/C++ Development Tooling)
|
||||
.cproject
|
||||
|
||||
# JDT-specific (Eclipse Java Development Tools)
|
||||
.classpath
|
||||
|
||||
# Java annotation processor (APT)
|
||||
.factorypath
|
||||
|
||||
# PDT-specific (PHP Development Tools)
|
||||
.buildpath
|
||||
|
||||
# sbteclipse plugin
|
||||
.target
|
||||
|
||||
# Tern plugin
|
||||
.tern-project
|
||||
|
||||
# TeXlipse plugin
|
||||
.texlipse
|
||||
|
||||
# STS (Spring Tool Suite)
|
||||
.springBeans
|
||||
|
||||
# Code Recommenders
|
||||
.recommenders/
|
||||
|
||||
|
||||
coverage.txt
|
||||
|
||||
#vendor
|
||||
vendor/
|
21
vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,21 @@
sudo: false
language: go
go:
- 1.8.x
env:
- DEP_VERSION="0.3.2"

before_install:
# Download the binary to bin folder in $GOPATH
- curl -L -s https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -o $GOPATH/bin/dep
# Make the binary executable
- chmod +x $GOPATH/bin/dep

install:
- dep ensure

script:
- make test

after_success:
- bash <(curl -s https://codecov.io/bash)
30
vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,30 @@
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).

Types of changes:
- `Added` for new features.
- `Changed` for changes in existing functionality.
- `Deprecated` for soon-to-be removed features.
- `Removed` for now removed features.
- `Fixed` for any bug fixes.
- `Security` in case of vulnerabilities.

## [Unreleased]
### Added
- This CHANGELOG file to keep track of changes.

## 1.0.0 - 2018-05-08
### Added
- grpc_auth
- grpc_ctxtags
- grpc_zap
- grpc_logrus
- grpc_opentracing
- grpc_retry
- grpc_validator
- grpc_recovery

[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...HEAD
20
vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,20 @@
# Contributing

We would love to have people submit pull requests and help make `grpc-ecosystem/go-grpc-middleware` even better 👍.

Fork, then clone the repo:

```bash
git clone git@github.com:your-username/go-grpc-middleware.git
```

Before checking in please run the following:

```bash
make all
```

This will `vet`, `fmt`, regenerate documentation and run all tests.


Push to your fork and open a pull request.
123
vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata"]
|
||||
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
|
||||
version = "v0.16.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = ["gogoproto","proto","protoc-gen-gogo/descriptor"]
|
||||
revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
|
||||
version = "v0.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["jsonpb","proto","ptypes","ptypes/any","ptypes/duration","ptypes/struct","ptypes/timestamp"]
|
||||
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/opentracing/opentracing-go"
|
||||
packages = [".","ext","log","mocktracer"]
|
||||
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
|
||||
version = "v1.0.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = ["assert","require","suite"]
|
||||
revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
|
||||
version = "v1.1.4"
|
||||
|
||||
[[projects]]
|
||||
name = "go.uber.org/atomic"
|
||||
packages = ["."]
|
||||
revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8"
|
||||
version = "v1.3.1"
|
||||
|
||||
[[projects]]
|
||||
name = "go.uber.org/multierr"
|
||||
packages = ["."]
|
||||
revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "go.uber.org/zap"
|
||||
packages = [".","buffer","internal/bufferpool","internal/color","internal/exit","zapcore"]
|
||||
revision = "35aad584952c3e7020db7b839f6b102de6271f89"
|
||||
version = "v1.7.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
revision = "94eea52f7b742c7cbe0b03b22f0c4c8631ece122"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
|
||||
revision = "a8b9294777976932365dabb6640cf1468d95c70f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [".","google","internal","jws","jwt"]
|
||||
revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix","windows"]
|
||||
revision = "13fcbd661c8ececa8807a29b48407d674b1d8ed8"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
|
||||
revision = "75cc3cad82b5f47d3fb229ddda8c5167da14f294"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
revision = "7f0da29060c682909f650ad8ed4e515bd74fa12a"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","credentials/oauth","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
|
||||
revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
|
||||
version = "v1.8.0"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "b24c6670412eb0bc44ed1db77fecc52333f8725f3e3272bdc568f5683a63031f"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
35
vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml
generated
vendored
Normal file
@ -0,0 +1,35 @@
[[constraint]]
  name = "github.com/gogo/protobuf"
  version = "0.5.0"

[[constraint]]
  branch = "master"
  name = "github.com/golang/protobuf"

[[constraint]]
  name = "github.com/opentracing/opentracing-go"
  version = "1.0.2"

[[constraint]]
  name = "github.com/sirupsen/logrus"
  version = "1.0.3"

[[constraint]]
  name = "github.com/stretchr/testify"
  version = "1.1.4"

[[constraint]]
  name = "go.uber.org/zap"
  version = "1.7.1"

[[constraint]]
  branch = "master"
  name = "golang.org/x/net"

[[constraint]]
  branch = "master"
  name = "golang.org/x/oauth2"

[[constraint]]
  name = "google.golang.org/grpc"
  version = "1.8.0"
201
vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
84
vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
# Go gRPC Middleware
|
||||
|
||||
[](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware)
|
||||
[](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware)
|
||||
[](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware)
|
||||
[](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/?badge)
|
||||
[](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
|
||||
[](LICENSE)
|
||||
[](#status)
|
||||
[](https://join.slack.com/t/improbable-eng/shared_invite/enQtMzQ1ODcyMzQ5MjM4LWY5ZWZmNGM2ODc5MmViNmQ3ZTA3ZTY3NzQwOTBlMTkzZmIxZTIxODk0OWU3YjZhNWVlNDU3MDlkZGViZjhkMjc)
|
||||
|
||||
[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
|
||||
|
||||
## Middleware
|
||||
|
||||
[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
|
||||
Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
|
||||
that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client either around the user call. It is a perfect way to implement
|
||||
common patterns: auth, logging, message, validation, retries or monitoring.
|
||||
|
||||
These are generic building blocks that make it easy to build multiple microservices easily.
|
||||
The purpose of this repository is to act as a go-to point for such reusable functionality. It contains
|
||||
some of them itself, but also will link to useful external repos.
|
||||
|
||||
`grpc_middleware` itself provides support for chaining interceptors, here's an example:
|
||||
|
||||
```go
|
||||
import "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
|
||||
myServer := grpc.NewServer(
|
||||
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
|
||||
grpc_ctxtags.StreamServerInterceptor(),
|
||||
grpc_opentracing.StreamServerInterceptor(),
|
||||
grpc_prometheus.StreamServerInterceptor,
|
||||
grpc_zap.StreamServerInterceptor(zapLogger),
|
||||
grpc_auth.StreamServerInterceptor(myAuthFunction),
|
||||
grpc_recovery.StreamServerInterceptor(),
|
||||
)),
|
||||
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
|
||||
grpc_ctxtags.UnaryServerInterceptor(),
|
||||
grpc_opentracing.UnaryServerInterceptor(),
|
||||
grpc_prometheus.UnaryServerInterceptor,
|
||||
grpc_zap.UnaryServerInterceptor(zapLogger),
|
||||
grpc_auth.UnaryServerInterceptor(myAuthFunction),
|
||||
grpc_recovery.UnaryServerInterceptor(),
|
||||
)),
|
||||
)
|
||||
```
|
||||
|
||||
## Interceptors
|
||||
|
||||
*Please send a PR to add new interceptors or middleware to this list*
|
||||
|
||||
#### Auth
|
||||
* [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware
|
||||
|
||||
#### Logging
|
||||
* [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
|
||||
* [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
|
||||
* [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
|
||||
|
||||
|
||||
#### Monitoring
|
||||
* [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
|
||||
* [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
|
||||
* [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
|
||||
|
||||
#### Client
|
||||
* [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
|
||||
|
||||
#### Server
|
||||
* [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
|
||||
* [`grpc_recovery`](recovery/) - turn panics into gRPC errors
|
||||
|
||||
|
||||
## Status
|
||||
|
||||
This code has been running in *production* since May 2016 as the basis of the gRPC micro services stack at [Improbable](https://improbable.io).
|
||||
|
||||
Additional tooling will be added, and contributions are welcome.
|
||||
|
||||
## License
|
||||
|
||||
`go-grpc-middleware` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
|
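The Client section above lists `grpc_retry` without showing usage. As an illustrative sketch only (not part of the vendored README), client-side chaining with a retry interceptor might look like the following; the target address and retry options are assumptions:

```go
package main

import (
	"time"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	"google.golang.org/grpc"
)

func main() {
	// Chain a retry interceptor into the client connection. The target
	// address and the retry options below are illustrative assumptions.
	conn, err := grpc.Dial(
		"localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
			grpc_retry.UnaryClientInterceptor(
				grpc_retry.WithMax(3),
				grpc_retry.WithPerRetryTimeout(2*time.Second),
			),
		)),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```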
183
vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
generated
vendored
Normal file
183
vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
generated
vendored
Normal file
@ -0,0 +1,183 @@
|
||||
// Copyright 2016 Michal Witkowski. All Rights Reserved.
|
||||
// See LICENSE for licensing terms.
|
||||
|
||||
// gRPC Server Interceptor chaining middleware.
|
||||
|
||||
package grpc_middleware
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// ChainUnaryServer creates a single interceptor out of a chain of many interceptors.
|
||||
//
|
||||
// Execution is done in left-to-right order, including passing of context.
|
||||
// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
|
||||
// will see context changes of one and two.
|
||||
func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
|
||||
n := len(interceptors)
|
||||
|
||||
if n > 1 {
|
||||
lastI := n - 1
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
var (
|
||||
chainHandler grpc.UnaryHandler
|
||||
curI int
|
||||
)
|
||||
|
||||
chainHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
|
||||
if curI == lastI {
|
||||
return handler(currentCtx, currentReq)
|
||||
}
|
||||
curI++
|
||||
resp, err := interceptors[curI](currentCtx, currentReq, info, chainHandler)
|
||||
curI--
|
||||
return resp, err
|
||||
}
|
||||
|
||||
return interceptors[0](ctx, req, info, chainHandler)
|
||||
}
|
||||
}
|
||||
|
||||
if n == 1 {
|
||||
return interceptors[0]
|
||||
}
|
||||
|
||||
// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
|
||||
return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
return handler(ctx, req)
|
||||
}
|
||||
}
|
||||
|
||||
// ChainStreamServer creates a single interceptor out of a chain of many interceptors.
|
||||
//
|
||||
// Execution is done in left-to-right order, including passing of context.
|
||||
// For example ChainUnaryServer(one, two, three) will execute one before two before three.
|
||||
// If you want to pass context between interceptors, use WrapServerStream.
|
||||
func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
|
||||
n := len(interceptors)
|
||||
|
||||
if n > 1 {
|
||||
lastI := n - 1
|
||||
return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
var (
|
||||
chainHandler grpc.StreamHandler
|
||||
curI int
|
||||
)
|
||||
|
||||
chainHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error {
|
||||
if curI == lastI {
|
||||
return handler(currentSrv, currentStream)
|
||||
}
|
||||
curI++
|
||||
err := interceptors[curI](currentSrv, currentStream, info, chainHandler)
|
||||
curI--
|
||||
return err
|
||||
}
|
||||
|
||||
return interceptors[0](srv, stream, info, chainHandler)
|
||||
}
|
||||
}
|
||||
|
||||
if n == 1 {
|
||||
return interceptors[0]
|
||||
}
|
||||
|
||||
// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
|
||||
return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
return handler(srv, stream)
|
||||
}
|
||||
}
|
||||
|
||||
// ChainUnaryClient creates a single interceptor out of a chain of many interceptors.
|
||||
//
|
||||
// Execution is done in left-to-right order, including passing of context.
|
||||
// For example ChainUnaryClient(one, two, three) will execute one before two before three.
|
||||
func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
|
||||
n := len(interceptors)
|
||||
|
||||
if n > 1 {
|
||||
lastI := n - 1
|
||||
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
||||
var (
|
||||
chainHandler grpc.UnaryInvoker
|
||||
curI int
|
||||
)
|
||||
|
||||
chainHandler = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
|
||||
if curI == lastI {
|
||||
return invoker(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentOpts...)
|
||||
}
|
||||
curI++
|
||||
err := interceptors[curI](currentCtx, currentMethod, currentReq, currentRepl, currentConn, chainHandler, currentOpts...)
|
||||
curI--
|
||||
return err
|
||||
}
|
||||
|
||||
return interceptors[0](ctx, method, req, reply, cc, chainHandler, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
if n == 1 {
|
||||
return interceptors[0]
|
||||
}
|
||||
|
||||
// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
|
||||
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
|
||||
return invoker(ctx, method, req, reply, cc, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ChainStreamClient creates a single interceptor out of a chain of many interceptors.
|
||||
//
|
||||
// Execution is done in left-to-right order, including passing of context.
|
||||
// For example ChainStreamClient(one, two, three) will execute one before two before three.
|
||||
func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
|
||||
n := len(interceptors)
|
||||
|
||||
if n > 1 {
|
||||
lastI := n - 1
|
||||
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
var (
|
||||
chainHandler grpc.Streamer
|
||||
curI int
|
||||
)
|
||||
|
||||
chainHandler = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
if curI == lastI {
|
||||
return streamer(currentCtx, currentDesc, currentConn, currentMethod, currentOpts...)
|
||||
}
|
||||
curI++
|
||||
stream, err := interceptors[curI](currentCtx, currentDesc, currentConn, currentMethod, chainHandler, currentOpts...)
|
||||
curI--
|
||||
return stream, err
|
||||
}
|
||||
|
||||
return interceptors[0](ctx, desc, cc, method, chainHandler, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
if n == 1 {
|
||||
return interceptors[0]
|
||||
}
|
||||
|
||||
// n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
|
||||
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
return streamer(ctx, desc, cc, method, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// Chain creates a single interceptor out of a chain of many interceptors.
|
||||
//
|
||||
// WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors.
|
||||
// Basically syntactic sugar.
|
||||
func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption {
|
||||
return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...))
|
||||
}
|
||||
|
||||
// WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors.
|
||||
// Basically syntactic sugar.
|
||||
func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption {
|
||||
return grpc.StreamInterceptor(ChainStreamServer(interceptors...))
|
||||
}
|
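As a brief usage sketch of the syntactic-sugar options defined at the end of this file (not part of the vendored source), a server can be wired with chained interceptors like so; the two no-op interceptors are hypothetical placeholders:

```go
package main

import (
	"context"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

func main() {
	// Two trivial unary interceptors, used only to demonstrate ordering:
	// with WithUnaryServerChain(first, second), "first" runs before "second".
	first := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		return handler(ctx, req)
	}
	second := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		return handler(ctx, req)
	}

	// WithUnaryServerChain is the grpc.ServerOption sugar defined in chain.go.
	srv := grpc.NewServer(
		grpc_middleware.WithUnaryServerChain(first, second),
	)
	_ = srv
}
```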
69
vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
generated
vendored
Normal file
69
vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
|
||||
// Copyright 2016 Michal Witkowski. All Rights Reserved.
|
||||
// See LICENSE for licensing terms.
|
||||
|
||||
/*
|
||||
`grpc_middleware` is a collection of gRPC middleware packages: interceptors, helpers and tools.
|
||||
|
||||
Middleware
|
||||
|
||||
gRPC is a fantastic RPC framework, which sees a lot of adoption in the Golang world. However, the
upstream gRPC codebase is relatively bare bones.
|
||||
|
||||
This package, and most of its child packages, provide commonly needed middleware for gRPC:
client-side interceptors for retries, server-side interceptors for input validation and auth,
functions for chaining said interceptors, metadata convenience methods and more.
|
||||
|
||||
Chaining
|
||||
|
||||
By default, gRPC doesn't allow one to have more than one interceptor on either the client or
the server side. `grpc_middleware` provides convenient chaining methods.

These give you a simple way of turning multiple interceptors into a single interceptor. Here's an example of
server-side chaining:
|
||||
|
||||
myServer := grpc.NewServer(
|
||||
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)),
|
||||
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary)),
|
||||
)
|
||||
|
||||
These interceptors will be executed from left to right: logging, monitoring and auth.
|
||||
|
||||
Here's an example for client side chaining:
|
||||
|
||||
clientConn, err = grpc.Dial(
|
||||
address,
|
||||
grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(monitoringClientUnary, retryUnary)),
|
||||
grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(monitoringClientStream, retryStream)),
|
||||
)
|
||||
client = pb_testproto.NewTestServiceClient(clientConn)
|
||||
resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
|
||||
|
||||
These interceptors will be executed from left to right: monitoring and then retry logic.
|
||||
|
||||
The retry interceptor will call every interceptor that follows it whenever a retry happens.
|
||||
|
||||
Writing Your Own
|
||||
|
||||
Implementing your own interceptor is pretty trivial: there are interfaces for that. But the interesting
bit is exposing common data to handlers (and other middleware), similarly to HTTP middleware design.
|
||||
For example, you may want to pass the identity of the caller from the auth interceptor all the way
|
||||
to the handling function.
|
||||
|
||||
For example, a server-side unary interceptor for auth looks like:
|
||||
|
||||
func FakeAuthUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
newCtx := context.WithValue(ctx, "user_id", "john@example.com")
|
||||
return handler(newCtx, req)
|
||||
}
|
||||
|
||||
Unfortunately, it's not as easy for streaming RPCs. These have the `context.Context` embedded within
|
||||
the `grpc.ServerStream` object. To pass values through context, a wrapper (`WrappedServerStream`) is
|
||||
needed. For example:
|
||||
|
||||
func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
    newStream := grpc_middleware.WrapServerStream(stream)
    newStream.WrappedContext = context.WithValue(stream.Context(), "user_id", "john@example.com")
    return handler(srv, newStream)
}
|
||||
*/
|
||||
package grpc_middleware
|
16
vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
generated
vendored
Normal file
16
vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
SHELL=/bin/bash
|
||||
|
||||
GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/)
|
||||
|
||||
all: vet fmt test
|
||||
|
||||
fmt:
|
||||
go fmt $(GOFILES_NOVENDOR)
|
||||
|
||||
vet:
|
||||
go vet $(GOFILES_NOVENDOR)
|
||||
|
||||
test: vet
|
||||
./scripts/test_all.sh
|
||||
|
||||
.PHONY: all test
|
BIN
vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png
generated
vendored
Normal file
BIN
vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png
generated
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 5.0 KiB |
29
vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
generated
vendored
Normal file
29
vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2016 Michal Witkowski. All Rights Reserved.
|
||||
// See LICENSE for licensing terms.
|
||||
|
||||
package grpc_middleware
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// WrappedServerStream is a thin wrapper around grpc.ServerStream that allows modifying context.
|
||||
type WrappedServerStream struct {
|
||||
grpc.ServerStream
|
||||
// WrappedContext is the wrapper's own Context. You can assign it.
|
||||
WrappedContext context.Context
|
||||
}
|
||||
|
||||
// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context()
|
||||
func (w *WrappedServerStream) Context() context.Context {
|
||||
return w.WrappedContext
|
||||
}
|
||||
|
||||
// WrapServerStream returns a ServerStream that has the ability to overwrite context.
|
||||
func WrapServerStream(stream grpc.ServerStream) *WrappedServerStream {
|
||||
if existing, ok := stream.(*WrappedServerStream); ok {
|
||||
return existing
|
||||
}
|
||||
return &WrappedServerStream{ServerStream: stream, WrappedContext: stream.Context()}
|
||||
}
|
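For illustration only (not part of the vendored file), a stream interceptor can use `WrapServerStream` to attach a value to the stream's context; the context key and value here are assumptions:

```go
package main

import (
	"context"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

// ctxKey is a private key type to avoid collisions in context values.
type ctxKey struct{}

// userStreamInterceptor wraps the incoming stream so that a value added to
// the context is visible to the downstream handler via stream.Context().
func userStreamInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	wrapped := grpc_middleware.WrapServerStream(stream)
	wrapped.WrappedContext = context.WithValue(stream.Context(), ctxKey{}, "john@example.com")
	// Pass the wrapped stream on, so handlers see the enriched context.
	return handler(srv, wrapped)
}

func main() {
	// Register the interceptor on a server; service registration is omitted.
	srv := grpc.NewServer(grpc.StreamInterceptor(userStreamInterceptor))
	_ = srv
}
```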
27
vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
generated
vendored
Normal file
27
vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2015, Gengo, Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of Gengo, Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
23
vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
generated
vendored
Normal file
23
vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
load("@rules_proto//proto:defs.bzl", "proto_library")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
proto_library(
|
||||
name = "internal_proto",
|
||||
srcs = ["errors.proto"],
|
||||
deps = ["@com_google_protobuf//:any_proto"],
|
||||
)
|
||||
|
||||
go_proto_library(
|
||||
name = "internal_go_proto",
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
|
||||
proto = ":internal_proto",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
embed = [":internal_go_proto"],
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
|
||||
)
|
189
vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go
generated
vendored
Normal file
189
vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go
generated
vendored
Normal file
@ -0,0 +1,189 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: internal/errors.proto
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
any "github.com/golang/protobuf/ptypes/any"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// Error is the generic error returned from unary RPCs.
|
||||
type Error struct {
|
||||
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
|
||||
// This is to make the error more compatible with users that expect errors to be Status objects:
|
||||
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
|
||||
// It should be the exact same message as the Error field.
|
||||
Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Error) Reset() { *m = Error{} }
|
||||
func (m *Error) String() string { return proto.CompactTextString(m) }
|
||||
func (*Error) ProtoMessage() {}
|
||||
func (*Error) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9b093362ca6d1e03, []int{0}
|
||||
}
|
||||
|
||||
func (m *Error) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Error.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Error.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Error) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Error.Merge(m, src)
|
||||
}
|
||||
func (m *Error) XXX_Size() int {
|
||||
return xxx_messageInfo_Error.Size(m)
|
||||
}
|
||||
func (m *Error) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Error.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Error proto.InternalMessageInfo
|
||||
|
||||
func (m *Error) GetError() string {
|
||||
if m != nil {
|
||||
return m.Error
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Error) GetCode() int32 {
|
||||
if m != nil {
|
||||
return m.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Error) GetMessage() string {
|
||||
if m != nil {
|
||||
return m.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Error) GetDetails() []*any.Any {
|
||||
if m != nil {
|
||||
return m.Details
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StreamError is a response type which is returned when
|
||||
// streaming rpc returns an error.
|
||||
type StreamError struct {
|
||||
GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
|
||||
HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
|
||||
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
|
||||
HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
|
||||
Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StreamError) Reset() { *m = StreamError{} }
|
||||
func (m *StreamError) String() string { return proto.CompactTextString(m) }
|
||||
func (*StreamError) ProtoMessage() {}
|
||||
func (*StreamError) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9b093362ca6d1e03, []int{1}
|
||||
}
|
||||
|
||||
func (m *StreamError) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StreamError.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *StreamError) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StreamError.Merge(m, src)
|
||||
}
|
||||
func (m *StreamError) XXX_Size() int {
|
||||
return xxx_messageInfo_StreamError.Size(m)
|
||||
}
|
||||
func (m *StreamError) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StreamError.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StreamError proto.InternalMessageInfo
|
||||
|
||||
func (m *StreamError) GetGrpcCode() int32 {
|
||||
if m != nil {
|
||||
return m.GrpcCode
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *StreamError) GetHttpCode() int32 {
|
||||
if m != nil {
|
||||
return m.HttpCode
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *StreamError) GetMessage() string {
|
||||
if m != nil {
|
||||
return m.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *StreamError) GetHttpStatus() string {
|
||||
if m != nil {
|
||||
return m.HttpStatus
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *StreamError) GetDetails() []*any.Any {
|
||||
if m != nil {
|
||||
return m.Details
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error")
|
||||
proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) }
|
||||
|
||||
var fileDescriptor_9b093362ca6d1e03 = []byte{
|
||||
// 252 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30,
|
||||
0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52,
|
||||
0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24,
|
||||
0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c,
|
||||
0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2,
|
||||
0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1,
|
||||
0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b,
|
||||
0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c,
|
||||
0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b,
|
||||
0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72,
|
||||
0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3,
|
||||
0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6,
|
||||
0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7,
|
||||
0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae,
|
||||
0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03,
|
||||
0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00,
|
||||
}
|
26
vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto
generated
vendored
Normal file
26
vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
syntax = "proto3";
|
||||
package grpc.gateway.runtime;
|
||||
option go_package = "internal";
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
// Error is the generic error returned from unary RPCs.
|
||||
message Error {
|
||||
string error = 1;
|
||||
// This is to make the error more compatible with users that expect errors to be Status objects:
|
||||
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
|
||||
// It should be the exact same message as the Error field.
|
||||
int32 code = 2;
|
||||
string message = 3;
|
||||
repeated google.protobuf.Any details = 4;
|
||||
}
|
||||
|
||||
// StreamError is a response type which is returned when
|
||||
// streaming rpc returns an error.
|
||||
message StreamError {
|
||||
int32 grpc_code = 1;
|
||||
int32 http_code = 2;
|
||||
string message = 3;
|
||||
string http_status = 4;
|
||||
repeated google.protobuf.Any details = 5;
|
||||
}
|
85
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
generated
vendored
Normal file
85
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"context.go",
|
||||
"convert.go",
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"fieldmask.go",
|
||||
"handler.go",
|
||||
"marshal_httpbodyproto.go",
|
||||
"marshal_json.go",
|
||||
"marshal_jsonpb.go",
|
||||
"marshal_proto.go",
|
||||
"marshaler.go",
|
||||
"marshaler_registry.go",
|
||||
"mux.go",
|
||||
"pattern.go",
|
||||
"proto2_convert.go",
|
||||
"proto_errors.go",
|
||||
"query.go",
|
||||
],
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
|
||||
deps = [
|
||||
"//internal:go_default_library",
|
||||
"//utilities:go_default_library",
|
||||
"@com_github_golang_protobuf//descriptor:go_default_library_gen",
|
||||
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
|
||||
"@com_github_golang_protobuf//proto:go_default_library",
|
||||
"@go_googleapis//google/api:httpbody_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:any_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//grpclog:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"context_test.go",
|
||||
"convert_test.go",
|
||||
"errors_test.go",
|
||||
"fieldmask_test.go",
|
||||
"handler_test.go",
|
||||
"marshal_httpbodyproto_test.go",
|
||||
"marshal_json_test.go",
|
||||
"marshal_jsonpb_test.go",
|
||||
"marshal_proto_test.go",
|
||||
"marshaler_registry_test.go",
|
||||
"mux_test.go",
|
||||
"pattern_test.go",
|
||||
"query_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//internal:go_default_library",
|
||||
"//runtime/internal/examplepb:go_default_library",
|
||||
"//utilities:go_default_library",
|
||||
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
|
||||
"@com_github_golang_protobuf//proto:go_default_library",
|
||||
"@com_github_golang_protobuf//ptypes:go_default_library_gen",
|
||||
"@go_googleapis//google/api:httpbody_go_proto",
|
||||
"@go_googleapis//google/rpc:errdetails_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:struct_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
],
|
||||
)
|
291
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
generated
vendored
Normal file
291
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
generated
vendored
Normal file
@ -0,0 +1,291 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// MetadataHeaderPrefix is the http prefix that represents custom metadata
|
||||
// parameters to or from a gRPC call.
|
||||
const MetadataHeaderPrefix = "Grpc-Metadata-"
|
||||
|
||||
// MetadataPrefix is prepended to permanent HTTP header keys (as specified
|
||||
// by the IANA) when added to the gRPC context.
|
||||
const MetadataPrefix = "grpcgateway-"
|
||||
|
||||
// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
|
||||
// HTTP headers in a response handled by grpc-gateway
|
||||
const MetadataTrailerPrefix = "Grpc-Trailer-"
|
||||
|
||||
const metadataGrpcTimeout = "Grpc-Timeout"
|
||||
const metadataHeaderBinarySuffix = "-Bin"
|
||||
|
||||
const xForwardedFor = "X-Forwarded-For"
|
||||
const xForwardedHost = "X-Forwarded-Host"
|
||||
|
||||
var (
|
||||
// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
|
||||
// header isn't present. If the value is 0 the sent `context` will not have a timeout.
|
||||
DefaultContextTimeout = 0 * time.Second
|
||||
)
|
||||
|
||||
func decodeBinHeader(v string) ([]byte, error) {
|
||||
if len(v)%4 == 0 {
|
||||
// Input was padded, or padding was not necessary.
|
||||
return base64.StdEncoding.DecodeString(v)
|
||||
}
|
||||
return base64.RawStdEncoding.DecodeString(v)
|
||||
}
|
||||
|
||||
/*
|
||||
AnnotateContext adds context information such as metadata from the request.
|
||||
|
||||
At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
|
||||
except that the forwarded destination is not another HTTP service but rather
|
||||
a gRPC service.
|
||||
*/
|
||||
func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
|
||||
ctx, md, err := annotateContext(ctx, mux, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if md == nil {
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
return metadata.NewOutgoingContext(ctx, md), nil
|
||||
}
|
||||
|
||||
// AnnotateIncomingContext adds context information such as metadata from the request.
|
||||
// Attach metadata as incoming context.
|
||||
func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
|
||||
ctx, md, err := annotateContext(ctx, mux, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if md == nil {
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
return metadata.NewIncomingContext(ctx, md), nil
|
||||
}
|
||||
|
||||
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) {
|
||||
var pairs []string
|
||||
timeout := DefaultContextTimeout
|
||||
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
|
||||
var err error
|
||||
timeout, err = timeoutDecode(tm)
|
||||
if err != nil {
|
||||
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
|
||||
}
|
||||
}
|
||||
|
||||
for key, vals := range req.Header {
|
||||
key = textproto.CanonicalMIMEHeaderKey(key)
|
||||
for _, val := range vals {
|
||||
// For backwards-compatibility, pass through 'authorization' header with no prefix.
|
||||
if key == "Authorization" {
|
||||
pairs = append(pairs, "authorization", val)
|
||||
}
|
||||
if h, ok := mux.incomingHeaderMatcher(key); ok {
|
||||
// Handles "-bin" metadata in grpc, since grpc will do another base64
|
||||
// encode before sending to server, we need to decode it first.
|
||||
if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
|
||||
b, err := decodeBinHeader(val)
|
||||
if err != nil {
|
||||
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
|
||||
}
|
||||
|
||||
val = string(b)
|
||||
}
|
||||
pairs = append(pairs, h, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
if host := req.Header.Get(xForwardedHost); host != "" {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedHost), host)
|
||||
} else if req.Host != "" {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
|
||||
}
|
||||
|
||||
if addr := req.RemoteAddr; addr != "" {
|
||||
if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
|
||||
if fwd := req.Header.Get(xForwardedFor); fwd == "" {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
|
||||
} else {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if timeout != 0 {
|
||||
ctx, _ = context.WithTimeout(ctx, timeout)
|
||||
}
|
||||
if len(pairs) == 0 {
|
||||
return ctx, nil, nil
|
||||
}
|
||||
md := metadata.Pairs(pairs...)
|
||||
for _, mda := range mux.metadataAnnotators {
|
||||
md = metadata.Join(md, mda(ctx, req))
|
||||
}
|
||||
return ctx, md, nil
|
||||
}
|
||||
|
||||
// ServerMetadata consists of metadata sent from gRPC server.
|
||||
type ServerMetadata struct {
|
||||
HeaderMD metadata.MD
|
||||
TrailerMD metadata.MD
|
||||
}
|
||||
|
||||
type serverMetadataKey struct{}
|
||||
|
||||
// NewServerMetadataContext creates a new context with ServerMetadata
|
||||
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
|
||||
return context.WithValue(ctx, serverMetadataKey{}, md)
|
||||
}
|
||||
|
||||
// ServerMetadataFromContext returns the ServerMetadata in ctx
|
||||
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
|
||||
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
|
||||
return
|
||||
}
|
||||
|
||||
// ServerTransportStream implements grpc.ServerTransportStream.
|
||||
// It should only be used by the generated files to support grpc.SendHeader
|
||||
// outside of gRPC server use.
|
||||
type ServerTransportStream struct {
|
||||
mu sync.Mutex
|
||||
header metadata.MD
|
||||
trailer metadata.MD
|
||||
}
|
||||
|
||||
// Method returns the method for the stream.
|
||||
func (s *ServerTransportStream) Method() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Header returns the header metadata of the stream.
|
||||
func (s *ServerTransportStream) Header() metadata.MD {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.header.Copy()
|
||||
}
|
||||
|
||||
// SetHeader sets the header metadata.
|
||||
func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.header = metadata.Join(s.header, md)
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendHeader sets the header metadata.
|
||||
func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
|
||||
return s.SetHeader(md)
|
||||
}
|
||||
|
||||
// Trailer returns the cached trailer metadata.
|
||||
func (s *ServerTransportStream) Trailer() metadata.MD {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.trailer.Copy()
|
||||
}
|
||||
|
||||
// SetTrailer sets the trailer metadata.
|
||||
func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.trailer = metadata.Join(s.trailer, md)
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func timeoutDecode(s string) (time.Duration, error) {
|
||||
size := len(s)
|
||||
if size < 2 {
|
||||
return 0, fmt.Errorf("timeout string is too short: %q", s)
|
||||
}
|
||||
d, ok := timeoutUnitToDuration(s[size-1])
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
|
||||
}
|
||||
t, err := strconv.ParseInt(s[:size-1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return d * time.Duration(t), nil
|
||||
}
|
||||
|
||||
func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
|
||||
switch u {
|
||||
case 'H':
|
||||
return time.Hour, true
|
||||
case 'M':
|
||||
return time.Minute, true
|
||||
case 'S':
|
||||
return time.Second, true
|
||||
case 'm':
|
||||
return time.Millisecond, true
|
||||
case 'u':
|
||||
return time.Microsecond, true
|
||||
case 'n':
|
||||
return time.Nanosecond, true
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// isPermanentHTTPHeader checks whether hdr belongs to the list of
|
||||
// permanent request headers maintained by IANA.
|
||||
// http://www.iana.org/assignments/message-headers/message-headers.xml
|
||||
func isPermanentHTTPHeader(hdr string) bool {
|
||||
switch hdr {
|
||||
case
|
||||
"Accept",
|
||||
"Accept-Charset",
|
||||
"Accept-Language",
|
||||
"Accept-Ranges",
|
||||
"Authorization",
|
||||
"Cache-Control",
|
||||
"Content-Type",
|
||||
"Cookie",
|
||||
"Date",
|
||||
"Expect",
|
||||
"From",
|
||||
"Host",
|
||||
"If-Match",
|
||||
"If-Modified-Since",
|
||||
"If-None-Match",
|
||||
"If-Schedule-Tag-Match",
|
||||
"If-Unmodified-Since",
|
||||
"Max-Forwards",
|
||||
"Origin",
|
||||
"Pragma",
|
||||
"Referer",
|
||||
"User-Agent",
|
||||
"Via",
|
||||
"Warning":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
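As a worked illustration of the `Grpc-Timeout` header decoding above (timeoutDecode is unexported, so this standalone sketch re-implements the same integer-prefix-plus-unit-suffix scheme; it is not part of the vendored file):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// decodeGrpcTimeout mirrors the timeoutDecode logic for illustration:
// an integer prefix followed by a single unit character (H, M, S, m, u, n).
func decodeGrpcTimeout(s string) (time.Duration, error) {
	units := map[byte]time.Duration{
		'H': time.Hour, 'M': time.Minute, 'S': time.Second,
		'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
	}
	if len(s) < 2 {
		return 0, fmt.Errorf("timeout string is too short: %q", s)
	}
	unit, ok := units[s[len(s)-1]]
	if !ok {
		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
	}
	n, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, err
	}
	return time.Duration(n) * unit, nil
}

func main() {
	// A Grpc-Timeout header value of "100m" decodes to 100 milliseconds.
	d, _ := decodeGrpcTimeout("100m")
	fmt.Println(d) // 100ms
}
```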
318
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
generated
vendored
Normal file
318
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
generated
vendored
Normal file
@ -0,0 +1,318 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
"github.com/golang/protobuf/ptypes/duration"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
)
|
||||
|
||||
// String just returns the given string.
|
||||
// It is just for compatibility to other types.
|
||||
func String(val string) (string, error) {
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// StringSlice converts 'val' where individual strings are separated by
|
||||
// 'sep' into a string slice.
|
||||
func StringSlice(val, sep string) ([]string, error) {
|
||||
return strings.Split(val, sep), nil
|
||||
}
|
||||
|
||||
// Bool converts the given string representation of a boolean value into bool.
|
||||
func Bool(val string) (bool, error) {
|
||||
return strconv.ParseBool(val)
|
||||
}
|
||||
|
||||
// BoolSlice converts 'val' where individual booleans are separated by
|
||||
// 'sep' into a bool slice.
|
||||
func BoolSlice(val, sep string) ([]bool, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]bool, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Bool(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Float64 converts the given string representation of a floating point number into float64.
|
||||
func Float64(val string) (float64, error) {
|
||||
return strconv.ParseFloat(val, 64)
|
||||
}
|
||||
|
||||
// Float64Slice converts 'val' where individual floating point numbers are separated by
|
||||
// 'sep' into a float64 slice.
|
||||
func Float64Slice(val, sep string) ([]float64, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]float64, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Float64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Float32 converts the given string representation of a floating point number into float32.
|
||||
func Float32(val string) (float32, error) {
|
||||
f, err := strconv.ParseFloat(val, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return float32(f), nil
|
||||
}
|
||||
|
||||
// Float32Slice converts 'val' where individual floating point numbers are separated by
|
||||
// 'sep' into a float32 slice.
|
||||
func Float32Slice(val, sep string) ([]float32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]float32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Float32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Int64 converts the given string representation of an integer into int64.
|
||||
func Int64(val string) (int64, error) {
|
||||
return strconv.ParseInt(val, 0, 64)
|
||||
}
|
||||
|
||||
// Int64Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a int64 slice.
|
||||
func Int64Slice(val, sep string) ([]int64, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]int64, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Int64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Int32 converts the given string representation of an integer into int32.
|
||||
func Int32(val string) (int32, error) {
|
||||
i, err := strconv.ParseInt(val, 0, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int32(i), nil
|
||||
}
|
||||
|
||||
// Int32Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a int32 slice.
|
||||
func Int32Slice(val, sep string) ([]int32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]int32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Int32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Uint64 converts the given string representation of an integer into uint64.
|
||||
func Uint64(val string) (uint64, error) {
|
||||
return strconv.ParseUint(val, 0, 64)
|
||||
}
|
||||
|
||||
// Uint64Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a uint64 slice.
|
||||
func Uint64Slice(val, sep string) ([]uint64, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]uint64, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Uint64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Uint32 converts the given string representation of an integer into uint32.
|
||||
func Uint32(val string) (uint32, error) {
|
||||
i, err := strconv.ParseUint(val, 0, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint32(i), nil
|
||||
}
|
||||
|
||||
// Uint32Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a uint32 slice.
|
||||
func Uint32Slice(val, sep string) ([]uint32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]uint32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Uint32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Bytes converts the given string representation of a byte sequence into a slice of bytes
|
||||
// A byte sequence is encoded in URL-safe base64 without padding.
|
||||
func Bytes(val string) ([]byte, error) {
|
||||
b, err := base64.StdEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
b, err = base64.URLEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
|
||||
// base64 without padding, are separated by 'sep' into a slice of byte slices.
|
||||
func BytesSlice(val, sep string) ([][]byte, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([][]byte, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Bytes(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
|
||||
func Timestamp(val string) (*timestamp.Timestamp, error) {
|
||||
var r timestamp.Timestamp
|
||||
err := jsonpb.UnmarshalString(val, &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
// Duration converts the given string into a duration.Duration.
|
||||
func Duration(val string) (*duration.Duration, error) {
|
||||
var r duration.Duration
|
||||
err := jsonpb.UnmarshalString(val, &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
// Enum converts the given string into an int32 that should be type casted into the
|
||||
// correct enum proto type.
|
||||
func Enum(val string, enumValMap map[string]int32) (int32, error) {
|
||||
e, ok := enumValMap[val]
|
||||
if ok {
|
||||
return e, nil
|
||||
}
|
||||
|
||||
i, err := Int32(val)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%s is not valid", val)
|
||||
}
|
||||
for _, v := range enumValMap {
|
||||
if v == i {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("%s is not valid", val)
|
||||
}
|
||||
|
||||
// EnumSlice converts 'val' where individual enums are separated by 'sep'
|
||||
// into a int32 slice. Each individual int32 should be type casted into the
|
||||
// correct enum proto type.
|
||||
func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]int32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Enum(v, enumValMap)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Support for google.protobuf.wrappers on top of primitive types
|
||||
*/
|
||||
|
||||
// StringValue well-known type support as wrapper around string type
|
||||
func StringValue(val string) (*wrappers.StringValue, error) {
|
||||
return &wrappers.StringValue{Value: val}, nil
|
||||
}
|
||||
|
||||
// FloatValue well-known type support as wrapper around float32 type
|
||||
func FloatValue(val string) (*wrappers.FloatValue, error) {
|
||||
parsedVal, err := Float32(val)
|
||||
return &wrappers.FloatValue{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// DoubleValue well-known type support as wrapper around float64 type
|
||||
func DoubleValue(val string) (*wrappers.DoubleValue, error) {
|
||||
parsedVal, err := Float64(val)
|
||||
return &wrappers.DoubleValue{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// BoolValue well-known type support as wrapper around bool type
|
||||
func BoolValue(val string) (*wrappers.BoolValue, error) {
|
||||
parsedVal, err := Bool(val)
|
||||
return &wrappers.BoolValue{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// Int32Value well-known type support as wrapper around int32 type
|
||||
func Int32Value(val string) (*wrappers.Int32Value, error) {
|
||||
parsedVal, err := Int32(val)
|
||||
return &wrappers.Int32Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// UInt32Value well-known type support as wrapper around uint32 type
|
||||
func UInt32Value(val string) (*wrappers.UInt32Value, error) {
|
||||
parsedVal, err := Uint32(val)
|
||||
return &wrappers.UInt32Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// Int64Value well-known type support as wrapper around int64 type
|
||||
func Int64Value(val string) (*wrappers.Int64Value, error) {
|
||||
parsedVal, err := Int64(val)
|
||||
return &wrappers.Int64Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// UInt64Value well-known type support as wrapper around uint64 type
|
||||
func UInt64Value(val string) (*wrappers.UInt64Value, error) {
|
||||
parsedVal, err := Uint64(val)
|
||||
return &wrappers.UInt64Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// BytesValue well-known type support as wrapper around bytes[] type
|
||||
func BytesValue(val string) (*wrappers.BytesValue, error) {
|
||||
parsedVal, err := Bytes(val)
|
||||
return &wrappers.BytesValue{Value: parsedVal}, err
|
||||
}
|
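A small usage sketch of the converters above, assuming the vendored grpc-gateway runtime package is importable; the separator, values, and enum map are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Parse a comma-separated query parameter into typed values.
	ids, err := runtime.Int32Slice("1,2,3", ",")
	if err != nil {
		panic(err)
	}
	fmt.Println(ids) // [1 2 3]

	// Enum resolves either the symbolic name or the numeric value against
	// the generated enum map (a hypothetical map is used here).
	enumMap := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}
	v, _ := runtime.Enum("ACTIVE", enumMap)
	fmt.Println(v) // 1
}
```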
5
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
generated
vendored
Normal file
5
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
/*
|
||||
Package runtime contains runtime helper functions used by
|
||||
servers which protoc-gen-grpc-gateway generates.
|
||||
*/
|
||||
package runtime
|
186
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
generated
vendored
Normal file
186
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
generated
vendored
Normal file
@ -0,0 +1,186 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/internal"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
|
||||
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
|
||||
func HTTPStatusFromCode(code codes.Code) int {
|
||||
switch code {
|
||||
case codes.OK:
|
||||
return http.StatusOK
|
||||
case codes.Canceled:
|
||||
return http.StatusRequestTimeout
|
||||
case codes.Unknown:
|
||||
return http.StatusInternalServerError
|
||||
case codes.InvalidArgument:
|
||||
return http.StatusBadRequest
|
||||
case codes.DeadlineExceeded:
|
||||
return http.StatusGatewayTimeout
|
||||
case codes.NotFound:
|
||||
return http.StatusNotFound
|
||||
case codes.AlreadyExists:
|
||||
return http.StatusConflict
|
||||
case codes.PermissionDenied:
|
||||
return http.StatusForbidden
|
||||
case codes.Unauthenticated:
|
||||
return http.StatusUnauthorized
|
||||
case codes.ResourceExhausted:
|
||||
return http.StatusTooManyRequests
|
||||
case codes.FailedPrecondition:
|
||||
// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
|
||||
return http.StatusBadRequest
|
||||
case codes.Aborted:
|
||||
return http.StatusConflict
|
||||
case codes.OutOfRange:
|
||||
return http.StatusBadRequest
|
||||
case codes.Unimplemented:
|
||||
return http.StatusNotImplemented
|
||||
case codes.Internal:
|
||||
return http.StatusInternalServerError
|
||||
case codes.Unavailable:
|
||||
return http.StatusServiceUnavailable
|
||||
case codes.DataLoss:
|
||||
return http.StatusInternalServerError
|
||||
}
|
||||
|
||||
grpclog.Infof("Unknown gRPC error code: %v", code)
|
||||
return http.StatusInternalServerError
|
||||
}
|
||||
|
||||
var (
|
||||
// HTTPError replies to the request with an error.
|
||||
//
|
||||
// HTTPError is called:
|
||||
// - From generated per-endpoint gateway handler code, when calling the backend results in an error.
|
||||
// - From gateway runtime code, when forwarding the response message results in an error.
|
||||
//
|
||||
// The default value for HTTPError calls the custom error handler configured on the ServeMux via the
|
||||
// WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise.
|
||||
//
|
||||
// To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler
|
||||
// serve option.
|
||||
//
|
||||
// To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve
|
||||
// option, set GlobalHTTPErrorHandler to a custom function.
|
||||
//
|
||||
// Setting this variable directly to customize error format is deprecated.
|
||||
HTTPError = MuxOrGlobalHTTPError
|
||||
|
||||
// GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the
|
||||
// WithProtoErrorHandler serve option.
|
||||
//
|
||||
// You can set a custom function to this variable to customize error format.
|
||||
GlobalHTTPErrorHandler = DefaultHTTPError
|
||||
|
||||
// OtherErrorHandler handles gateway errors from parsing and routing client requests for all
|
||||
// ServeMux instances not using the WithProtoErrorHandler serve option.
|
||||
//
|
||||
// It returns the following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest
|
||||
//
|
||||
// To customize parsing and routing error handling of a particular ServeMux instance, use the
|
||||
// WithProtoErrorHandler serve option.
|
||||
//
|
||||
// To customize parsing and routing error handling of all ServeMux instances not using the
|
||||
// WithProtoErrorHandler serve option, set a custom function to this variable.
|
||||
OtherErrorHandler = DefaultOtherErrorHandler
|
||||
)
|
||||
|
||||
// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalHTTPErrorHandler.
|
||||
func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
|
||||
if mux.protoErrorHandler != nil {
|
||||
mux.protoErrorHandler(ctx, mux, marshaler, w, r, err)
|
||||
} else {
|
||||
GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err)
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultHTTPError is the default implementation of HTTPError.
|
||||
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
|
||||
// If otherwise, it replies with http.StatusInternalServerError.
|
||||
//
|
||||
// The response body returned by this function is a JSON object,
|
||||
// which contains a member whose key is "error" and whose value is err.Error().
|
||||
func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
|
||||
const fallback = `{"error": "failed to marshal error message"}`
|
||||
|
||||
s, ok := status.FromError(err)
|
||||
if !ok {
|
||||
s = status.New(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
w.Header().Del("Trailer")
|
||||
w.Header().Del("Transfer-Encoding")
|
||||
|
||||
contentType := marshaler.ContentType()
|
||||
// Check marshaler on run time in order to keep backwards compatibility
|
||||
// An interface param needs to be added to the ContentType() function on
|
||||
// the Marshal interface to be able to remove this check
|
||||
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
|
||||
pb := s.Proto()
|
||||
contentType = typeMarshaler.ContentTypeFromMessage(pb)
|
||||
}
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
|
||||
body := &internal.Error{
|
||||
Error: s.Message(),
|
||||
Message: s.Message(),
|
||||
Code: int32(s.Code()),
|
||||
Details: s.Proto().GetDetails(),
|
||||
}
|
||||
|
||||
buf, merr := marshaler.Marshal(body)
|
||||
if merr != nil {
|
||||
grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
if _, err := io.WriteString(w, fallback); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||
}
|
||||
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
|
||||
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||
// Unless the request includes a TE header field indicating "trailers"
|
||||
// is acceptable, as described in Section 4.3, a server SHOULD NOT
|
||||
// generate trailer fields that it believes are necessary for the user
|
||||
// agent to receive.
|
||||
var wantsTrailers bool
|
||||
|
||||
if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") {
|
||||
wantsTrailers = true
|
||||
handleForwardResponseTrailerHeader(w, md)
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
}
|
||||
|
||||
st := HTTPStatusFromCode(s.Code())
|
||||
w.WriteHeader(st)
|
||||
if _, err := w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
if wantsTrailers {
|
||||
handleForwardResponseTrailer(w, md)
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
|
||||
// It simply writes a string representation of the given error into "w".
|
||||
func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
|
||||
http.Error(w, msg, code)
|
||||
}
|
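For reference, a minimal sketch of customizing the error format globally, as the comments above describe. The package name and handler body are illustrative; the only assumed API is the exported GlobalHTTPErrorHandler variable and DefaultHTTPError shown above.

package customerrors // illustrative package name

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

// UseCustomErrorFormat overrides the error handler for all ServeMux instances
// that do not use the WithProtoErrorHandler serve option.
func UseCustomErrorFormat() {
	runtime.GlobalHTTPErrorHandler = func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
		// Add custom logging or payload shaping here, then delegate to the default.
		runtime.DefaultHTTPError(ctx, mux, m, w, r, err)
	}
}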
89
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
descriptor2 "github.com/golang/protobuf/descriptor"
|
||||
"github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||
"google.golang.org/genproto/protobuf/field_mask"
|
||||
)
|
||||
|
||||
func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) {
|
||||
// TODO - should really gate this with a test that the marshaller has used json names
|
||||
if md != nil {
|
||||
for _, f := range md.Field {
|
||||
if f.JsonName != nil && f.Name != nil && *f.JsonName == name {
|
||||
var subType *descriptor.DescriptorProto
|
||||
|
||||
// If the field has a TypeName then we retrieve the nested type for translating the embedded message names.
|
||||
if f.TypeName != nil {
|
||||
typeSplit := strings.Split(*f.TypeName, ".")
|
||||
typeName := typeSplit[len(typeSplit)-1]
|
||||
for _, t := range md.NestedType {
|
||||
if typeName == *t.Name {
|
||||
subType = t
|
||||
}
|
||||
}
|
||||
}
|
||||
return *f.Name, subType
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
|
||||
func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) {
|
||||
fm := &field_mask.FieldMask{}
|
||||
var root interface{}
|
||||
if err := json.NewDecoder(r).Decode(&root); err != nil {
|
||||
if err == io.EOF {
|
||||
return fm, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
queue := []fieldMaskPathItem{{node: root, md: md}}
|
||||
for len(queue) > 0 {
|
||||
// dequeue an item
|
||||
item := queue[0]
|
||||
queue = queue[1:]
|
||||
|
||||
if m, ok := item.node.(map[string]interface{}); ok {
|
||||
// if the item is an object, then enqueue all of its children
|
||||
for k, v := range m {
|
||||
protoName, subMd := translateName(k, item.md)
|
||||
if subMsg, ok := v.(descriptor2.Message); ok {
|
||||
_, subMd = descriptor2.ForMessage(subMsg)
|
||||
}
|
||||
|
||||
var path string
|
||||
if item.path == "" {
|
||||
path = protoName
|
||||
} else {
|
||||
path = item.path + "." + protoName
|
||||
}
|
||||
queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd})
|
||||
}
|
||||
} else if len(item.path) > 0 {
|
||||
// otherwise, it's a leaf node so print its path
|
||||
fm.Paths = append(fm.Paths, item.path)
|
||||
}
|
||||
}
|
||||
|
||||
return fm, nil
|
||||
}
|
||||
|
||||
// fieldMaskPathItem stores an in-progress deconstruction of a path for a FieldMask
|
||||
type fieldMaskPathItem struct {
|
||||
// the list of prior fields leading up to node connected by dots
|
||||
path string
|
||||
|
||||
// the current item to inspect for further path extraction, as a generic decoded JSON object
|
||||
node interface{}
|
||||
|
||||
// descriptor for parent message
|
||||
md *descriptor.DescriptorProto
|
||||
}
|
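A minimal sketch of how FieldMaskFromRequestBody is typically used for PATCH-style requests. The JSON body and package name are illustrative; passing a nil descriptor skips the JSON-name to proto-name translation performed by translateName.

package fieldmaskexample // illustrative package name

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func exampleFieldMask() error {
	body := strings.NewReader(`{"name": "demo", "spec": {"image": "docker.io/library/redis:alpine"}}`)
	fm, err := runtime.FieldMaskFromRequestBody(body, nil)
	if err != nil {
		return err
	}
	// Paths enumerates every complete leaf path, e.g. "name" and "spec.image",
	// in no particular order because JSON object iteration is unordered.
	fmt.Println(fm.Paths)
	return nil
}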
212
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
generated
vendored
Normal file
@ -0,0 +1,212 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/internal"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var errEmptyResponse = errors.New("empty response")
|
||||
|
||||
// ForwardResponseStream forwards the stream from gRPC server to REST client.
|
||||
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||
f, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
grpclog.Infof("Flush not supported in %T", w)
|
||||
http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||
http.Error(w, "unexpected error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
w.Header().Set("Content-Type", marshaler.ContentType())
|
||||
if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
|
||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
var delimiter []byte
|
||||
if d, ok := marshaler.(Delimited); ok {
|
||||
delimiter = d.Delimiter()
|
||||
} else {
|
||||
delimiter = []byte("\n")
|
||||
}
|
||||
|
||||
var wroteHeader bool
|
||||
for {
|
||||
resp, err := recv()
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
return
|
||||
}
|
||||
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
return
|
||||
}
|
||||
|
||||
var buf []byte
|
||||
switch {
|
||||
case resp == nil:
|
||||
buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse)))
|
||||
default:
|
||||
result := map[string]interface{}{"result": resp}
|
||||
if rb, ok := resp.(responseBody); ok {
|
||||
result["result"] = rb.XXX_ResponseBody()
|
||||
}
|
||||
|
||||
buf, err = marshaler.Marshal(result)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
grpclog.Infof("Failed to marshal response chunk: %v", err)
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
return
|
||||
}
|
||||
if _, err = w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to send response chunk: %v", err)
|
||||
return
|
||||
}
|
||||
wroteHeader = true
|
||||
if _, err = w.Write(delimiter); err != nil {
|
||||
grpclog.Infof("Failed to send delimiter chunk: %v", err)
|
||||
return
|
||||
}
|
||||
f.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
|
||||
for k, vs := range md.HeaderMD {
|
||||
if h, ok := mux.outgoingHeaderMatcher(k); ok {
|
||||
for _, v := range vs {
|
||||
w.Header().Add(h, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
|
||||
for k := range md.TrailerMD {
|
||||
tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
|
||||
w.Header().Add("Trailer", tKey)
|
||||
}
|
||||
}
|
||||
|
||||
func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
|
||||
for k, vs := range md.TrailerMD {
|
||||
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
|
||||
for _, v := range vs {
|
||||
w.Header().Add(tKey, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// responseBody contains a method for getting the field to marshal as the response body.
// This method is generated on the response struct from the value of `response_body` in the `google.api.HttpRule`.
|
||||
type responseBody interface {
|
||||
XXX_ResponseBody() interface{}
|
||||
}
|
||||
|
||||
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
|
||||
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||
}
|
||||
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
handleForwardResponseTrailerHeader(w, md)
|
||||
|
||||
contentType := marshaler.ContentType()
|
||||
// Check the marshaler at run time in order to keep backwards compatibility.
// An interface param needs to be added to the ContentType() function on
// the Marshaler interface to be able to remove this check.
|
||||
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
|
||||
contentType = typeMarshaler.ContentTypeFromMessage(resp)
|
||||
}
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
|
||||
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
var buf []byte
|
||||
var err error
|
||||
if rb, ok := resp.(responseBody); ok {
|
||||
buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
|
||||
} else {
|
||||
buf, err = marshaler.Marshal(resp)
|
||||
}
|
||||
if err != nil {
|
||||
grpclog.Infof("Marshal error: %v", err)
|
||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
handleForwardResponseTrailer(w, md)
|
||||
}
|
||||
|
||||
func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
|
||||
if len(opts) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(ctx, w, resp); err != nil {
|
||||
grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
|
||||
serr := streamError(ctx, mux.streamErrorHandler, err)
|
||||
if !wroteHeader {
|
||||
w.WriteHeader(int(serr.HttpCode))
|
||||
}
|
||||
buf, merr := marshaler.Marshal(errorChunk(serr))
|
||||
if merr != nil {
|
||||
grpclog.Infof("Failed to marshal an error: %v", merr)
|
||||
return
|
||||
}
|
||||
if _, werr := w.Write(buf); werr != nil {
|
||||
grpclog.Infof("Failed to notify error to client: %v", werr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// streamError returns the payload for the final message in a response stream
|
||||
// that represents the given err.
|
||||
func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
|
||||
serr := errHandler(ctx, err)
|
||||
if serr != nil {
|
||||
return serr
|
||||
}
|
||||
// TODO: log about misbehaving stream error handler?
|
||||
return DefaultHTTPStreamErrorHandler(ctx, err)
|
||||
}
|
||||
|
||||
func errorChunk(err *StreamError) map[string]proto.Message {
|
||||
return map[string]proto.Message{"error": (*internal.StreamError)(err)}
|
||||
}
|
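Since ForwardResponseStream writes each message as an independent JSON object wrapped in "result" (or "error") and separated by the marshaler's delimiter, a client can consume the stream line by line. A minimal client-side sketch, assuming the default newline delimiter and JSON marshaler; the URL, type names, and package name are illustrative.

package streamclient // illustrative package name

import (
	"bufio"
	"encoding/json"
	"net/http"
)

// chunk mirrors the per-message envelope written by ForwardResponseStream.
type chunk struct {
	Result json.RawMessage `json:"result"`
	Error  json.RawMessage `json:"error"`
}

func readStream(url string) ([]chunk, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var chunks []chunk
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() { // one JSON object per line
		var c chunk
		if err := json.Unmarshal(sc.Bytes(), &c); err != nil {
			return nil, err
		}
		chunks = append(chunks, c)
	}
	return chunks, sc.Err()
}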
43
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"google.golang.org/genproto/googleapis/api/httpbody"
|
||||
)
|
||||
|
||||
// SetHTTPBodyMarshaler overwrites the default marshaler with the HTTPBodyMarshaler.
|
||||
func SetHTTPBodyMarshaler(serveMux *ServeMux) {
|
||||
serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
|
||||
Marshaler: &JSONPb{OrigName: true},
|
||||
}
|
||||
}
|
||||
|
||||
// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
|
||||
// google.api.HttpBody message as the full response body if it is
|
||||
// the actual message used as the response. If not, then this will
|
||||
// simply fall back to the Marshaler specified as its default Marshaler.
|
||||
type HTTPBodyMarshaler struct {
|
||||
Marshaler
|
||||
}
|
||||
|
||||
// ContentType implementation to keep backwards compatibility with marshal interface
|
||||
func (h *HTTPBodyMarshaler) ContentType() string {
|
||||
return h.ContentTypeFromMessage(nil)
|
||||
}
|
||||
|
||||
// ContentTypeFromMessage returns the specified content type if v is a google.api.HttpBody message;
// otherwise it falls back to the default Marshaler's content type.
|
||||
func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
|
||||
if httpBody, ok := v.(*httpbody.HttpBody); ok {
|
||||
return httpBody.GetContentType()
|
||||
}
|
||||
return h.Marshaler.ContentType()
|
||||
}
|
||||
|
||||
// Marshal marshals "v" by returning the body bytes if v is a
|
||||
// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
|
||||
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
|
||||
if httpBody, ok := v.(*httpbody.HttpBody); ok {
|
||||
return httpBody.Data, nil
|
||||
}
|
||||
return h.Marshaler.Marshal(v)
|
||||
}
|
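A minimal sketch of enabling HttpBody handling on a mux; only NewServeMux and SetHTTPBodyMarshaler from this package are assumed, and the package name is illustrative.

package httpbodyexample // illustrative package name

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func newMux() *runtime.ServeMux {
	mux := runtime.NewServeMux()
	// Replace the wildcard JSONPb marshaler so google.api.HttpBody responses
	// are written verbatim with their own content type.
	runtime.SetHTTPBodyMarshaler(mux)
	return mux
}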
45
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
|
||||
// with the standard "encoding/json" package of Golang.
|
||||
// Although it is generally faster for simple proto messages than JSONPb,
|
||||
// it does not support advanced features of protobuf, e.g. map, oneof, ....
|
||||
//
|
||||
// The NewEncoder and NewDecoder types return *json.Encoder and
|
||||
// *json.Decoder respectively.
|
||||
type JSONBuiltin struct{}
|
||||
|
||||
// ContentType always returns "application/json".
|
||||
func (*JSONBuiltin) ContentType() string {
|
||||
return "application/json"
|
||||
}
|
||||
|
||||
// Marshal marshals "v" into JSON
|
||||
func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
|
||||
return json.Marshal(v)
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals JSON data into "v".
|
||||
func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
// NewDecoder returns a Decoder which reads JSON stream from "r".
|
||||
func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
|
||||
return json.NewDecoder(r)
|
||||
}
|
||||
|
||||
// NewEncoder returns an Encoder which writes JSON stream into "w".
|
||||
func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
|
||||
return json.NewEncoder(w)
|
||||
}
|
||||
|
||||
// Delimiter for newline encoded JSON streams.
|
||||
func (j *JSONBuiltin) Delimiter() []byte {
|
||||
return []byte("\n")
|
||||
}
|
262
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
generated
vendored
Normal file
@ -0,0 +1,262 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
|
||||
// with the "github.com/golang/protobuf/jsonpb".
|
||||
// It fully supports protobuf functionality, unlike JSONBuiltin.
|
||||
//
|
||||
// The NewDecoder method returns a DecoderWrapper, so the underlying
|
||||
// *json.Decoder methods can be used.
|
||||
type JSONPb jsonpb.Marshaler
|
||||
|
||||
// ContentType always returns "application/json".
|
||||
func (*JSONPb) ContentType() string {
|
||||
return "application/json"
|
||||
}
|
||||
|
||||
// Marshal marshals "v" into JSON.
|
||||
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
|
||||
if _, ok := v.(proto.Message); !ok {
|
||||
return j.marshalNonProtoField(v)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := j.marshalTo(&buf, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
|
||||
p, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
buf, err := j.marshalNonProtoField(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(buf)
|
||||
return err
|
||||
}
|
||||
return (*jsonpb.Marshaler)(j).Marshal(w, p)
|
||||
}
|
||||
|
||||
var (
|
||||
// protoMessageType is stored to prevent constant lookup of the same type at runtime.
|
||||
protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
||||
)
|
||||
|
||||
// marshalNonProtoField marshals a non-message field of a protobuf message.
// This function does not correctly marshal arbitrary data structures into JSON,
|
||||
// but it is only capable of marshaling non-message field values of protobuf,
|
||||
// i.e. primitive types, enums; pointers to primitives or enums; maps from
|
||||
// integer/string types to primitives/enums/pointers to messages.
|
||||
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
|
||||
if v == nil {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
rv := reflect.ValueOf(v)
|
||||
for rv.Kind() == reflect.Ptr {
|
||||
if rv.IsNil() {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
rv = rv.Elem()
|
||||
}
|
||||
|
||||
if rv.Kind() == reflect.Slice {
|
||||
if rv.IsNil() {
|
||||
if j.EmitDefaults {
|
||||
return []byte("[]"), nil
|
||||
}
|
||||
return []byte("null"), nil
|
||||
}
|
||||
|
||||
if rv.Type().Elem().Implements(protoMessageType) {
|
||||
var buf bytes.Buffer
|
||||
err := buf.WriteByte('[')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
if i != 0 {
|
||||
err = buf.WriteByte(',')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
err = buf.WriteByte(']')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
}
|
||||
|
||||
if rv.Kind() == reflect.Map {
|
||||
m := make(map[string]*json.RawMessage)
|
||||
for _, k := range rv.MapKeys() {
|
||||
buf, err := j.Marshal(rv.MapIndex(k).Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
|
||||
}
|
||||
if j.Indent != "" {
|
||||
return json.MarshalIndent(m, "", j.Indent)
|
||||
}
|
||||
return json.Marshal(m)
|
||||
}
|
||||
if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
|
||||
return json.Marshal(enum.String())
|
||||
}
|
||||
return json.Marshal(rv.Interface())
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals JSON "data" into "v"
|
||||
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
|
||||
return unmarshalJSONPb(data, v)
|
||||
}
|
||||
|
||||
// NewDecoder returns a Decoder which reads JSON stream from "r".
|
||||
func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
|
||||
d := json.NewDecoder(r)
|
||||
return DecoderWrapper{Decoder: d}
|
||||
}
|
||||
|
||||
// DecoderWrapper is a wrapper around a *json.Decoder that adds
|
||||
// support for protos to the Decode method.
|
||||
type DecoderWrapper struct {
|
||||
*json.Decoder
|
||||
}
|
||||
|
||||
// Decode wraps the embedded decoder's Decode method to support
|
||||
// protos using a jsonpb.Unmarshaler.
|
||||
func (d DecoderWrapper) Decode(v interface{}) error {
|
||||
return decodeJSONPb(d.Decoder, v)
|
||||
}
|
||||
|
||||
// NewEncoder returns an Encoder which writes JSON stream into "w".
|
||||
func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
|
||||
return EncoderFunc(func(v interface{}) error {
|
||||
if err := j.marshalTo(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
// mimic json.Encoder by adding a newline (makes output
|
||||
// easier to read when it contains multiple encoded items)
|
||||
_, err := w.Write(j.Delimiter())
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func unmarshalJSONPb(data []byte, v interface{}) error {
|
||||
d := json.NewDecoder(bytes.NewReader(data))
|
||||
return decodeJSONPb(d, v)
|
||||
}
|
||||
|
||||
func decodeJSONPb(d *json.Decoder, v interface{}) error {
|
||||
p, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
return decodeNonProtoField(d, v)
|
||||
}
|
||||
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
|
||||
return unmarshaler.UnmarshalNext(d, p)
|
||||
}
|
||||
|
||||
func decodeNonProtoField(d *json.Decoder, v interface{}) error {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("%T is not a pointer", v)
|
||||
}
|
||||
for rv.Kind() == reflect.Ptr {
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.New(rv.Type().Elem()))
|
||||
}
|
||||
if rv.Type().ConvertibleTo(typeProtoMessage) {
|
||||
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
|
||||
return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
|
||||
}
|
||||
rv = rv.Elem()
|
||||
}
|
||||
if rv.Kind() == reflect.Map {
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeMap(rv.Type()))
|
||||
}
|
||||
conv, ok := convFromType[rv.Type().Key().Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
|
||||
}
|
||||
|
||||
m := make(map[string]*json.RawMessage)
|
||||
if err := d.Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range m {
|
||||
result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
|
||||
if err := result[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
bk := result[0]
|
||||
bv := reflect.New(rv.Type().Elem())
|
||||
if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
rv.SetMapIndex(bk, bv.Elem())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if _, ok := rv.Interface().(protoEnum); ok {
|
||||
var repr interface{}
|
||||
if err := d.Decode(&repr); err != nil {
|
||||
return err
|
||||
}
|
||||
switch repr.(type) {
|
||||
case string:
|
||||
// TODO(yugui) Should use proto.StructProperties?
|
||||
return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
|
||||
case float64:
|
||||
rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
|
||||
}
|
||||
}
|
||||
return d.Decode(v)
|
||||
}
|
||||
|
||||
type protoEnum interface {
|
||||
fmt.Stringer
|
||||
EnumDescriptor() ([]byte, []int)
|
||||
}
|
||||
|
||||
var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
||||
|
||||
// Delimiter for newline encoded JSON streams.
|
||||
func (j *JSONPb) Delimiter() []byte {
|
||||
return []byte("\n")
|
||||
}
|
||||
|
||||
// allowUnknownFields controls whether the decoder returns an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
|
||||
var allowUnknownFields = true
|
||||
|
||||
// DisallowUnknownFields enables an option in the decoder (unmarshaler) to
// return an error when it finds an unknown field. This function must be
// called before using the JSON marshaler.
|
||||
func DisallowUnknownFields() {
|
||||
allowUnknownFields = false
|
||||
}
|
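A minimal sketch of configuring JSONPb and rejecting unknown request fields; the option values and package name are illustrative, and WithMarshalerOption/MIMEWildcard come from this runtime package.

package jsonpbexample // illustrative package name

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func newMux() *runtime.ServeMux {
	// Reject request bodies carrying fields the target message does not define.
	runtime.DisallowUnknownFields()

	return runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{
			OrigName:     true, // keep original proto field names
			EmitDefaults: true, // include zero-valued fields in responses
		}),
	)
}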
62
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"errors"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes
|
||||
type ProtoMarshaller struct{}
|
||||
|
||||
// ContentType always returns "application/octet-stream".
|
||||
func (*ProtoMarshaller) ContentType() string {
|
||||
return "application/octet-stream"
|
||||
}
|
||||
|
||||
// Marshal marshals "value" into Proto
|
||||
func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
|
||||
message, ok := value.(proto.Message)
|
||||
if !ok {
|
||||
return nil, errors.New("unable to marshal non proto field")
|
||||
}
|
||||
return proto.Marshal(message)
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals proto "data" into "value"
|
||||
func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
|
||||
message, ok := value.(proto.Message)
|
||||
if !ok {
|
||||
return errors.New("unable to unmarshal non proto field")
|
||||
}
|
||||
return proto.Unmarshal(data, message)
|
||||
}
|
||||
|
||||
// NewDecoder returns a Decoder which reads proto stream from "reader".
|
||||
func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
|
||||
return DecoderFunc(func(value interface{}) error {
|
||||
buffer, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return marshaller.Unmarshal(buffer, value)
|
||||
})
|
||||
}
|
||||
|
||||
// NewEncoder returns an Encoder which writes proto stream into "writer".
|
||||
func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
|
||||
return EncoderFunc(func(value interface{}) error {
|
||||
buffer, err := marshaller.Marshal(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = writer.Write(buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
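A minimal sketch of serving binary protobuf alongside the default JSON marshaler by registering ProtoMarshaller for a specific MIME type; the content type chosen here matches the one ProtoMarshaller reports, and the package name is illustrative.

package protoexample // illustrative package name

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		// Clients that send/accept "application/octet-stream" get raw proto bytes;
		// everything else falls back to the wildcard JSON marshaler.
		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
	)
}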
55
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
|
||||
type Marshaler interface {
|
||||
// Marshal marshals "v" into byte sequence.
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
// Unmarshal unmarshals "data" into "v".
|
||||
// "v" must be a pointer value.
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
// NewDecoder returns a Decoder which reads byte sequence from "r".
|
||||
NewDecoder(r io.Reader) Decoder
|
||||
// NewEncoder returns an Encoder which writes bytes sequence into "w".
|
||||
NewEncoder(w io.Writer) Encoder
|
||||
// ContentType returns the Content-Type which this marshaler is responsible for.
|
||||
ContentType() string
|
||||
}
|
||||
|
||||
// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called
|
||||
// to set the Content-Type header on the response
|
||||
type contentTypeMarshaler interface {
|
||||
// ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message
|
||||
ContentTypeFromMessage(v interface{}) string
|
||||
}
|
||||
|
||||
// Decoder decodes a byte sequence
|
||||
type Decoder interface {
|
||||
Decode(v interface{}) error
|
||||
}
|
||||
|
||||
// Encoder encodes gRPC payloads / fields into byte sequence.
|
||||
type Encoder interface {
|
||||
Encode(v interface{}) error
|
||||
}
|
||||
|
||||
// DecoderFunc adapts a decoder function into Decoder.
|
||||
type DecoderFunc func(v interface{}) error
|
||||
|
||||
// Decode delegates invocations to the underlying function itself.
|
||||
func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
|
||||
|
||||
// EncoderFunc adapts an encoder function into Encoder
|
||||
type EncoderFunc func(v interface{}) error
|
||||
|
||||
// Encode delegates invocations to the underlying function itself.
|
||||
func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
|
||||
|
||||
// Delimited defines the streaming delimiter.
|
||||
type Delimited interface {
|
||||
// Delimiter returns the record separator for the stream.
|
||||
Delimiter() []byte
|
||||
}
|
99
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"mime"
|
||||
"net/http"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// MIMEWildcard is the fallback MIME type used for requests which do not match
|
||||
// a registered MIME type.
|
||||
const MIMEWildcard = "*"
|
||||
|
||||
var (
|
||||
acceptHeader = http.CanonicalHeaderKey("Accept")
|
||||
contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
|
||||
|
||||
defaultMarshaler = &JSONPb{OrigName: true}
|
||||
)
|
||||
|
||||
// MarshalerForRequest returns the inbound/outbound marshalers for this request.
|
||||
// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
|
||||
// If it isn't set (or the request Content-Type is empty), checks for "*".
|
||||
// If there are multiple Content-Type headers set, choose the first one that it can
|
||||
// exactly match in the registry.
|
||||
// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
|
||||
func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
|
||||
for _, acceptVal := range r.Header[acceptHeader] {
|
||||
if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
|
||||
outbound = m
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, contentTypeVal := range r.Header[contentTypeHeader] {
|
||||
contentType, _, err := mime.ParseMediaType(contentTypeVal)
|
||||
if err != nil {
|
||||
grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
|
||||
continue
|
||||
}
|
||||
if m, ok := mux.marshalers.mimeMap[contentType]; ok {
|
||||
inbound = m
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if inbound == nil {
|
||||
inbound = mux.marshalers.mimeMap[MIMEWildcard]
|
||||
}
|
||||
if outbound == nil {
|
||||
outbound = inbound
|
||||
}
|
||||
|
||||
return inbound, outbound
|
||||
}
|
||||
|
||||
// marshalerRegistry is a mapping from MIME types to Marshalers.
|
||||
type marshalerRegistry struct {
|
||||
mimeMap map[string]Marshaler
|
||||
}
|
||||
|
||||
// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
|
||||
// MIME type).
|
||||
func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
|
||||
if len(mime) == 0 {
|
||||
return errors.New("empty MIME type")
|
||||
}
|
||||
|
||||
m.mimeMap[mime] = marshaler
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeMarshalerMIMERegistry returns a new registry of marshalers.
|
||||
// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
|
||||
//
|
||||
// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
// with an "application/json" Content-Type.
// "*" can be used to match any Content-Type.
// This can be attached to a ServeMux with the marshaler option.
|
||||
func makeMarshalerMIMERegistry() marshalerRegistry {
|
||||
return marshalerRegistry{
|
||||
mimeMap: map[string]Marshaler{
|
||||
MIMEWildcard: defaultMarshaler,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
|
||||
// Marshalers to a MIME type in mux.
|
||||
func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
|
||||
return func(mux *ServeMux) {
|
||||
if err := mux.marshalers.add(mime, marshaler); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
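A minimal sketch of the mapping described in the makeMarshalerMIMERegistry comment above: JSONPb for "application/jsonpb", JSONBuiltin for "application/json", and a JSONPb wildcard fallback. Only APIs from this file and the marshaler files above are assumed; the package name is illustrative.

package registryexample // illustrative package name

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithMarshalerOption("application/jsonpb", &runtime.JSONPb{OrigName: true}),
		runtime.WithMarshalerOption("application/json", &runtime.JSONBuiltin{}),
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{OrigName: true}),
	)
}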
300
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
generated
vendored
Normal file
@ -0,0 +1,300 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
|
||||
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
|
||||
|
||||
// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
|
||||
// a request is received with a URI path that does not match any registered
|
||||
// service method.
|
||||
//
|
||||
// Since gRPC servers return an "Unimplemented" code for requests with an
|
||||
// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
|
||||
var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
|
||||
|
||||
// ServeMux is a request multiplexer for grpc-gateway.
|
||||
// It matches http requests to patterns and invokes the corresponding handler.
|
||||
type ServeMux struct {
|
||||
// handlers maps HTTP method to a list of handlers.
|
||||
handlers map[string][]handler
|
||||
forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
|
||||
marshalers marshalerRegistry
|
||||
incomingHeaderMatcher HeaderMatcherFunc
|
||||
outgoingHeaderMatcher HeaderMatcherFunc
|
||||
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
|
||||
streamErrorHandler StreamErrorHandlerFunc
|
||||
protoErrorHandler ProtoErrorHandlerFunc
|
||||
disablePathLengthFallback bool
|
||||
lastMatchWins bool
|
||||
}
|
||||
|
||||
// ServeMuxOption is an option that can be given to a ServeMux on construction.
|
||||
type ServeMuxOption func(*ServeMux)
|
||||
|
||||
// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
|
||||
//
|
||||
// forwardResponseOption is an option that will be called on the relevant context.Context,
|
||||
// http.ResponseWriter, and proto.Message before every forwarded response.
|
||||
//
|
||||
// The message may be nil in the case where just a header is being sent.
|
||||
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
|
||||
}
|
||||
}
|
||||
|
||||
// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
|
||||
// Configuring this will mean the generated swagger output is no longer correct, and it should be
|
||||
// done with careful consideration.
|
||||
func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
currentQueryParser = queryParameterParser
|
||||
}
|
||||
}
|
||||
|
||||
// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
|
||||
type HeaderMatcherFunc func(string) (string, bool)
|
||||
|
||||
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
|
||||
// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
|
||||
// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
|
||||
func DefaultHeaderMatcher(key string) (string, bool) {
|
||||
key = textproto.CanonicalMIMEHeaderKey(key)
|
||||
if isPermanentHTTPHeader(key) {
|
||||
return MetadataPrefix + key, true
|
||||
} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
|
||||
return key[len(MetadataHeaderPrefix):], true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
|
||||
//
|
||||
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
|
||||
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
|
||||
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
||||
return func(mux *ServeMux) {
|
||||
mux.incomingHeaderMatcher = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
|
||||
//
|
||||
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
|
||||
// passed to http response returned from gateway. To transform the header before passing to response,
|
||||
// matcher should return modified header.
|
||||
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
||||
return func(mux *ServeMux) {
|
||||
mux.outgoingHeaderMatcher = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
|
||||
//
|
||||
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
|
||||
// is reading token from cookie and adding it in gRPC context.
|
||||
func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
|
||||
}
|
||||
}
|
||||
|
||||
// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
|
||||
//
|
||||
// This can be used to handle an error as general proto message defined by gRPC.
|
||||
// When this option is used, the mux uses the configured error handler instead of HTTPError and
|
||||
// OtherErrorHandler.
|
||||
func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.protoErrorHandler = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
|
||||
func WithDisablePathLengthFallback() ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.disablePathLengthFallback = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
|
||||
// error handler, which allows for customizing the error trailer for server-streaming
|
||||
// calls.
|
||||
//
|
||||
// For stream errors that occur before any response has been written, the mux's
|
||||
// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
|
||||
// be handled differently: they must be included in the response body. The response body's
|
||||
// final message will include the error details returned by the stream error handler.
|
||||
func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.streamErrorHandler = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithLastMatchWins returns a ServeMuxOption that will enable "last
|
||||
// match wins" behavior, where if multiple path patterns match a
|
||||
// request path, the last one defined in the .proto file will be used.
|
||||
func WithLastMatchWins() ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.lastMatchWins = true
|
||||
}
|
||||
}
|
||||
|
||||
// NewServeMux returns a new ServeMux whose internal mapping is empty.
|
||||
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
|
||||
serveMux := &ServeMux{
|
||||
handlers: make(map[string][]handler),
|
||||
forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
|
||||
marshalers: makeMarshalerMIMERegistry(),
|
||||
streamErrorHandler: DefaultHTTPStreamErrorHandler,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(serveMux)
|
||||
}
|
||||
|
||||
if serveMux.incomingHeaderMatcher == nil {
|
||||
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
|
||||
}
|
||||
|
||||
if serveMux.outgoingHeaderMatcher == nil {
|
||||
serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
|
||||
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
|
||||
}
|
||||
}
|
||||
|
||||
return serveMux
|
||||
}
|
||||
|
||||
// Handle associates "h" to the pair of HTTP method and path pattern.
|
||||
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
|
||||
if s.lastMatchWins {
|
||||
s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...)
|
||||
} else {
|
||||
s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
|
||||
}
|
||||
}
|
||||
|
||||
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
|
||||
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
path := r.URL.Path
|
||||
if !strings.HasPrefix(path, "/") {
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
components := strings.Split(path[1:], "/")
|
||||
l := len(components)
|
||||
var verb string
|
||||
if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
||||
}
|
||||
return
|
||||
} else if idx > 0 {
|
||||
c := components[l-1]
|
||||
components[l-1], verb = c[:idx], c[idx+1:]
|
||||
}
|
||||
|
||||
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
|
||||
r.Method = strings.ToUpper(override)
|
||||
if err := r.ParseForm(); err != nil {
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
sterr := status.Error(codes.InvalidArgument, err.Error())
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, h := range s.handlers[r.Method] {
|
||||
pathParams, err := h.pat.Match(components, verb)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
h.h(w, r, pathParams)
|
||||
return
|
||||
}
|
||||
|
||||
// lookup other methods to handle fallback from GET to POST and
|
||||
// to determine if it is MethodNotAllowed or NotFound.
|
||||
for m, handlers := range s.handlers {
|
||||
if m == r.Method {
|
||||
continue
|
||||
}
|
||||
for _, h := range handlers {
|
||||
pathParams, err := h.pat.Match(components, verb)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
|
||||
if s.isPathLengthFallback(r) {
|
||||
if err := r.ParseForm(); err != nil {
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
sterr := status.Error(codes.InvalidArgument, err.Error())
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
|
||||
}
|
||||
return
|
||||
}
|
||||
h.h(w, r, pathParams)
|
||||
return
|
||||
}
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
|
||||
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
|
||||
return s.forwardResponseOptions
|
||||
}
|
||||
|
||||
func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
|
||||
return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
|
||||
}
|
||||
|
||||
type handler struct {
|
||||
pat Pattern
|
||||
h HandlerFunc
|
||||
}
|
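A minimal sketch of constructing a ServeMux with the options documented above; the header name, cookie name, metadata key, and package name are illustrative.

package muxexample // illustrative package name

import (
	"context"
	"net/http"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc/metadata"
)

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		// Forward one extra inbound header and otherwise keep the default rules.
		runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
			if strings.EqualFold(key, "X-Request-Id") {
				return key, true
			}
			return runtime.DefaultHeaderMatcher(key)
		}),
		// Promote a session cookie into gRPC metadata for the backend.
		runtime.WithMetadata(func(ctx context.Context, r *http.Request) metadata.MD {
			if c, err := r.Cookie("session"); err == nil {
				return metadata.Pairs("session-token", c.Value)
			}
			return nil
		}),
	)
}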
262
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
generated
vendored
Normal file
@ -0,0 +1,262 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotMatch indicates that the given HTTP request path does not match the pattern.
|
||||
ErrNotMatch = errors.New("not match to the path pattern")
|
||||
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
|
||||
ErrInvalidPattern = errors.New("invalid pattern")
|
||||
)
|
||||
|
||||
type op struct {
|
||||
code utilities.OpCode
|
||||
operand int
|
||||
}
|
||||
|
||||
// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
|
||||
type Pattern struct {
|
||||
// ops is a list of operations
|
||||
ops []op
|
||||
// pool is a constant pool indexed by the operands or vars.
|
||||
pool []string
|
||||
// vars is a list of variable names to be bound by this pattern
|
||||
vars []string
|
||||
// stacksize is the max depth of the stack
|
||||
stacksize int
|
||||
// tailLen is the length of the fixed-size segments after a deep wildcard
|
||||
tailLen int
|
||||
// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
|
||||
verb string
|
||||
// assumeColonVerb indicates whether a path suffix after a final
|
||||
// colon may only be interpreted as a verb.
|
||||
assumeColonVerb bool
|
||||
}
|
||||
|
||||
type patternOptions struct {
|
||||
assumeColonVerb bool
|
||||
}
|
||||
|
||||
// PatternOpt is an option for creating Patterns.
|
||||
type PatternOpt func(*patternOptions)
|
||||
|
||||
// NewPattern returns a new Pattern from the given definition values.
|
||||
// "ops" is a sequence of op codes. "pool" is a constant pool.
|
||||
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
|
||||
// "version" must be 1 for now.
|
||||
// It returns an error if the given definition is invalid.
|
||||
func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) {
|
||||
options := patternOptions{
|
||||
assumeColonVerb: true,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
if version != 1 {
|
||||
grpclog.Infof("unsupported version: %d", version)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
|
||||
l := len(ops)
|
||||
if l%2 != 0 {
|
||||
grpclog.Infof("odd number of ops codes: %d", l)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
|
||||
var (
|
||||
typedOps []op
|
||||
stack, maxstack int
|
||||
tailLen int
|
||||
pushMSeen bool
|
||||
vars []string
|
||||
)
|
||||
for i := 0; i < l; i += 2 {
|
||||
op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
|
||||
switch op.code {
|
||||
case utilities.OpNop:
|
||||
continue
|
||||
case utilities.OpPush:
|
||||
if pushMSeen {
|
||||
tailLen++
|
||||
}
|
||||
stack++
|
||||
case utilities.OpPushM:
|
||||
if pushMSeen {
|
||||
grpclog.Infof("pushM appears twice")
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
pushMSeen = true
|
||||
stack++
|
||||
case utilities.OpLitPush:
|
||||
if op.operand < 0 || len(pool) <= op.operand {
|
||||
grpclog.Infof("negative literal index: %d", op.operand)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
if pushMSeen {
|
||||
tailLen++
|
||||
}
|
||||
stack++
|
||||
case utilities.OpConcatN:
|
||||
if op.operand <= 0 {
|
||||
grpclog.Infof("negative concat size: %d", op.operand)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
stack -= op.operand
|
||||
if stack < 0 {
|
||||
grpclog.Print("stack underflow")
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
stack++
|
||||
case utilities.OpCapture:
|
||||
if op.operand < 0 || len(pool) <= op.operand {
|
||||
grpclog.Infof("variable name index out of bound: %d", op.operand)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
v := pool[op.operand]
|
||||
op.operand = len(vars)
|
||||
vars = append(vars, v)
|
||||
stack--
|
||||
if stack < 0 {
|
||||
grpclog.Infof("stack underflow")
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
default:
|
||||
grpclog.Infof("invalid opcode: %d", op.code)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
|
||||
if maxstack < stack {
|
||||
maxstack = stack
|
||||
}
|
||||
typedOps = append(typedOps, op)
|
||||
}
|
||||
return Pattern{
|
||||
ops: typedOps,
|
||||
pool: pool,
|
||||
vars: vars,
|
||||
stacksize: maxstack,
|
||||
tailLen: tailLen,
|
||||
verb: verb,
|
||||
assumeColonVerb: options.assumeColonVerb,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
|
||||
func MustPattern(p Pattern, err error) Pattern {
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Pattern initialization failed: %v", err)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Match examines components to determine whether they match the Pattern.
// If they match, the function returns a mapping from field paths to their captured values.
// Otherwise, the function returns an error.
|
||||
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
|
||||
if p.verb != verb {
|
||||
if p.assumeColonVerb || p.verb != "" {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
if len(components) == 0 {
|
||||
components = []string{":" + verb}
|
||||
} else {
|
||||
components = append([]string{}, components...)
|
||||
components[len(components)-1] += ":" + verb
|
||||
}
|
||||
verb = ""
|
||||
}
|
||||
|
||||
var pos int
|
||||
stack := make([]string, 0, p.stacksize)
|
||||
captured := make([]string, len(p.vars))
|
||||
l := len(components)
|
||||
for _, op := range p.ops {
|
||||
switch op.code {
|
||||
case utilities.OpNop:
|
||||
continue
|
||||
case utilities.OpPush, utilities.OpLitPush:
|
||||
if pos >= l {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
c := components[pos]
|
||||
if op.code == utilities.OpLitPush {
|
||||
if lit := p.pool[op.operand]; c != lit {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
}
|
||||
stack = append(stack, c)
|
||||
pos++
|
||||
case utilities.OpPushM:
|
||||
end := len(components)
|
||||
if end < pos+p.tailLen {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
end -= p.tailLen
|
||||
stack = append(stack, strings.Join(components[pos:end], "/"))
|
||||
pos = end
|
||||
case utilities.OpConcatN:
|
||||
n := op.operand
|
||||
l := len(stack) - n
|
||||
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
||||
case utilities.OpCapture:
|
||||
n := len(stack) - 1
|
||||
captured[op.operand] = stack[n]
|
||||
stack = stack[:n]
|
||||
}
|
||||
}
|
||||
if pos < l {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
bindings := make(map[string]string)
|
||||
for i, val := range captured {
|
||||
bindings[p.vars[i]] = val
|
||||
}
|
||||
return bindings, nil
|
||||
}
|
||||
|
||||
// Verb returns the verb part of the Pattern.
|
||||
func (p Pattern) Verb() string { return p.verb }
|
||||
|
||||
func (p Pattern) String() string {
|
||||
var stack []string
|
||||
for _, op := range p.ops {
|
||||
switch op.code {
|
||||
case utilities.OpNop:
|
||||
continue
|
||||
case utilities.OpPush:
|
||||
stack = append(stack, "*")
|
||||
case utilities.OpLitPush:
|
||||
stack = append(stack, p.pool[op.operand])
|
||||
case utilities.OpPushM:
|
||||
stack = append(stack, "**")
|
||||
case utilities.OpConcatN:
|
||||
n := op.operand
|
||||
l := len(stack) - n
|
||||
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
||||
case utilities.OpCapture:
|
||||
n := len(stack) - 1
|
||||
stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
|
||||
}
|
||||
}
|
||||
segs := strings.Join(stack, "/")
|
||||
if p.verb != "" {
|
||||
return fmt.Sprintf("/%s:%s", segs, p.verb)
|
||||
}
|
||||
return "/" + segs
|
||||
}
|
||||
|
||||
// AssumeColonVerbOpt indicates whether a path suffix after a final
|
||||
// colon may only be interpreted as a verb.
|
||||
func AssumeColonVerbOpt(val bool) PatternOpt {
|
||||
return PatternOpt(func(o *patternOptions) {
|
||||
o.assumeColonVerb = val
|
||||
})
|
||||
}
|
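A minimal sketch of hand-assembling and matching the pattern "/v1/users/{id}". In practice the generated gateway code emits these opcode sequences, so the literal ops, pool, and package name below are illustrative; the opcode constants are assumed to come from the utilities package imported by this file.

package patternexample // illustrative package name

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func examplePattern() {
	pat := runtime.MustPattern(runtime.NewPattern(
		1, // version
		[]int{
			int(utilities.OpLitPush), 0, // literal "v1"
			int(utilities.OpLitPush), 1, // literal "users"
			int(utilities.OpPush), 0, // one wildcard path segment
			int(utilities.OpConcatN), 1, // collapse it into a single value
			int(utilities.OpCapture), 2, // bind that value to "id"
		},
		[]string{"v1", "users", "id"}, // constant pool
		"", // no trailing verb
	))

	bindings, err := pat.Match([]string{"v1", "users", "42"}, "")
	fmt.Println(bindings["id"], err) // "42" and a nil error for a matching path
}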
80
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
package runtime

import (
    "github.com/golang/protobuf/proto"
)

// StringP returns a pointer to a string whose pointee is same as the given string value.
func StringP(val string) (*string, error) {
    return proto.String(val), nil
}

// BoolP parses the given string representation of a boolean value,
// and returns a pointer to a bool whose value is same as the parsed value.
func BoolP(val string) (*bool, error) {
    b, err := Bool(val)
    if err != nil {
        return nil, err
    }
    return proto.Bool(b), nil
}

// Float64P parses the given string representation of a floating point number,
// and returns a pointer to a float64 whose value is same as the parsed number.
func Float64P(val string) (*float64, error) {
    f, err := Float64(val)
    if err != nil {
        return nil, err
    }
    return proto.Float64(f), nil
}

// Float32P parses the given string representation of a floating point number,
// and returns a pointer to a float32 whose value is same as the parsed number.
func Float32P(val string) (*float32, error) {
    f, err := Float32(val)
    if err != nil {
        return nil, err
    }
    return proto.Float32(f), nil
}

// Int64P parses the given string representation of an integer
// and returns a pointer to a int64 whose value is same as the parsed integer.
func Int64P(val string) (*int64, error) {
    i, err := Int64(val)
    if err != nil {
        return nil, err
    }
    return proto.Int64(i), nil
}

// Int32P parses the given string representation of an integer
// and returns a pointer to a int32 whose value is same as the parsed integer.
func Int32P(val string) (*int32, error) {
    i, err := Int32(val)
    if err != nil {
        return nil, err
    }
    return proto.Int32(i), err
}

// Uint64P parses the given string representation of an integer
// and returns a pointer to a uint64 whose value is same as the parsed integer.
func Uint64P(val string) (*uint64, error) {
    i, err := Uint64(val)
    if err != nil {
        return nil, err
    }
    return proto.Uint64(i), err
}

// Uint32P parses the given string representation of an integer
// and returns a pointer to a uint32 whose value is same as the parsed integer.
func Uint32P(val string) (*uint32, error) {
    i, err := Uint32(val)
    if err != nil {
        return nil, err
    }
    return proto.Uint32(i), err
}
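Editor's note: a hedged sketch, not part of this commit, of how generated gateway code typically uses these helpers to fill proto2 optional (pointer) fields from query-string text; Bool, Int32 and friends live in convert.go of the same package.

package main

import (
    "fmt"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
    name, _ := runtime.StringP("alice") // *string; never fails
    active, _ := runtime.BoolP("true")  // *bool, parsed via runtime.Bool
    if _, err := runtime.Int32P("not-a-number"); err != nil {
        fmt.Println("parse errors surface unchanged:", err)
    }
    fmt.Println(*name, *active) // alice true
}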
106 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go generated vendored Normal file
@@ -0,0 +1,106 @@
package runtime

import (
    "context"
    "io"
    "net/http"

    "github.com/golang/protobuf/ptypes/any"
    "github.com/grpc-ecosystem/grpc-gateway/internal"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/status"
)

// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into
// a proto struct used to represent error at the end of a stream.
type StreamErrorHandlerFunc func(context.Context, error) *StreamError

// StreamError is the payload for the final message in a server stream in the event that the server returns an
// error after a response message has already been sent.
type StreamError internal.StreamError

// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)

var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler

// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// If otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a Status message marshaled by a Marshaler.
//
// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
    // return Internal when Marshal failed
    const fallback = `{"code": 13, "message": "failed to marshal error message"}`

    s, ok := status.FromError(err)
    if !ok {
        s = status.New(codes.Unknown, err.Error())
    }

    w.Header().Del("Trailer")

    contentType := marshaler.ContentType()
    // Check marshaler on run time in order to keep backwards compatibility
    // An interface param needs to be added to the ContentType() function on
    // the Marshal interface to be able to remove this check
    if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
        pb := s.Proto()
        contentType = typeMarshaler.ContentTypeFromMessage(pb)
    }
    w.Header().Set("Content-Type", contentType)

    buf, merr := marshaler.Marshal(s.Proto())
    if merr != nil {
        grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
        w.WriteHeader(http.StatusInternalServerError)
        if _, err := io.WriteString(w, fallback); err != nil {
            grpclog.Infof("Failed to write response: %v", err)
        }
        return
    }

    md, ok := ServerMetadataFromContext(ctx)
    if !ok {
        grpclog.Infof("Failed to extract ServerMetadata from context")
    }

    handleForwardResponseServerMetadata(w, mux, md)
    handleForwardResponseTrailerHeader(w, md)
    st := HTTPStatusFromCode(s.Code())
    w.WriteHeader(st)
    if _, err := w.Write(buf); err != nil {
        grpclog.Infof("Failed to write response: %v", err)
    }

    handleForwardResponseTrailer(w, md)
}

// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
// default logic.
//
// It extracts the gRPC status from err if possible. The fields of the status are
// used to populate the returned StreamError, and the HTTP status code is derived
// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code.
func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
    grpcCode := codes.Unknown
    grpcMessage := err.Error()
    var grpcDetails []*any.Any
    if s, ok := status.FromError(err); ok {
        grpcCode = s.Code()
        grpcMessage = s.Message()
        grpcDetails = s.Proto().GetDetails()
    }
    httpCode := HTTPStatusFromCode(grpcCode)
    return &StreamError{
        GrpcCode:   int32(grpcCode),
        HttpCode:   int32(httpCode),
        Message:    grpcMessage,
        HttpStatus: http.StatusText(httpCode),
        Details:    grpcDetails,
    }
}
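Editor's note: a minimal sketch, not part of this commit, of plugging a custom ProtoErrorHandlerFunc in front of DefaultHTTPProtoErrorHandler; it assumes the runtime.WithProtoErrorHandler ServeMux option from this same package.

package main

import (
    "context"
    "log"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
)

// loggingErrorHandler records the failing path before delegating to the default handler.
func loggingErrorHandler(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
    log.Printf("gateway error for %s: %v", r.URL.Path, err)
    runtime.DefaultHTTPProtoErrorHandler(ctx, mux, m, w, r, err)
}

func main() {
    mux := runtime.NewServeMux(runtime.WithProtoErrorHandler(loggingErrorHandler))
    log.Fatal(http.ListenAndServe(":8080", mux))
}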
406 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go generated vendored Normal file
@@ -0,0 +1,406 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$")
|
||||
|
||||
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
|
||||
|
||||
// QueryParameterParser defines interface for all query parameter parsers
|
||||
type QueryParameterParser interface {
|
||||
Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
|
||||
}
|
||||
|
||||
// PopulateQueryParameters parses query parameters
|
||||
// into "msg" using current query parser
|
||||
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||
return currentQueryParser.Parse(msg, values, filter)
|
||||
}
|
||||
|
||||
type defaultQueryParser struct{}
|
||||
|
||||
// Parse populates "values" into "msg".
|
||||
// A value is ignored if its key starts with one of the elements in "filter".
|
||||
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||
for key, values := range values {
|
||||
match := valuesKeyRegexp.FindStringSubmatch(key)
|
||||
if len(match) == 3 {
|
||||
key = match[1]
|
||||
values = append([]string{match[2]}, values...)
|
||||
}
|
||||
fieldPath := strings.Split(key, ".")
|
||||
if filter.HasCommonPrefix(fieldPath) {
|
||||
continue
|
||||
}
|
||||
if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
|
||||
// It instantiates missing protobuf fields as it goes.
|
||||
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
|
||||
fieldPath := strings.Split(fieldPathString, ".")
|
||||
return populateFieldValueFromPath(msg, fieldPath, []string{value})
|
||||
}
|
||||
|
||||
func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
|
||||
m := reflect.ValueOf(msg)
|
||||
if m.Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("unexpected type %T: %v", msg, msg)
|
||||
}
|
||||
var props *proto.Properties
|
||||
m = m.Elem()
|
||||
for i, fieldName := range fieldPath {
|
||||
isLast := i == len(fieldPath)-1
|
||||
if !isLast && m.Kind() != reflect.Struct {
|
||||
return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
|
||||
}
|
||||
var f reflect.Value
|
||||
var err error
|
||||
f, props, err = fieldByProtoName(m, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if !f.IsValid() {
|
||||
grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
|
||||
return nil
|
||||
}
|
||||
|
||||
switch f.Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
if !isLast {
|
||||
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
|
||||
}
|
||||
m = f
|
||||
case reflect.Slice:
|
||||
if !isLast {
|
||||
return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
|
||||
}
|
||||
// Handle []byte
|
||||
if f.Type().Elem().Kind() == reflect.Uint8 {
|
||||
m = f
|
||||
break
|
||||
}
|
||||
return populateRepeatedField(f, values, props)
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
m = reflect.New(f.Type().Elem())
|
||||
f.Set(m.Convert(f.Type()))
|
||||
}
|
||||
m = f.Elem()
|
||||
continue
|
||||
case reflect.Struct:
|
||||
m = f
|
||||
continue
|
||||
case reflect.Map:
|
||||
if !isLast {
|
||||
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
|
||||
}
|
||||
return populateMapField(f, values, props)
|
||||
default:
|
||||
return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
|
||||
}
|
||||
}
|
||||
switch len(values) {
|
||||
case 0:
|
||||
return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
|
||||
case 1:
|
||||
default:
|
||||
grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
|
||||
}
|
||||
return populateField(m, values[0], props)
|
||||
}
|
||||
|
||||
// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
|
||||
// "m" must be a struct value. It returns zero reflect.Value if no such field found.
|
||||
func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
|
||||
props := proto.GetProperties(m.Type())
|
||||
|
||||
// look up field name in oneof map
|
||||
for _, op := range props.OneofTypes {
|
||||
if name == op.Prop.OrigName || name == op.Prop.JSONName {
|
||||
v := reflect.New(op.Type.Elem())
|
||||
field := m.Field(op.Field)
|
||||
if !field.IsNil() {
|
||||
return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
|
||||
}
|
||||
field.Set(v)
|
||||
return v.Elem().Field(0), op.Prop, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range props.Prop {
|
||||
if p.OrigName == name {
|
||||
return m.FieldByName(p.Name), p, nil
|
||||
}
|
||||
if p.JSONName == name {
|
||||
return m.FieldByName(p.Name), p, nil
|
||||
}
|
||||
}
|
||||
return reflect.Value{}, nil, nil
|
||||
}
|
||||
|
||||
func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
|
||||
if len(values) != 2 {
|
||||
return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
|
||||
}
|
||||
|
||||
key, value := values[0], values[1]
|
||||
keyType := f.Type().Key()
|
||||
valueType := f.Type().Elem()
|
||||
if f.IsNil() {
|
||||
f.Set(reflect.MakeMap(f.Type()))
|
||||
}
|
||||
|
||||
keyConv, ok := convFromType[keyType.Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
|
||||
}
|
||||
valueConv, ok := convFromType[valueType.Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
|
||||
}
|
||||
|
||||
keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
|
||||
if err := keyV[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
|
||||
if err := valueV[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
|
||||
f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
|
||||
elemType := f.Type().Elem()
|
||||
|
||||
// is the destination field a slice of an enumeration type?
|
||||
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
|
||||
return populateFieldEnumRepeated(f, values, enumValMap)
|
||||
}
|
||||
|
||||
conv, ok := convFromType[elemType.Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported field type %s", elemType)
|
||||
}
|
||||
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
|
||||
for i, v := range values {
|
||||
result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
|
||||
if err := result[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateField(f reflect.Value, value string, props *proto.Properties) error {
|
||||
i := f.Addr().Interface()
|
||||
|
||||
// Handle protobuf well known types
|
||||
var name string
|
||||
switch m := i.(type) {
|
||||
case interface{ XXX_WellKnownType() string }:
|
||||
name = m.XXX_WellKnownType()
|
||||
case proto.Message:
|
||||
const wktPrefix = "google.protobuf."
|
||||
if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
|
||||
name = fullName[len(wktPrefix):]
|
||||
}
|
||||
}
|
||||
switch name {
|
||||
case "Timestamp":
|
||||
if value == "null" {
|
||||
f.FieldByName("Seconds").SetInt(0)
|
||||
f.FieldByName("Nanos").SetInt(0)
|
||||
return nil
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339Nano, value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Timestamp: %v", err)
|
||||
}
|
||||
f.FieldByName("Seconds").SetInt(int64(t.Unix()))
|
||||
f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
|
||||
return nil
|
||||
case "Duration":
|
||||
if value == "null" {
|
||||
f.FieldByName("Seconds").SetInt(0)
|
||||
f.FieldByName("Nanos").SetInt(0)
|
||||
return nil
|
||||
}
|
||||
d, err := time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Duration: %v", err)
|
||||
}
|
||||
|
||||
ns := d.Nanoseconds()
|
||||
s := ns / 1e9
|
||||
ns %= 1e9
|
||||
f.FieldByName("Seconds").SetInt(s)
|
||||
f.FieldByName("Nanos").SetInt(ns)
|
||||
return nil
|
||||
case "DoubleValue":
|
||||
fallthrough
|
||||
case "FloatValue":
|
||||
float64Val, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad DoubleValue: %s", value)
|
||||
}
|
||||
f.FieldByName("Value").SetFloat(float64Val)
|
||||
return nil
|
||||
case "Int64Value":
|
||||
fallthrough
|
||||
case "Int32Value":
|
||||
int64Val, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad DoubleValue: %s", value)
|
||||
}
|
||||
f.FieldByName("Value").SetInt(int64Val)
|
||||
return nil
|
||||
case "UInt64Value":
|
||||
fallthrough
|
||||
case "UInt32Value":
|
||||
uint64Val, err := strconv.ParseUint(value, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad DoubleValue: %s", value)
|
||||
}
|
||||
f.FieldByName("Value").SetUint(uint64Val)
|
||||
return nil
|
||||
case "BoolValue":
|
||||
if value == "true" {
|
||||
f.FieldByName("Value").SetBool(true)
|
||||
} else if value == "false" {
|
||||
f.FieldByName("Value").SetBool(false)
|
||||
} else {
|
||||
return fmt.Errorf("bad BoolValue: %s", value)
|
||||
}
|
||||
return nil
|
||||
case "StringValue":
|
||||
f.FieldByName("Value").SetString(value)
|
||||
return nil
|
||||
case "BytesValue":
|
||||
bytesVal, err := base64.StdEncoding.DecodeString(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad BytesValue: %s", value)
|
||||
}
|
||||
f.FieldByName("Value").SetBytes(bytesVal)
|
||||
return nil
|
||||
case "FieldMask":
|
||||
p := f.FieldByName("Paths")
|
||||
for _, v := range strings.Split(value, ",") {
|
||||
if v != "" {
|
||||
p.Set(reflect.Append(p, reflect.ValueOf(v)))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle Time and Duration stdlib types
|
||||
switch t := i.(type) {
|
||||
case *time.Time:
|
||||
pt, err := time.Parse(time.RFC3339Nano, value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Timestamp: %v", err)
|
||||
}
|
||||
*t = pt
|
||||
return nil
|
||||
case *time.Duration:
|
||||
d, err := time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Duration: %v", err)
|
||||
}
|
||||
*t = d
|
||||
return nil
|
||||
}
|
||||
|
||||
// is the destination field an enumeration type?
|
||||
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
|
||||
return populateFieldEnum(f, value, enumValMap)
|
||||
}
|
||||
|
||||
conv, ok := convFromType[f.Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("field type %T is not supported in query parameters", i)
|
||||
}
|
||||
result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
|
||||
if err := result[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
f.Set(result[0].Convert(f.Type()))
|
||||
return nil
|
||||
}
|
||||
|
||||
func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
|
||||
// see if it's an enumeration string
|
||||
if enumVal, ok := enumValMap[value]; ok {
|
||||
return reflect.ValueOf(enumVal).Convert(t), nil
|
||||
}
|
||||
|
||||
// check for an integer that matches an enumeration value
|
||||
eVal, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
|
||||
}
|
||||
for _, v := range enumValMap {
|
||||
if v == int32(eVal) {
|
||||
return reflect.ValueOf(eVal).Convert(t), nil
|
||||
}
|
||||
}
|
||||
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
|
||||
}
|
||||
|
||||
func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
|
||||
cval, err := convertEnum(value, f.Type(), enumValMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Set(cval)
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
|
||||
elemType := f.Type().Elem()
|
||||
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
|
||||
for i, v := range values {
|
||||
result, err := convertEnum(v, elemType, enumValMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Index(i).Set(result)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
convFromType = map[reflect.Kind]reflect.Value{
|
||||
reflect.String: reflect.ValueOf(String),
|
||||
reflect.Bool: reflect.ValueOf(Bool),
|
||||
reflect.Float64: reflect.ValueOf(Float64),
|
||||
reflect.Float32: reflect.ValueOf(Float32),
|
||||
reflect.Int64: reflect.ValueOf(Int64),
|
||||
reflect.Int32: reflect.ValueOf(Int32),
|
||||
reflect.Uint64: reflect.ValueOf(Uint64),
|
||||
reflect.Uint32: reflect.ValueOf(Uint32),
|
||||
reflect.Slice: reflect.ValueOf(Bytes),
|
||||
}
|
||||
)
|
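Editor's note: a hedged sketch, not part of this commit, of the PopulateQueryParameters entry point above. examplepb.User is a hypothetical generated message with Name (string) and Age (int32) fields; the DoubleArray filter skips "name" as if it had already been bound from the URL path.

package main

import (
    "fmt"
    "net/url"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "github.com/grpc-ecosystem/grpc-gateway/utilities"

    examplepb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
    var msg examplepb.User
    values := url.Values{"name": {"alice"}, "age": {"42"}}
    filter := utilities.NewDoubleArray([][]string{{"name"}})

    if err := runtime.PopulateQueryParameters(&msg, values, filter); err != nil {
        panic(err)
    }
    fmt.Println(msg.GetAge()) // 42; "name" was filtered out
}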
21 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel generated vendored Normal file
@@ -0,0 +1,21 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "pattern.go",
        "readerfactory.go",
        "trie.go",
    ],
    importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities",
)

go_test(
    name = "go_default_test",
    size = "small",
    srcs = ["trie_test.go"],
    embed = [":go_default_library"],
)
2 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go generated vendored Normal file
@@ -0,0 +1,2 @@
// Package utilities provides members for internal use in grpc-gateway.
package utilities
22 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go generated vendored Normal file
@@ -0,0 +1,22 @@
package utilities

// An OpCode is an opcode of compiled path patterns.
type OpCode int

// These constants are the valid values of OpCode.
const (
    // OpNop does nothing
    OpNop = OpCode(iota)
    // OpPush pushes a component to stack
    OpPush
    // OpLitPush pushes a component to stack if it matches to the literal
    OpLitPush
    // OpPushM concatenates the remaining components and pushes it to stack
    OpPushM
    // OpConcatN pops N items from stack, concatenates them and pushes it back to stack
    OpConcatN
    // OpCapture pops an item and binds it to the variable
    OpCapture
    // OpEnd is the least positive invalid opcode.
    OpEnd
)
20 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go generated vendored Normal file
@@ -0,0 +1,20 @@
package utilities

import (
    "bytes"
    "io"
    "io/ioutil"
)

// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
// at the start of the stream
func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
    b, err := ioutil.ReadAll(r)
    if err != nil {
        return nil, err
    }

    return func() io.Reader {
        return bytes.NewReader(b)
    }, nil
}
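Editor's note: a short sketch, not part of this commit, showing what IOReaderFactory buys you: the stream is buffered once and can then be re-read, which the gateway relies on when it needs to decode a request body more than once.

package main

import (
    "fmt"
    "io/ioutil"
    "strings"

    "github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
    newReader, err := utilities.IOReaderFactory(strings.NewReader(`{"name":"alice"}`))
    if err != nil {
        panic(err)
    }
    first, _ := ioutil.ReadAll(newReader())
    second, _ := ioutil.ReadAll(newReader())     // a fresh reader over the same bytes
    fmt.Println(string(first) == string(second)) // true
}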
177 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go generated vendored Normal file
@@ -0,0 +1,177 @@
|
||||
package utilities
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// DoubleArray is a Double Array implementation of trie on sequences of strings.
|
||||
type DoubleArray struct {
|
||||
// Encoding keeps an encoding from string to int
|
||||
Encoding map[string]int
|
||||
// Base is the base array of Double Array
|
||||
Base []int
|
||||
// Check is the check array of Double Array
|
||||
Check []int
|
||||
}
|
||||
|
||||
// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
|
||||
func NewDoubleArray(seqs [][]string) *DoubleArray {
|
||||
da := &DoubleArray{Encoding: make(map[string]int)}
|
||||
if len(seqs) == 0 {
|
||||
return da
|
||||
}
|
||||
|
||||
encoded := registerTokens(da, seqs)
|
||||
sort.Sort(byLex(encoded))
|
||||
|
||||
root := node{row: -1, col: -1, left: 0, right: len(encoded)}
|
||||
addSeqs(da, encoded, 0, root)
|
||||
|
||||
for i := len(da.Base); i > 0; i-- {
|
||||
if da.Check[i-1] != 0 {
|
||||
da.Base = da.Base[:i]
|
||||
da.Check = da.Check[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
return da
|
||||
}
|
||||
|
||||
func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
|
||||
var result [][]int
|
||||
for _, seq := range seqs {
|
||||
var encoded []int
|
||||
for _, token := range seq {
|
||||
if _, ok := da.Encoding[token]; !ok {
|
||||
da.Encoding[token] = len(da.Encoding)
|
||||
}
|
||||
encoded = append(encoded, da.Encoding[token])
|
||||
}
|
||||
result = append(result, encoded)
|
||||
}
|
||||
for i := range result {
|
||||
result[i] = append(result[i], len(da.Encoding))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
type node struct {
|
||||
row, col int
|
||||
left, right int
|
||||
}
|
||||
|
||||
func (n node) value(seqs [][]int) int {
|
||||
return seqs[n.row][n.col]
|
||||
}
|
||||
|
||||
func (n node) children(seqs [][]int) []*node {
|
||||
var result []*node
|
||||
lastVal := int(-1)
|
||||
last := new(node)
|
||||
for i := n.left; i < n.right; i++ {
|
||||
if lastVal == seqs[i][n.col+1] {
|
||||
continue
|
||||
}
|
||||
last.right = i
|
||||
last = &node{
|
||||
row: i,
|
||||
col: n.col + 1,
|
||||
left: i,
|
||||
}
|
||||
result = append(result, last)
|
||||
}
|
||||
last.right = n.right
|
||||
return result
|
||||
}
|
||||
|
||||
func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
|
||||
ensureSize(da, pos)
|
||||
|
||||
children := n.children(seqs)
|
||||
var i int
|
||||
for i = 1; ; i++ {
|
||||
ok := func() bool {
|
||||
for _, child := range children {
|
||||
code := child.value(seqs)
|
||||
j := i + code
|
||||
ensureSize(da, j)
|
||||
if da.Check[j] != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}()
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
da.Base[pos] = i
|
||||
for _, child := range children {
|
||||
code := child.value(seqs)
|
||||
j := i + code
|
||||
da.Check[j] = pos + 1
|
||||
}
|
||||
terminator := len(da.Encoding)
|
||||
for _, child := range children {
|
||||
code := child.value(seqs)
|
||||
if code == terminator {
|
||||
continue
|
||||
}
|
||||
j := i + code
|
||||
addSeqs(da, seqs, j, *child)
|
||||
}
|
||||
}
|
||||
|
||||
func ensureSize(da *DoubleArray, i int) {
|
||||
for i >= len(da.Base) {
|
||||
da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
|
||||
da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
|
||||
}
|
||||
}
|
||||
|
||||
type byLex [][]int
|
||||
|
||||
func (l byLex) Len() int { return len(l) }
|
||||
func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
func (l byLex) Less(i, j int) bool {
|
||||
si := l[i]
|
||||
sj := l[j]
|
||||
var k int
|
||||
for k = 0; k < len(si) && k < len(sj); k++ {
|
||||
if si[k] < sj[k] {
|
||||
return true
|
||||
}
|
||||
if si[k] > sj[k] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if k < len(sj) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
|
||||
func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
|
||||
if len(da.Base) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var i int
|
||||
for _, t := range seq {
|
||||
code, ok := da.Encoding[t]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
j := da.Base[i] + code
|
||||
if len(da.Check) <= j || da.Check[j] != i+1 {
|
||||
break
|
||||
}
|
||||
i = j
|
||||
}
|
||||
j := da.Base[i] + len(da.Encoding)
|
||||
if len(da.Check) <= j || da.Check[j] != i+1 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
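Editor's note: a small sketch, not part of this commit, of the DoubleArray trie in use. It answers "is any registered field path a prefix of this one?", which query.go uses to skip query parameters that were already bound elsewhere.

package main

import (
    "fmt"

    "github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
    da := utilities.NewDoubleArray([][]string{
        {"user", "name"},
        {"user", "id"},
    })
    fmt.Println(da.HasCommonPrefix([]string{"user", "name", "first"})) // true
    fmt.Println(da.HasCommonPrefix([]string{"order", "id"}))           // false
}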
172 vendor/github.com/stretchr/testify/assert/assertion_compare.go generated vendored
@@ -13,12 +13,42 @@ const (
|
||||
compareGreater
|
||||
)
|
||||
|
||||
var (
|
||||
intType = reflect.TypeOf(int(1))
|
||||
int8Type = reflect.TypeOf(int8(1))
|
||||
int16Type = reflect.TypeOf(int16(1))
|
||||
int32Type = reflect.TypeOf(int32(1))
|
||||
int64Type = reflect.TypeOf(int64(1))
|
||||
|
||||
uintType = reflect.TypeOf(uint(1))
|
||||
uint8Type = reflect.TypeOf(uint8(1))
|
||||
uint16Type = reflect.TypeOf(uint16(1))
|
||||
uint32Type = reflect.TypeOf(uint32(1))
|
||||
uint64Type = reflect.TypeOf(uint64(1))
|
||||
|
||||
float32Type = reflect.TypeOf(float32(1))
|
||||
float64Type = reflect.TypeOf(float64(1))
|
||||
|
||||
stringType = reflect.TypeOf("")
|
||||
)
|
||||
|
||||
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
obj1Value := reflect.ValueOf(obj1)
|
||||
obj2Value := reflect.ValueOf(obj2)
|
||||
|
||||
// throughout this switch we try and avoid calling .Convert() if possible,
|
||||
// as this has a pretty big performance impact
|
||||
switch kind {
|
||||
case reflect.Int:
|
||||
{
|
||||
intobj1 := obj1.(int)
|
||||
intobj2 := obj2.(int)
|
||||
intobj1, ok := obj1.(int)
|
||||
if !ok {
|
||||
intobj1 = obj1Value.Convert(intType).Interface().(int)
|
||||
}
|
||||
intobj2, ok := obj2.(int)
|
||||
if !ok {
|
||||
intobj2 = obj2Value.Convert(intType).Interface().(int)
|
||||
}
|
||||
if intobj1 > intobj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -31,8 +61,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Int8:
|
||||
{
|
||||
int8obj1 := obj1.(int8)
|
||||
int8obj2 := obj2.(int8)
|
||||
int8obj1, ok := obj1.(int8)
|
||||
if !ok {
|
||||
int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
|
||||
}
|
||||
int8obj2, ok := obj2.(int8)
|
||||
if !ok {
|
||||
int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
|
||||
}
|
||||
if int8obj1 > int8obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -45,8 +81,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Int16:
|
||||
{
|
||||
int16obj1 := obj1.(int16)
|
||||
int16obj2 := obj2.(int16)
|
||||
int16obj1, ok := obj1.(int16)
|
||||
if !ok {
|
||||
int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
|
||||
}
|
||||
int16obj2, ok := obj2.(int16)
|
||||
if !ok {
|
||||
int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
|
||||
}
|
||||
if int16obj1 > int16obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -59,8 +101,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Int32:
|
||||
{
|
||||
int32obj1 := obj1.(int32)
|
||||
int32obj2 := obj2.(int32)
|
||||
int32obj1, ok := obj1.(int32)
|
||||
if !ok {
|
||||
int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
|
||||
}
|
||||
int32obj2, ok := obj2.(int32)
|
||||
if !ok {
|
||||
int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
|
||||
}
|
||||
if int32obj1 > int32obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -73,8 +121,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Int64:
|
||||
{
|
||||
int64obj1 := obj1.(int64)
|
||||
int64obj2 := obj2.(int64)
|
||||
int64obj1, ok := obj1.(int64)
|
||||
if !ok {
|
||||
int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
|
||||
}
|
||||
int64obj2, ok := obj2.(int64)
|
||||
if !ok {
|
||||
int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
|
||||
}
|
||||
if int64obj1 > int64obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -87,8 +141,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Uint:
|
||||
{
|
||||
uintobj1 := obj1.(uint)
|
||||
uintobj2 := obj2.(uint)
|
||||
uintobj1, ok := obj1.(uint)
|
||||
if !ok {
|
||||
uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
|
||||
}
|
||||
uintobj2, ok := obj2.(uint)
|
||||
if !ok {
|
||||
uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
|
||||
}
|
||||
if uintobj1 > uintobj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -101,8 +161,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Uint8:
|
||||
{
|
||||
uint8obj1 := obj1.(uint8)
|
||||
uint8obj2 := obj2.(uint8)
|
||||
uint8obj1, ok := obj1.(uint8)
|
||||
if !ok {
|
||||
uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
|
||||
}
|
||||
uint8obj2, ok := obj2.(uint8)
|
||||
if !ok {
|
||||
uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
|
||||
}
|
||||
if uint8obj1 > uint8obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -115,8 +181,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Uint16:
|
||||
{
|
||||
uint16obj1 := obj1.(uint16)
|
||||
uint16obj2 := obj2.(uint16)
|
||||
uint16obj1, ok := obj1.(uint16)
|
||||
if !ok {
|
||||
uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
|
||||
}
|
||||
uint16obj2, ok := obj2.(uint16)
|
||||
if !ok {
|
||||
uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
|
||||
}
|
||||
if uint16obj1 > uint16obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -129,8 +201,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Uint32:
|
||||
{
|
||||
uint32obj1 := obj1.(uint32)
|
||||
uint32obj2 := obj2.(uint32)
|
||||
uint32obj1, ok := obj1.(uint32)
|
||||
if !ok {
|
||||
uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
|
||||
}
|
||||
uint32obj2, ok := obj2.(uint32)
|
||||
if !ok {
|
||||
uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
|
||||
}
|
||||
if uint32obj1 > uint32obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -143,8 +221,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Uint64:
|
||||
{
|
||||
uint64obj1 := obj1.(uint64)
|
||||
uint64obj2 := obj2.(uint64)
|
||||
uint64obj1, ok := obj1.(uint64)
|
||||
if !ok {
|
||||
uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
|
||||
}
|
||||
uint64obj2, ok := obj2.(uint64)
|
||||
if !ok {
|
||||
uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
|
||||
}
|
||||
if uint64obj1 > uint64obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -157,8 +241,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Float32:
|
||||
{
|
||||
float32obj1 := obj1.(float32)
|
||||
float32obj2 := obj2.(float32)
|
||||
float32obj1, ok := obj1.(float32)
|
||||
if !ok {
|
||||
float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
|
||||
}
|
||||
float32obj2, ok := obj2.(float32)
|
||||
if !ok {
|
||||
float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
|
||||
}
|
||||
if float32obj1 > float32obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -171,8 +261,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.Float64:
|
||||
{
|
||||
float64obj1 := obj1.(float64)
|
||||
float64obj2 := obj2.(float64)
|
||||
float64obj1, ok := obj1.(float64)
|
||||
if !ok {
|
||||
float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
|
||||
}
|
||||
float64obj2, ok := obj2.(float64)
|
||||
if !ok {
|
||||
float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
|
||||
}
|
||||
if float64obj1 > float64obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -185,8 +281,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
}
|
||||
case reflect.String:
|
||||
{
|
||||
stringobj1 := obj1.(string)
|
||||
stringobj2 := obj2.(string)
|
||||
stringobj1, ok := obj1.(string)
|
||||
if !ok {
|
||||
stringobj1 = obj1Value.Convert(stringType).Interface().(string)
|
||||
}
|
||||
stringobj2, ok := obj2.(string)
|
||||
if !ok {
|
||||
stringobj2 = obj2Value.Convert(stringType).Interface().(string)
|
||||
}
|
||||
if stringobj1 > stringobj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
@ -240,6 +342,24 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
|
||||
return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
|
||||
}
|
||||
|
||||
// Positive asserts that the specified element is positive
|
||||
//
|
||||
// assert.Positive(t, 1)
|
||||
// assert.Positive(t, 1.23)
|
||||
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
|
||||
zero := reflect.Zero(reflect.TypeOf(e))
|
||||
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
|
||||
}
|
||||
|
||||
// Negative asserts that the specified element is negative
|
||||
//
|
||||
// assert.Negative(t, -1)
|
||||
// assert.Negative(t, -1.23)
|
||||
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
|
||||
zero := reflect.Zero(reflect.TypeOf(e))
|
||||
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
|
||||
}
|
||||
|
||||
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
|
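Editor's note: a hedged sketch, not part of this commit, exercising the assertions this hunk adds. Positive and Negative compare against the typed zero value, and the new reflect.Convert fallback lets defined types such as `type myInt int` flow through Greater/Less without a panic.

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

type myInt int

func TestOrderedComparisons(t *testing.T) {
    assert.Positive(t, 1.23)
    assert.Negative(t, -2)
    assert.Greater(t, myInt(3), myInt(1)) // handled by the Convert fallback above
}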
97 vendor/github.com/stretchr/testify/assert/assertion_format.go generated vendored
@@ -114,6 +114,24 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
|
||||
return Error(t, err, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
|
||||
// This is a wrapper for errors.As.
|
||||
func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Eventuallyf asserts that given condition will be met in waitFor time,
|
||||
// periodically checking target function each tick.
|
||||
//
|
||||
@ -321,6 +339,54 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil
|
||||
return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsDecreasingf asserts that the collection is decreasing
|
||||
//
|
||||
// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
|
||||
// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
|
||||
// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
|
||||
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsIncreasingf asserts that the collection is increasing
|
||||
//
|
||||
// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
|
||||
// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
|
||||
// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
|
||||
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsNonDecreasingf asserts that the collection is not decreasing
|
||||
//
|
||||
// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
|
||||
// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
|
||||
// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
|
||||
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsNonIncreasingf asserts that the collection is not increasing
|
||||
//
|
||||
// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
|
||||
// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
|
||||
// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
|
||||
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsTypef asserts that the specified objects are of the same type.
|
||||
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
@ -375,6 +441,17 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
|
||||
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Negativef asserts that the specified element is negative
|
||||
//
|
||||
// assert.Negativef(t, -1, "error message %s", "formatted")
|
||||
// assert.Negativef(t, -1.23, "error message %s", "formatted")
|
||||
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Negative(t, e, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Neverf asserts that the given condition doesn't satisfy in waitFor time,
|
||||
// periodically checking the target function each tick.
|
||||
//
|
||||
@ -476,6 +553,15 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s
|
||||
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotErrorIsf asserts that at none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotNilf asserts that the specified object is not nil.
|
||||
//
|
||||
// assert.NotNilf(t, err, "error message %s", "formatted")
|
||||
@ -572,6 +658,17 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str
|
||||
return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Positivef asserts that the specified element is positive
|
||||
//
|
||||
// assert.Positivef(t, 1, "error message %s", "formatted")
|
||||
// assert.Positivef(t, 1.23, "error message %s", "formatted")
|
||||
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Positive(t, e, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Regexpf asserts that a specified regexp matches a string.
|
||||
//
|
||||
// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
|
||||
|
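Editor's note: a brief sketch, not part of this commit, of the f-suffixed variants added here; they take a printf-style message plus args instead of the msgAndArgs slice.

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestFormattedVariants(t *testing.T) {
    latencies := []int{3, 5, 9}
    assert.IsIncreasingf(t, latencies, "latencies for run %d should grow", 7)
    assert.Positivef(t, latencies[0], "first latency in run %d", 7)
}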
194 vendor/github.com/stretchr/testify/assert/assertion_forward.go generated vendored
@@ -204,6 +204,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
|
||||
return Error(a.t, err, msgAndArgs...)
|
||||
}
|
||||
|
||||
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
|
||||
// This is a wrapper for errors.As.
|
||||
func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorAs(a.t, err, target, msgAndArgs...)
|
||||
}
|
||||
|
||||
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
|
||||
// This is a wrapper for errors.As.
|
||||
func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorAsf(a.t, err, target, msg, args...)
|
||||
}
|
||||
|
||||
// ErrorIs asserts that at least one of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorIs(a.t, err, target, msgAndArgs...)
|
||||
}
|
||||
|
||||
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorIsf(a.t, err, target, msg, args...)
|
||||
}
|
||||
|
||||
// Errorf asserts that a function returned an error (i.e. not `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
@ -631,6 +667,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo
|
||||
return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
|
||||
}
|
||||
|
||||
// IsDecreasing asserts that the collection is decreasing
|
||||
//
|
||||
// a.IsDecreasing([]int{2, 1, 0})
|
||||
// a.IsDecreasing([]float{2, 1})
|
||||
// a.IsDecreasing([]string{"b", "a"})
|
||||
func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsDecreasing(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsDecreasingf asserts that the collection is decreasing
|
||||
//
|
||||
// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
|
||||
// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted")
|
||||
// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
|
||||
func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsDecreasingf(a.t, object, msg, args...)
|
||||
}
|
||||
|
||||
// IsIncreasing asserts that the collection is increasing
|
||||
//
|
||||
// a.IsIncreasing([]int{1, 2, 3})
|
||||
// a.IsIncreasing([]float{1, 2})
|
||||
// a.IsIncreasing([]string{"a", "b"})
|
||||
func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsIncreasing(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsIncreasingf asserts that the collection is increasing
|
||||
//
|
||||
// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
|
||||
// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted")
|
||||
// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
|
||||
func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsIncreasingf(a.t, object, msg, args...)
|
||||
}
|
||||
|
||||
// IsNonDecreasing asserts that the collection is not decreasing
|
||||
//
|
||||
// a.IsNonDecreasing([]int{1, 1, 2})
|
||||
// a.IsNonDecreasing([]float{1, 2})
|
||||
// a.IsNonDecreasing([]string{"a", "b"})
|
||||
func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsNonDecreasing(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsNonDecreasingf asserts that the collection is not decreasing
|
||||
//
|
||||
// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
|
||||
// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted")
|
||||
// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
|
||||
func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsNonDecreasingf(a.t, object, msg, args...)
|
||||
}
|
||||
|
||||
// IsNonIncreasing asserts that the collection is not increasing
|
||||
//
|
||||
// a.IsNonIncreasing([]int{2, 1, 1})
|
||||
// a.IsNonIncreasing([]float{2, 1})
|
||||
// a.IsNonIncreasing([]string{"b", "a"})
|
||||
func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsNonIncreasing(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsNonIncreasingf asserts that the collection is not increasing
|
||||
//
|
||||
// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
|
||||
// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted")
|
||||
// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
|
||||
func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsNonIncreasingf(a.t, object, msg, args...)
|
||||
}
|
||||
|
||||
// IsType asserts that the specified objects are of the same type.
|
||||
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
@ -739,6 +871,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
|
||||
return Lessf(a.t, e1, e2, msg, args...)
|
||||
}
|
||||
|
||||
// Negative asserts that the specified element is negative
|
||||
//
|
||||
// a.Negative(-1)
|
||||
// a.Negative(-1.23)
|
||||
func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Negative(a.t, e, msgAndArgs...)
|
||||
}
|
||||
|
||||
// Negativef asserts that the specified element is negative
|
||||
//
|
||||
// a.Negativef(-1, "error message %s", "formatted")
|
||||
// a.Negativef(-1.23, "error message %s", "formatted")
|
||||
func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Negativef(a.t, e, msg, args...)
|
||||
}
|
||||
|
||||
// Never asserts that the given condition doesn't satisfy in waitFor time,
|
||||
// periodically checking the target function each tick.
|
||||
//
|
||||
@ -941,6 +1095,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
|
||||
return NotEqualf(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// NotErrorIs asserts that at none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotErrorIs(a.t, err, target, msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotErrorIsf asserts that at none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotErrorIsf(a.t, err, target, msg, args...)
|
||||
}
|
||||
|
||||
// NotNil asserts that the specified object is not nil.
|
||||
//
|
||||
// a.NotNil(err)
|
||||
@ -1133,6 +1305,28 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b
|
||||
return Panicsf(a.t, f, msg, args...)
|
||||
}
|
||||
|
||||
// Positive asserts that the specified element is positive
|
||||
//
|
||||
// a.Positive(1)
|
||||
// a.Positive(1.23)
|
||||
func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Positive(a.t, e, msgAndArgs...)
|
||||
}
|
||||
|
||||
// Positivef asserts that the specified element is positive
|
||||
//
|
||||
// a.Positivef(1, "error message %s", "formatted")
|
||||
// a.Positivef(1.23, "error message %s", "formatted")
|
||||
func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Positivef(a.t, e, msg, args...)
|
||||
}
|
||||
|
||||
// Regexp asserts that a specified regexp matches a string.
|
||||
//
|
||||
// a.Regexp(regexp.MustCompile("start"), "it's starting")
|
||||
|
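Editor's note: a short sketch, not part of this commit, of the same additions used through the Assertions wrapper, which carries the *testing.T so it does not have to be repeated on every call.

package example

import (
    "errors"
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestWithWrapper(t *testing.T) {
    a := assert.New(t)
    base := errors.New("boom")

    a.ErrorIs(fmt.Errorf("wrap: %w", base), base)
    a.Positive(42)
    a.IsNonDecreasing([]int{1, 1, 2})
}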
81 vendor/github.com/stretchr/testify/assert/assertion_order.go generated vendored Normal file
@@ -0,0 +1,81 @@
package assert

import (
    "fmt"
    "reflect"
)

// isOrdered checks that collection contains orderable elements.
func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
    objKind := reflect.TypeOf(object).Kind()
    if objKind != reflect.Slice && objKind != reflect.Array {
        return false
    }

    objValue := reflect.ValueOf(object)
    objLen := objValue.Len()

    if objLen <= 1 {
        return true
    }

    value := objValue.Index(0)
    valueInterface := value.Interface()
    firstValueKind := value.Kind()

    for i := 1; i < objLen; i++ {
        prevValue := value
        prevValueInterface := valueInterface

        value = objValue.Index(i)
        valueInterface = value.Interface()

        compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)

        if !isComparable {
            return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
        }

        if !containsValue(allowedComparesResults, compareResult) {
            return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
        }
    }

    return true
}

// IsIncreasing asserts that the collection is increasing
//
// assert.IsIncreasing(t, []int{1, 2, 3})
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
    return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
}

// IsNonIncreasing asserts that the collection is not increasing
//
// assert.IsNonIncreasing(t, []int{2, 1, 1})
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
    return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
}

// IsDecreasing asserts that the collection is decreasing
//
// assert.IsDecreasing(t, []int{2, 1, 0})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
    return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
}

// IsNonDecreasing asserts that the collection is not decreasing
//
// assert.IsNonDecreasing(t, []int{1, 1, 2})
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
    return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
}
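Editor's note: a short sketch, not part of this commit. isOrdered walks adjacent pairs with compare, so each helper only accepts its listed CompareType results; the Non* variants additionally allow ties.

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestCollectionOrdering(t *testing.T) {
    assert.IsIncreasing(t, []int{1, 2, 3})
    assert.IsDecreasing(t, []float64{2.5, 1.0, 0.5})
    assert.IsNonIncreasing(t, []string{"b", "b", "a"}) // equal neighbours are allowed
}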
83 vendor/github.com/stretchr/testify/assert/assertions.go generated vendored
@@ -172,8 +172,8 @@ func isTest(name, prefix string) bool {
|
||||
if len(name) == len(prefix) { // "Test" is ok
|
||||
return true
|
||||
}
|
||||
rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
|
||||
return !unicode.IsLower(rune)
|
||||
r, _ := utf8.DecodeRuneInString(name[len(prefix):])
|
||||
return !unicode.IsLower(r)
|
||||
}
|
||||
|
||||
func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
|
||||
@ -1622,6 +1622,7 @@ var spewConfig = spew.ConfigState{
|
||||
DisableCapacities: true,
|
||||
SortKeys: true,
|
||||
DisableMethods: true,
|
||||
MaxDepth: 10,
|
||||
}
|
||||
|
||||
type tHelper interface {
|
||||
@ -1693,3 +1694,81 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorIs asserts that at least one of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if errors.Is(err, target) {
|
||||
return true
|
||||
}
|
||||
|
||||
var expectedText string
|
||||
if target != nil {
|
||||
expectedText = target.Error()
|
||||
}
|
||||
|
||||
chain := buildErrorChainString(err)
|
||||
|
||||
return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
|
||||
"expected: %q\n"+
|
||||
"in chain: %s", expectedText, chain,
|
||||
), msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotErrorIs asserts that at none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if !errors.Is(err, target) {
|
||||
return true
|
||||
}
|
||||
|
||||
var expectedText string
|
||||
if target != nil {
|
||||
expectedText = target.Error()
|
||||
}
|
||||
|
||||
chain := buildErrorChainString(err)
|
||||
|
||||
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
|
||||
"found: %q\n"+
|
||||
"in chain: %s", expectedText, chain,
|
||||
), msgAndArgs...)
|
||||
}
|
||||
|
||||
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
|
||||
// This is a wrapper for errors.As.
|
||||
func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if errors.As(err, target) {
|
||||
return true
|
||||
}
|
||||
|
||||
chain := buildErrorChainString(err)
|
||||
|
||||
return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
|
||||
"expected: %q\n"+
|
||||
"in chain: %s", target, chain,
|
||||
), msgAndArgs...)
|
||||
}
|
||||
|
||||
func buildErrorChainString(err error) string {
|
||||
if err == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
e := errors.Unwrap(err)
|
||||
chain := fmt.Sprintf("%q", err.Error())
|
||||
for e != nil {
|
||||
chain += fmt.Sprintf("\n\t%q", e.Error())
|
||||
e = errors.Unwrap(e)
|
||||
}
|
||||
return chain
|
||||
}
|
||||
|
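
The error-chain helpers above wrap the standard library's errors.Is/errors.As. A minimal sketch of how they behave with wrapped errors (illustrative only, not part of the vendored file):

```go
package example

import (
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestWrappedError is a hypothetical test: wrapping with %w keeps the target
// error in the chain, so ErrorIs and ErrorAs can still find it.
func TestWrappedError(t *testing.T) {
	_, err := os.Open("/does/not/exist")
	wrapped := fmt.Errorf("loading config: %w", err)

	assert.ErrorIs(t, wrapped, os.ErrNotExist)      // passes: in the chain
	assert.NotErrorIs(t, wrapped, os.ErrPermission) // passes: not in the chain

	var pathErr *os.PathError
	if assert.ErrorAs(t, wrapped, &pathErr) {
		t.Logf("failing path: %s", pathErr.Path)
	}
}
```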

248 vendor/github.com/stretchr/testify/require/require.go generated vendored
@@ -256,6 +256,54 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) {
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
|
||||
// This is a wrapper for errors.As.
|
||||
func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.ErrorAs(t, err, target, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
|
||||
// This is a wrapper for errors.As.
|
||||
func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.ErrorAsf(t, err, target, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// ErrorIs asserts that at least one of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.ErrorIs(t, err, target, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.ErrorIsf(t, err, target, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// Errorf asserts that a function returned an error (i.e. not `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
@ -806,6 +854,126 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// IsDecreasing asserts that the collection is decreasing
|
||||
//
|
||||
// assert.IsDecreasing(t, []int{2, 1, 0})
|
||||
// assert.IsDecreasing(t, []float{2, 1})
|
||||
// assert.IsDecreasing(t, []string{"b", "a"})
|
||||
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.IsDecreasing(t, object, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// IsDecreasingf asserts that the collection is decreasing
|
||||
//
|
||||
// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
|
||||
// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
|
||||
// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
|
||||
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.IsDecreasingf(t, object, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// IsIncreasing asserts that the collection is increasing
|
||||
//
|
||||
// assert.IsIncreasing(t, []int{1, 2, 3})
|
||||
// assert.IsIncreasing(t, []float{1, 2})
|
||||
// assert.IsIncreasing(t, []string{"a", "b"})
|
||||
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.IsIncreasing(t, object, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// IsIncreasingf asserts that the collection is increasing
|
||||
//
|
||||
// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
|
||||
// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
|
||||
// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
|
||||
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.IsIncreasingf(t, object, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// IsNonDecreasing asserts that the collection is not decreasing
|
||||
//
|
||||
// assert.IsNonDecreasing(t, []int{1, 1, 2})
|
||||
// assert.IsNonDecreasing(t, []float{1, 2})
|
||||
// assert.IsNonDecreasing(t, []string{"a", "b"})
|
||||
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.IsNonDecreasing(t, object, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// IsNonDecreasingf asserts that the collection is not decreasing
|
||||
//
|
||||
// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
|
||||
// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
|
||||
// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
|
||||
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.IsNonDecreasingf(t, object, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// IsNonIncreasing asserts that the collection is not increasing
|
||||
//
|
||||
// assert.IsNonIncreasing(t, []int{2, 1, 1})
|
||||
// assert.IsNonIncreasing(t, []float{2, 1})
|
||||
// assert.IsNonIncreasing(t, []string{"b", "a"})
|
||||
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.IsNonIncreasing(t, object, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// IsNonIncreasingf asserts that the collection is not increasing
|
||||
//
|
||||
// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
|
||||
// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
|
||||
// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
|
||||
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.IsNonIncreasingf(t, object, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// IsType asserts that the specified objects are of the same type.
|
||||
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
@ -944,6 +1112,34 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// Negative asserts that the specified element is negative
|
||||
//
|
||||
// assert.Negative(t, -1)
|
||||
// assert.Negative(t, -1.23)
|
||||
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.Negative(t, e, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// Negativef asserts that the specified element is negative
|
||||
//
|
||||
// assert.Negativef(t, -1, "error message %s", "formatted")
|
||||
// assert.Negativef(t, -1.23, "error message %s", "formatted")
|
||||
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.Negativef(t, e, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// Never asserts that the given condition doesn't satisfy in waitFor time,
|
||||
// periodically checking the target function each tick.
|
||||
//
|
||||
@ -1200,6 +1396,30 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// NotErrorIs asserts that at none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.NotErrorIs(t, err, target, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// NotErrorIsf asserts that at none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.NotErrorIsf(t, err, target, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// NotNil asserts that the specified object is not nil.
|
||||
//
|
||||
// assert.NotNil(t, err)
|
||||
@ -1446,6 +1666,34 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// Positive asserts that the specified element is positive
|
||||
//
|
||||
// assert.Positive(t, 1)
|
||||
// assert.Positive(t, 1.23)
|
||||
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.Positive(t, e, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// Positivef asserts that the specified element is positive
|
||||
//
|
||||
// assert.Positivef(t, 1, "error message %s", "formatted")
|
||||
// assert.Positivef(t, 1.23, "error message %s", "formatted")
|
||||
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.Positivef(t, e, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// Regexp asserts that a specified regexp matches a string.
|
||||
//
|
||||
// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
|
||||
|

194 vendor/github.com/stretchr/testify/require/require_forward.go generated vendored
@@ -205,6 +205,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
|
||||
Error(a.t, err, msgAndArgs...)
|
||||
}
|
||||
|
||||
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
|
||||
// This is a wrapper for errors.As.
|
||||
func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
ErrorAs(a.t, err, target, msgAndArgs...)
|
||||
}
|
||||
|
||||
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
|
||||
// This is a wrapper for errors.As.
|
||||
func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
ErrorAsf(a.t, err, target, msg, args...)
|
||||
}
|
||||
|
||||
// ErrorIs asserts that at least one of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
ErrorIs(a.t, err, target, msgAndArgs...)
|
||||
}
|
||||
|
||||
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
ErrorIsf(a.t, err, target, msg, args...)
|
||||
}
|
||||
|
||||
// Errorf asserts that a function returned an error (i.e. not `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
@ -632,6 +668,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo
|
||||
InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
|
||||
}
|
||||
|
||||
// IsDecreasing asserts that the collection is decreasing
|
||||
//
|
||||
// a.IsDecreasing([]int{2, 1, 0})
|
||||
// a.IsDecreasing([]float{2, 1})
|
||||
// a.IsDecreasing([]string{"b", "a"})
|
||||
func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
IsDecreasing(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsDecreasingf asserts that the collection is decreasing
|
||||
//
|
||||
// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
|
||||
// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted")
|
||||
// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
|
||||
func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
IsDecreasingf(a.t, object, msg, args...)
|
||||
}
|
||||
|
||||
// IsIncreasing asserts that the collection is increasing
|
||||
//
|
||||
// a.IsIncreasing([]int{1, 2, 3})
|
||||
// a.IsIncreasing([]float{1, 2})
|
||||
// a.IsIncreasing([]string{"a", "b"})
|
||||
func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
IsIncreasing(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsIncreasingf asserts that the collection is increasing
|
||||
//
|
||||
// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
|
||||
// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted")
|
||||
// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
|
||||
func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
IsIncreasingf(a.t, object, msg, args...)
|
||||
}
|
||||
|
||||
// IsNonDecreasing asserts that the collection is not decreasing
|
||||
//
|
||||
// a.IsNonDecreasing([]int{1, 1, 2})
|
||||
// a.IsNonDecreasing([]float{1, 2})
|
||||
// a.IsNonDecreasing([]string{"a", "b"})
|
||||
func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
IsNonDecreasing(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsNonDecreasingf asserts that the collection is not decreasing
|
||||
//
|
||||
// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
|
||||
// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted")
|
||||
// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
|
||||
func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
IsNonDecreasingf(a.t, object, msg, args...)
|
||||
}
|
||||
|
||||
// IsNonIncreasing asserts that the collection is not increasing
|
||||
//
|
||||
// a.IsNonIncreasing([]int{2, 1, 1})
|
||||
// a.IsNonIncreasing([]float{2, 1})
|
||||
// a.IsNonIncreasing([]string{"b", "a"})
|
||||
func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
IsNonIncreasing(a.t, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsNonIncreasingf asserts that the collection is not increasing
|
||||
//
|
||||
// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
|
||||
// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted")
|
||||
// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
|
||||
func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
IsNonIncreasingf(a.t, object, msg, args...)
|
||||
}
|
||||
|
||||
// IsType asserts that the specified objects are of the same type.
|
||||
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
@ -740,6 +872,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
|
||||
Lessf(a.t, e1, e2, msg, args...)
|
||||
}
|
||||
|
||||
// Negative asserts that the specified element is negative
|
||||
//
|
||||
// a.Negative(-1)
|
||||
// a.Negative(-1.23)
|
||||
func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
Negative(a.t, e, msgAndArgs...)
|
||||
}
|
||||
|
||||
// Negativef asserts that the specified element is negative
|
||||
//
|
||||
// a.Negativef(-1, "error message %s", "formatted")
|
||||
// a.Negativef(-1.23, "error message %s", "formatted")
|
||||
func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
Negativef(a.t, e, msg, args...)
|
||||
}
|
||||
|
||||
// Never asserts that the given condition doesn't satisfy in waitFor time,
|
||||
// periodically checking the target function each tick.
|
||||
//
|
||||
@ -942,6 +1096,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
|
||||
NotEqualf(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// NotErrorIs asserts that at none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
NotErrorIs(a.t, err, target, msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotErrorIsf asserts that at none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
NotErrorIsf(a.t, err, target, msg, args...)
|
||||
}
|
||||
|
||||
// NotNil asserts that the specified object is not nil.
|
||||
//
|
||||
// a.NotNil(err)
|
||||
@ -1134,6 +1306,28 @@ func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interfa
|
||||
Panicsf(a.t, f, msg, args...)
|
||||
}
|
||||
|
||||
// Positive asserts that the specified element is positive
|
||||
//
|
||||
// a.Positive(1)
|
||||
// a.Positive(1.23)
|
||||
func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
Positive(a.t, e, msgAndArgs...)
|
||||
}
|
||||
|
||||
// Positivef asserts that the specified element is positive
|
||||
//
|
||||
// a.Positivef(1, "error message %s", "formatted")
|
||||
// a.Positivef(1.23, "error message %s", "formatted")
|
||||
func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
Positivef(a.t, e, msg, args...)
|
||||
}
|
||||
|
||||
// Regexp asserts that a specified regexp matches a string.
|
||||
//
|
||||
// a.Regexp(regexp.MustCompile("start"), "it's starting")
|
||||
|

3 vendor/go.opentelemetry.io/contrib/.gitattributes generated vendored Normal file
@@ -0,0 +1,3 @@
* text=auto eol=lf
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf

13 vendor/go.opentelemetry.io/contrib/.gitignore generated vendored Normal file
@@ -0,0 +1,13 @@
.DS_Store
Thumbs.db

.tools/
.idea/
.vscode/
*.iml
*.so
coverage.*
example

instrumentation/google.golang.org/grpc/otelgrpc/example/server/server
instrumentation/google.golang.org/grpc/otelgrpc/example/client/client

32 vendor/go.opentelemetry.io/contrib/.golangci.yml generated vendored Normal file
@@ -0,0 +1,32 @@
# See https://github.com/golangci/golangci-lint#config-file
run:
  issues-exit-code: 1 #Default
  tests: true #Default

linters:
  enable:
    - misspell
    - goimports
    - revive
    - gofmt

issues:
  exclude-rules:
    # helpers in tests often (rightfully) pass a *testing.T as their first argument
    - path: _test\.go
      text: "context.Context should be the first parameter of a function"
      linters:
        - revive
    # Yes, they are, but it's okay in a test
    - path: _test\.go
      text: "exported func.*returns unexported type.*which can be annoying to use"
      linters:
        - revive

linters-settings:
  misspell:
    locale: US
    #ignore-words:
    #  - someword
  goimports:
    local-prefixes: go.opentelemetry.io

336 vendor/go.opentelemetry.io/contrib/CHANGELOG.md generated vendored Normal file
@@ -0,0 +1,336 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [0.21.0] - 2021-06-18

### Fixed

- Dockerfile based examples for `otelgin` and `otelmacaron`. (#767)

### Changed

- Supported minimum version of Go bumped from 1.14 to 1.15. (#787)
- EKS Resource Detector now use the Kubernetes Go client to obtain the ConfigMap. (#813)

### Removed

- Remove service name from `otelmongodb` configuration and span attributes. (#763)

## [0.20.0] - 2021-04-23

### Changed

- The `go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo` instrumentation now accepts a `WithCommandAttributeDisabled`,
  so the caller can specify whether to opt-out of tracing the mongo command. (#712)
- Upgrade to v0.20.0 of `go.opentelemetry.io/otel`. (#758)
- The B3 and Jaeger propagators now store their debug or deferred state in the context.Context instead of the SpanContext. (#758)

## [0.19.0] - 2021-03-19

### Changed

- Upgrade to v0.19.0 of `go.opentelemetry.io/otel`.
- Fix Span names created in HTTP Instrumentation package to conform with guidelines. (#757)

## [0.18.0] - 2021-03-04

### Fixed

- `otelmemcache` no longer sets span status to OK instead of leaving it unset. (#477)
- Fix goroutine leak in gRPC `StreamClientInterceptor`. (#581)

### Removed

- Remove service name from `otelmemcache` configuration and span attributes. (#477)

## [0.17.0] - 2021-02-15

### Added

- Add `ot-tracer` propagator (#562)

### Changed

- Rename project default branch from `master` to `main`.

### Fixed

- Added failure message for AWS ECS resource detector for better debugging (#568)
- Goroutine leak in gRPC StreamClientInterceptor while streamer returns an error. (#581)

## [0.16.0] - 2021-01-13

### Fixed

- Fix module path for AWS ECS resource detector (#517)

## [0.15.1] - 2020-12-14

### Added

- Add registry link check to `Makefile` and pre-release script. (#446)
- A new AWS X-Ray ID Generator (#459)
- Migrate CircleCI jobs to GitHub Actions (#476)
- Add CodeQL GitHub Action (#506)
- Add gosec workflow to GitHub Actions (#507)

### Fixed

- Fixes the body replacement in otelhttp to not to mutate a nil body. (#484)

## [0.15.0] - 2020-12-11

### Added

- A new Amazon EKS resource detector. (#465)
- A new `gcp.CloudRun` detector for detecting resource from a Cloud Run instance. (#455)

## [0.14.0] - 2020-11-20

### Added

- `otelhttp.{Get,Head,Post,PostForm}` convenience wrappers for their `http` counterparts. (#390)
- The AWS detector now adds the cloud zone, host image ID, host type, and host name to the returned `Resource`. (#410)
- Add Amazon ECS Resource Detector for AWS X-Ray. (#466)
- Add propagator for AWS X-Ray (#462)

### Changed

- Add semantic version to `Tracer` / `Meter` created by instrumentation packages `otelsaram`, `otelrestful`, `otelmongo`, `otelhttp` and `otelhttptrace`. (#412)
- Update instrumentation guidelines about tracer / meter semantic version. (#412)
- Replace internal tracer and meter helpers by helpers from `go.opentelemetry.io/otel`. (#414)
- gRPC instrumentation sets span attribute `rpc.grpc.status_code`. (#453)

## Fixed

- `/detectors/aws` no longer fails if instance metadata is not available (e.g. not running in AWS) (#401)
- The AWS detector now returns a partial resource and an appropriate error if it encounters an error part way through determining a `Resource` identity. (#410)
- The `host` instrumentation unit test has been updated to not depend on the system it runs on. (#426)

## [0.13.0] - 2020-10-09

## Added

- A Jaeger propagator. (#375)

## Changed

- The `go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc` package instrumentation no longer accepts a `Tracer` as an argument to the interceptor function.
  Instead, a new `WithTracerProvider` option is added to configure the `TracerProvider` used when creating the `Tracer` for the instrumentation. (#373)
- The `go.opentelemetry.io/contrib/instrumentation/gopkg.in/macaron.v1/otelmacaron` instrumentation now accepts a `TracerProvider` rather than a `Tracer`. (#374)
- Remove `go.opentelemetry.io/otel/sdk` dependency from instrumentation. (#381)
- Use `httpsnoop` in `go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux` to ensure `http.ResponseWriter` additional interfaces are preserved. (#388)

### Fixed

- The `go.opentelemetry.io/contrib/instrumentation/github.com/labstack/echo/otelecho.Middleware` no longer sends duplicate errors to the global `ErrorHandler`. (#377, #364)
- The import comment in `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` is now correctly quoted. (#379)
- The B3 propagator sets the sample bitmask when the sampling decision is `debug`. (#369)

## [0.12.0] - 2020-09-25

### Changed

- Replace `WithTracer` with `WithTracerProvider` in the `go.opentelemetry.io/contrib/instrumentation/gopkg.in/macaron.v1/otelmacaron` instrumentation. (#374)

### Added

- Benchmark tests for the gRPC instrumentation. (#296)
- Integration testing for the gRPC instrumentation. (#297)
- Allow custom labels to be added to net/http metrics. (#306)
- Added B3 propagator, moving it out of open.telemetry.io/otel repo. (#344)

### Changed

- Unify instrumentation about provider options for `go.mongodb.org/mongo-driver`, `gin-gonic/gin`, `gorilla/mux`,
  `labstack/echo`, `emicklei/go-restful`, `bradfitz/gomemcache`, `Shopify/sarama`, `net/http` and `beego`. (#303)
- Update instrumentation guidelines about uniform provider options. Also, update style guide. (#303)
- Make config struct of instrumentation unexported. (#303)
- Instrumentations have been updated to adhere to the [configuration style guide's](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config)
  updated recommendation to use `newConfig()` instead of `configure()`. (#336)
- A new instrumentation naming scheme is implemented to avoid package name conflicts for instrumented packages while still remaining discoverable. (#359)
  - `google.golang.org/grpc` -> `google.golang.org/grpc/otelgrpc`
  - `go.mongodb.org/mongo-driver` -> `go.mongodb.org/mongo-driver/mongo/otelmongo`
  - `net/http` -> `net/http/otelhttp`
  - `net/http/httptrace` -> `net/http/httptrace/otelhttptrace`
  - `github.com/labstack/echo` -> `github.com/labstack/echo/otelecho`
  - `github.com/bradfitz/gomemcache` -> `github.com/bradfitz/gomemcache/memcache/otelmemcache`
  - `github.com/gin-gonic/gin` -> `github.com/gin-gonic/gin/otelgin`
  - `github.com/gocql/gocql` -> `github.com/gocql/gocql/otelgocql`
  - `github.com/emicklei/go-restful` -> `github.com/emicklei/go-restful/otelrestful`
  - `github.com/Shopify/sarama` -> `github.com/Shopify/sarama/otelsarama`
  - `github.com/gorilla/mux` -> `github.com/gorilla/mux/otelmux`
  - `github.com/astaxie/beego` -> `github.com/astaxie/beego/otelbeego`
  - `gopkg.in/macaron.v1` -> `gopkg.in/macaron.v1/otelmacaron`
- Rename `OTelBeegoHandler` to `Handler` in the `go.opentelemetry.io/contrib/instrumentation/github.com/astaxie/beego/otelbeego` package. (#359)

## [0.11.0] - 2020-08-25

### Added

- Top-level `Version()` and `SemVersion()` functions defining the current version of the contrib package. (#225)
- Instrumentation for the `github.com/astaxie/beego` package. (#200)
- Instrumentation for the `github.com/bradfitz/gomemcache` package. (#204)
- Host metrics instrumentation. (#231)
- Cortex histogram and distribution support. (#237)
- Cortex example project. (#238)
- Cortex HTTP authentication. (#246)

### Changed

- Remove service name as a parameter of Sarama instrumentation. (#221)
- Replace `WithTracer` with `WithTracerProvider` in Sarama instrumentation. (#221)
- Switch to use common top-level module `SemVersion()` when creating versioned tracer in `bradfitz/gomemcache`. (#226)
- Use `IntegrationShouldRun` in `gomemcache_test`. (#254)
- Use Go 1.15 for CI builds. (#236)
- Improved configuration for `runtime` instrumentation. (#224)

### Fixed

- Update dependabot configuration to include newly added `bradfitz/gomemcache` package. (#226)
- Correct `runtime` instrumentation name. (#241)

## [0.10.1] - 2020-08-13

### Added

- The `go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc` module has been added to replace the instrumentation that had previoiusly existed in the `go.opentelemetry.io/otel/instrumentation/grpctrace` package. (#189)
- Instrumentation for the stdlib `net/http` and `net/http/httptrace` packages. (#190)
- Initial Cortex exporter. (#202, #205, #210, #211, #215)

### Fixed

- Bump google.golang.org/grpc from 1.30.0 to 1.31.0. (#166)
- Bump go.mongodb.org/mongo-driver from 1.3.5 to 1.4.0 in /instrumentation/go.mongodb.org/mongo-driver. (#170)
- Bump google.golang.org/grpc in /instrumentation/github.com/gin-gonic/gin. (#173)
- Bump google.golang.org/grpc in /instrumentation/github.com/labstack/echo. (#176)
- Bump google.golang.org/grpc from 1.30.0 to 1.31.0 in /instrumentation/github.com/Shopify/sarama. (#179)
- Bump cloud.google.com/go from 0.61.0 to 0.63.0 in /detectors/gcp. (#181, #199)
- Bump github.com/aws/aws-sdk-go from 1.33.15 to 1.34.1 in /detectors/aws. (#184, #192, #193, #198, #201, #203)
- Bump github.com/golangci/golangci-lint from 1.29.0 to 1.30.0 in /tools. (#186)
- Setup CI to run tests that require external resources (Cassandra and MongoDB). (#191)
- Bump github.com/Shopify/sarama from 1.26.4 to 1.27.0 in /instrumentation/github.com/Shopify/sarama. (#206)

## [0.10.0] - 2020-07-31

This release upgrades its [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0) dependency to v0.10.0 and includes new instrumentation for popular Kafka and Cassandra clients.

### Added

- A detector that generate resources from GCE instance. (#132)
- A detector that generate resources from AWS instances. (#139)
- Instrumentation for the Kafka client github.com/Shopify/sarama. (#134, #153)
- Links and status message for mock span in the internal testing library. (#134)
- Instrumentation for the Cassandra client github.com/gocql/gocql. (#137)
- A detector that generate resources from GKE clusters. (#154)

### Fixed

- Bump github.com/aws/aws-sdk-go from 1.33.8 to 1.33.15 in /detectors/aws. (#155, #157, #159, #162)
- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#146)

## [0.9.0] - 2020-07-20

This release upgrades its [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0) dependency to v0.9.0.

### Fixed

- Bump github.com/emicklei/go-restful/v3 from 3.0.0 to 3.2.0 in /instrumentation/github.com/emicklei/go-restful. (#133)
- Update dependabot configuration to correctly check all included packages. (#131)
- Update `RELEASING.md` with correct `tag.sh` command. (#130)

## [0.8.0] - 2020-07-10

This release upgrades its [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0) dependency to v0.8.0, includes minor fixes, and new instrumentation.

### Added

- Create this `CHANGELOG.md`. (#114)
- Add `emicklei/go-restful/v3` trace instrumentation. (#115)

### Changed

- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#114)
- Move all `github.com` package instrumentation under a `github.com` directory. (#118)

### Fixed

- Update README to include information about external instrumentation.
  To start, this includes native instrumentation found in the `go-redis/redis` package. (#117)
- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.2 in /tools. (#122, #123, #125)
- Bump go.mongodb.org/mongo-driver from 1.3.4 to 1.3.5 in /instrumentation/go.mongodb.org/mongo-driver. (#124)

## [0.7.0] - 2020-06-29

This release upgrades its [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0) dependency to v0.7.0.

### Added

- Create `RELEASING.md` instructions. (#101)
- Apply transitive dependabot go.mod updates as part of a new automatic Github workflow. (#94)
- New dependabot integration to automate package upgrades. (#61)
- Add automatic tag generation script for release. (#60)

### Changed

- Upgrade Datadog metrics exporter to include Resource tags. (#46)
- Added output validation to Datadog example. (#96)
- Move Macaron package to match layout guidelines. (#92)
- Update top-level README and instrumentation README. (#92)
- Bump google.golang.org/grpc from 1.29.1 to 1.30.0. (#99)
- Bump github.com/golangci/golangci-lint from 1.21.0 to 1.27.0 in /tools. (#77)
- Bump go.mongodb.org/mongo-driver from 1.3.2 to 1.3.4 in /instrumentation/go.mongodb.org/mongo-driver. (#76)
- Bump github.com/stretchr/testify from 1.5.1 to 1.6.1. (#74)
- Bump gopkg.in/macaron.v1 from 1.3.5 to 1.3.9 in /instrumentation/macaron. (#68)
- Bump github.com/gin-gonic/gin from 1.6.2 to 1.6.3 in /instrumentation/gin-gonic/gin. (#73)
- Bump github.com/DataDog/datadog-go from 3.5.0+incompatible to 3.7.2+incompatible in /exporters/metric/datadog. (#78)
- Replaced `internal/trace/http.go` helpers with `api/standard` helpers from otel-go repo. (#112)

## [0.6.1] - 2020-06-08

First official tagged release of `contrib` repository.

### Added

- `labstack/echo` trace instrumentation (#42)
- `mongodb` trace instrumentation (#26)
- Go Runtime metrics (#9)
- `gorilla/mux` trace instrumentation (#19)
- `gin-gonic` trace instrumentation (#15)
- `macaron` trace instrumentation (#20)
- `dogstatsd` metrics exporter (#10)
- `datadog` metrics exporter (#22)
- Tags to all modules in repository
- Repository folder structure and automated build (#3)

### Changes

- Prefix support for dogstatsd (#34)
- Update Go Runtime package to use batch observer (#44)

[Unreleased]: https://github.com/open-telemetry/opentelemetry-go-contrib/compare/v0.21.0...HEAD
[0.21.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.21.0
[0.20.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.20.0
[0.19.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.19.0
[0.18.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.18.0
[0.17.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.17.0
[0.16.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.16.0
[0.15.1]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.15.1
[0.15.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.15.0
[0.14.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.14.0
[0.13.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.13.0
[0.12.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.12.0
[0.11.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.11.0
[0.10.1]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.10.1
[0.10.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.10.0
[0.9.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.9.0
[0.8.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.8.0
[0.7.0]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.7.0
[0.6.1]: https://github.com/open-telemetry/opentelemetry-go-contrib/releases/tag/v0.6.1
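
The changelog entry for #373 above describes how the vendored otelgrpc interceptors are now configured via `WithTracerProvider` instead of taking a `Tracer`. A minimal sketch of attaching them to a gRPC server (illustrative only; the function name is hypothetical and this is not containerd's actual wiring, which lives elsewhere in this change):

```go
package example

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
)

// newInstrumentedServer shows the typical hookup: the interceptors use the
// globally registered TracerProvider unless an explicit
// otelgrpc.WithTracerProvider(...) option is passed.
func newInstrumentedServer() *grpc.Server {
	return grpc.NewServer(
		grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor()),
		grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor()),
	)
}
```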

17 vendor/go.opentelemetry.io/contrib/CODEOWNERS generated vendored Normal file
@@ -0,0 +1,17 @@
#####################################################
#
# List of approvers for this repository
#
#####################################################
#
# Learn about membership in OpenTelemetry community:
# https://github.com/open-telemetry/community/blob/main/community-membership.md
#
#
# Learn about CODEOWNERS file format:
# https://help.github.com/en/articles/about-code-owners
#

* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod

CODEOWNERS @MrAlias @Aneurysm9

136 vendor/go.opentelemetry.io/contrib/CONTRIBUTING.md generated vendored Normal file
@@ -0,0 +1,136 @@
# Contributing to opentelemetry-go-contrib

The Go special interest group (SIG) meets regularly. See the
OpenTelemetry
[community](https://github.com/open-telemetry/community#golang-sdk)
repo for information on this and other language SIGs.

See the [public meeting
notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b)
for a summary description of past meetings. To request edit access,
join the meeting or get in touch on
[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).

## Development

There are some generated files checked into the repo. To make sure
that the generated files are up-to-date, run `make` (or `make
precommit` - the `precommit` target is the default).

The `precommit` target also fixes the formatting of the code and
checks the status of the go module files.

If after running `make precommit` the output of `git status` contains
`nothing to commit, working tree clean` then it means that everything
is up-to-date and properly formatted.

## Pull Requests

### How to Send Pull Requests

Everyone is welcome to contribute code to `opentelemetry-go-contrib` via
GitHub pull requests (PRs).

To create a new PR, fork the project in GitHub and clone the upstream
repo:

```sh
$ git clone https://github.com/open-telemetry/opentelemetry-go-contrib
```
This would put the project in the `opentelemetry-go-contrib` directory in
current working directory.

Enter the newly created directory and add your fork as a new remote:

```sh
$ git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
```

Check out a new branch, make modifications, run linters and tests, update
`CHANGELOG.md` and push the branch to your fork:

```sh
$ git checkout -b <YOUR_BRANCH_NAME>
# edit files
# update changelog
$ make precommit
$ git add -p
$ git commit
$ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
```

Open a pull request against the main `opentelemetry-go-contrib` repo. Be sure to add the pull
request ID to the entry you added to `CHANGELOG.md`.

### How to Receive Comments

* If the PR is not ready for review, please put `[WIP]` in the title,
  tag it as `work-in-progress`, or mark it as
  [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
* Make sure CLA is signed and CI is clear.

### How to Get PRs Merged

A PR is considered to be **ready to merge** when:

* It has received two approvals from Approvers/Maintainers (at
  different companies).
* Feedback has been addressed.
* Any substantive changes to your PR will require that you clear any prior
  Approval reviews, this includes changes resulting from other feedback. Unless
  the approver explicitly stated that their approval will persist across
  changes it should be assumed that the PR needs their review again. Other
  project members (e.g. approvers, maintainers) can help with this if there are
  any questions or if you forget to clear reviews.
* It has been open for review for at least one working day. This gives
  people reasonable time to review.
  * Trivial change (typo, cosmetic, doc, etc.) doesn't have to wait for
    one day.
* `CHANGELOG.md` has been updated to reflect what has been
  added, changed, removed, or fixed.
* Urgent fix can take exception as long as it has been actively
  communicated.

Any Maintainer can merge the PR once it is **ready to merge**.

## Style Guide

* Make sure to run `make precommit` - this will find and fix the code
  formatting.
* Check [opentelemetry-go Style Guide](https://github.com/open-telemetry/opentelemetry-go/blob/main/CONTRIBUTING.md#style-guide)

## Adding a new Contrib package

To add a new contrib package follow an existing one. An empty Sample instrumentation
provides base structure with an example and a test. Each contrib package
should be its own module. A contrib package may contain more than one go package.

### Folder Structure
- instrumentation/\<instrumentation-package> (**Common**)
- instrumentation/\<instrumentation-package>/trace (**specific to trace**)
- instrumentation/\<instrumentation-package>/metrics (**specific to metrics**)

#### Example
- instrumentation/gorm/trace
- instrumentation/kafka/metrics

## Approvers and Maintainers

Approvers:

- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Josh MacDonald](https://github.com/jmacd), LightStep
- [Sam Xie](https://github.com/XSAM)
- [David Ashpole](https://github.com/dashpole), Google
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Aaron Clawson](https://github.com/MadVikingGod)

Maintainers:

- [Anthony Mirabella](https://github.com/Aneurysm9), Centene
- [Tyler Yahn](https://github.com/MrAlias), New Relic

### Become an Approver or a Maintainer

See the [community membership document in OpenTelemetry community
repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).

201 vendor/go.opentelemetry.io/contrib/LICENSE generated vendored Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
203
vendor/go.opentelemetry.io/contrib/Makefile
generated
vendored
Normal file
@ -0,0 +1,203 @@
|
||||
TOOLS_MOD_DIR := ./tools
|
||||
|
||||
# All source code and documents. Used in spell check.
|
||||
ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
|
||||
# All directories with go.mod files related to opentelemetry library. Used for building, testing and linting.
|
||||
ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort))
|
||||
ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
|
||||
|
||||
# URLs to check if all contrib entries exist in the registry.
|
||||
REGISTRY_BASE_URL = https://raw.githubusercontent.com/open-telemetry/opentelemetry.io/main/content/en/registry
|
||||
CONTRIB_REPO_URL = https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main
|
||||
|
||||
# Mac OS Catalina 10.5.x doesn't support 386. Hence skip 386 test
|
||||
SKIP_386_TEST = false
|
||||
UNAME_S := $(shell uname -s)
|
||||
ifeq ($(UNAME_S),Darwin)
|
||||
SW_VERS := $(shell sw_vers -productVersion)
|
||||
ifeq ($(shell echo $(SW_VERS) | egrep '^(10.1[5-9]|1[1-9]|[2-9])'), $(SW_VERS))
|
||||
SKIP_386_TEST = true
|
||||
endif
|
||||
endif
|
||||
|
||||
|
||||
GOTEST_MIN = go test -v -timeout 30s
|
||||
GOTEST = $(GOTEST_MIN) -race
|
||||
GOTEST_WITH_COVERAGE = $(GOTEST) -coverprofile=coverage.out -covermode=atomic -coverpkg=./...
|
||||
|
||||
.DEFAULT_GOAL := precommit
|
||||
|
||||
.PHONY: precommit
|
||||
|
||||
TOOLS_DIR := $(abspath ./.tools)
|
||||
|
||||
$(TOOLS_DIR)/golangci-lint: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
|
||||
cd $(TOOLS_MOD_DIR) && \
|
||||
go build -o $(TOOLS_DIR)/golangci-lint github.com/golangci/golangci-lint/cmd/golangci-lint
|
||||
|
||||
$(TOOLS_DIR)/misspell: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
|
||||
cd $(TOOLS_MOD_DIR) && \
|
||||
go build -o $(TOOLS_DIR)/misspell github.com/client9/misspell/cmd/misspell
|
||||
|
||||
$(TOOLS_DIR)/stringer: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
|
||||
cd $(TOOLS_MOD_DIR) && \
|
||||
go build -o $(TOOLS_DIR)/stringer golang.org/x/tools/cmd/stringer
|
||||
|
||||
precommit: dependabot-check license-check generate build lint test
|
||||
|
||||
.PHONY: test-with-coverage
|
||||
test-with-coverage:
|
||||
set -e; \
|
||||
printf "" > coverage.txt; \
|
||||
for dir in $(ALL_COVERAGE_MOD_DIRS); do \
|
||||
echo "go test ./... + coverage in $${dir}"; \
|
||||
(cd "$${dir}" && \
|
||||
$(GOTEST_WITH_COVERAGE) ./... && \
|
||||
go tool cover -html=coverage.out -o coverage.html); \
|
||||
[ -f "$${dir}/coverage.out" ] && cat "$${dir}/coverage.out" >> coverage.txt; \
|
||||
done; \
|
||||
sed -i.bak -e '2,$$ { /^mode: /d; }' coverage.txt && rm coverage.txt.bak
|
||||
|
||||
.PHONY: ci
|
||||
ci: precommit check-clean-work-tree test-with-coverage test-386
|
||||
|
||||
.PHONY: test-gocql
|
||||
test-gocql:
|
||||
@if ./tools/should_build.sh gocql; then \
|
||||
set -e; \
|
||||
docker run --name cass-integ --rm -p 9042:9042 -d cassandra:3; \
|
||||
CMD=cassandra IMG_NAME=cass-integ ./tools/wait.sh; \
|
||||
(cd instrumentation/github.com/gocql/gocql/otelgocql && \
|
||||
$(GOTEST_WITH_COVERAGE) . && \
|
||||
go tool cover -html=coverage.out -o coverage.html); \
|
||||
docker stop cass-integ; \
|
||||
fi
|
||||
|
||||
.PHONY: test-mongo-driver
|
||||
test-mongo-driver:
|
||||
@if ./tools/should_build.sh mongo-driver; then \
|
||||
set -e; \
|
||||
docker run --name mongo-integ --rm -p 27017:27017 -d mongo; \
|
||||
CMD=mongo IMG_NAME=mongo-integ ./tools/wait.sh; \
|
||||
(cd instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo && \
|
||||
$(GOTEST_WITH_COVERAGE) . && \
|
||||
go tool cover -html=coverage.out -o coverage.html); \
|
||||
docker stop mongo-integ; \
|
||||
fi
|
||||
|
||||
.PHONY: test-gomemcache
|
||||
test-gomemcache:
|
||||
@if ./tools/should_build.sh gomemcache; then \
|
||||
set -e; \
|
||||
docker run --name gomemcache-integ --rm -p 11211:11211 -d memcached; \
|
||||
CMD=gomemcache IMG_NAME=gomemcache-integ ./tools/wait.sh; \
|
||||
(cd instrumentation/github.com/bradfitz/gomemcache/memcache/otelmemcache && \
|
||||
$(GOTEST_WITH_COVERAGE) . && \
|
||||
go tool cover -html=coverage.out -o coverage.html); \
|
||||
docker stop gomemcache-integ ; \
|
||||
fi
|
||||
|
||||
.PHONY: check-clean-work-tree
|
||||
check-clean-work-tree:
|
||||
@if ! git diff --quiet; then \
|
||||
echo; \
|
||||
echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
|
||||
echo; \
|
||||
git status; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
.PHONY: build
|
||||
build:
|
||||
# TODO: Fix this on windows.
|
||||
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
|
||||
echo "compiling all packages in $${dir}"; \
|
||||
(cd "$${dir}" && \
|
||||
go build ./... && \
|
||||
go test -run xxxxxMatchNothingxxxxx ./... >/dev/null); \
|
||||
done
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
|
||||
echo "go test ./... + race in $${dir}"; \
|
||||
(cd "$${dir}" && \
|
||||
$(GOTEST) ./...); \
|
||||
done
|
||||
|
||||
.PHONY: test-386
|
||||
test-386:
|
||||
if [ $(SKIP_386_TEST) = true ] ; then \
|
||||
echo "skipping the test for GOARCH 386 as it is not supported on the current OS"; \
|
||||
else \
|
||||
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
|
||||
echo "go test ./... GOARCH 386 in $${dir}"; \
|
||||
(cd "$${dir}" && \
|
||||
GOARCH=386 $(GOTEST_MIN) ./...); \
|
||||
done; \
|
||||
fi
|
||||
|
||||
.PHONY: lint
|
||||
lint: $(TOOLS_DIR)/golangci-lint $(TOOLS_DIR)/misspell
|
||||
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
|
||||
echo "golangci-lint in $${dir}"; \
|
||||
(cd "$${dir}" && \
|
||||
$(TOOLS_DIR)/golangci-lint run --fix && \
|
||||
$(TOOLS_DIR)/golangci-lint run); \
|
||||
done
|
||||
$(TOOLS_DIR)/misspell -w $(ALL_DOCS)
|
||||
set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
|
||||
echo "go mod tidy in $${dir}"; \
|
||||
(cd "$${dir}" && \
|
||||
go mod tidy); \
|
||||
done
|
||||
|
||||
.PHONY: generate
|
||||
generate: $(TOOLS_DIR)/stringer
|
||||
PATH="$(TOOLS_DIR):$${PATH}" go generate ./...
|
||||
|
||||
.PHONY: license-check
|
||||
license-check:
|
||||
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path './vendor/*' ! -path './exporters/otlp/internal/opentelemetry-proto/*') ; do \
|
||||
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
|
||||
done); \
|
||||
if [ -n "$${licRes}" ]; then \
|
||||
echo "license header checking failed:"; echo "$${licRes}"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
.PHONY: registry-links-check
|
||||
registry-links-check:
|
||||
@checkRes=$$( \
|
||||
for f in $$( find ./instrumentation ./exporters ./detectors ! -path './instrumentation/net/*' -type f -name 'go.mod' -exec dirname {} \; | egrep -v '/example|/utils' | sort ) \
|
||||
./instrumentation/net/http; do \
|
||||
TYPE="instrumentation"; \
|
||||
if $$(echo "$$f" | grep -q "exporters"); then \
|
||||
TYPE="exporter"; \
|
||||
fi; \
|
||||
if $$(echo "$$f" | grep -q "detectors"); then \
|
||||
TYPE="detector"; \
|
||||
fi; \
|
||||
NAME=$$(echo "$$f" | sed -e 's/.*\///' -e 's/.*otel//'); \
|
||||
LINK=$(CONTRIB_REPO_URL)/$$(echo "$$f" | sed -e 's/..//' -e 's/\/otel.*$$//'); \
|
||||
if ! $$(curl -s $(REGISTRY_BASE_URL)/$${TYPE}-go-$${NAME}.md | grep -q "$${LINK}"); then \
|
||||
echo "$$f"; \
|
||||
fi \
|
||||
done; \
|
||||
); \
|
||||
if [ -n "$$checkRes" ]; then \
|
||||
echo "WARNING: registry link check failed for the following packages:"; echo "$${checkRes}"; \
|
||||
fi
|
||||
|
||||
.PHONY: dependabot-check
|
||||
dependabot-check:
|
||||
@result=$$( \
|
||||
for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \
|
||||
do grep -q "$$f" .github/dependabot.yml \
|
||||
|| echo "$$f"; \
|
||||
done; \
|
||||
); \
|
||||
if [ -n "$$result" ]; then \
|
||||
echo "missing go.mod dependabot check:"; echo "$$result"; \
|
||||
exit 1; \
|
||||
fi
|
40
vendor/go.opentelemetry.io/contrib/README.md
generated
vendored
Normal file
@ -0,0 +1,40 @@
# OpenTelemetry-Go Contrib

[](https://github.com/open-telemetry/opentelemetry-go-contrib/actions?query=workflow%3Abuild_and_test+branch%3Amain)
[](https://app.codecov.io/gh/open-telemetry/opentelemetry-go-contrib?branch=main)
[](https://pkg.go.dev/go.opentelemetry.io/contrib)
[](https://goreportcard.com/report/go.opentelemetry.io/contrib)
[](https://cloud-native.slack.com/archives/C01NPAXACKT)

Collection of 3rd-party instrumentation and exporters for [OpenTelemetry-Go](https://github.com/open-telemetry/opentelemetry-go).

## Contents

- [Instrumentation](./instrumentation/): Packages providing OpenTelemetry instrumentation for 3rd-party libraries.
- [Exporters](./exporters/): Packages providing OpenTelemetry exporters for 3rd-party telemetry systems.
- [Propagators](./propagators/): Packages providing OpenTelemetry context propagators for 3rd-party propagation formats.
- [Detectors](./detectors/): Packages providing OpenTelemetry resource detectors for 3rd-party cloud computing environments.

## Project Status

This project is currently in a pre-GA phase. Our progress towards a GA release
candidate is tracked in [this project
board](https://github.com/orgs/open-telemetry/projects/5).

### Compatibility

This project is tested on the following systems.

| OS     | Go Version | Architecture |
| ------ | ---------- | ------------ |
| Ubuntu | 1.16       | amd64        |
| Ubuntu | 1.15       | amd64        |
| Ubuntu | 1.16       | 386          |
| Ubuntu | 1.15       | 386          |

While this project should work for other systems, no compatibility guarantees
are made for those systems currently.

## Contributing

For information on how to contribute, consult [the contributing guidelines](./CONTRIBUTING.md)
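The instrumentation packages listed in this README are wired in by registering their interceptors with a gRPC client or server. Below is a minimal client-side sketch using the `otelgrpc` package vendored later in this change; the target address and the plaintext dial option are placeholders for illustration, not part of this PR.

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
)

func main() {
	// Attach the OpenTelemetry interceptors so every unary and streaming
	// call made over this connection is wrapped in a client span.
	conn, err := grpc.Dial("localhost:4317", // placeholder target
		grpc.WithInsecure(), // placeholder: plaintext for the sketch only
		grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),
		grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	_ = context.Background() // issue RPCs on conn as usual
}
```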
96
vendor/go.opentelemetry.io/contrib/RELEASING.md
generated
vendored
Normal file
@ -0,0 +1,96 @@
# Release Process

There are two types of release for the `go.opentelemetry.io/contrib` repo
and submodules.

1. **Case 1** A release due to changes independent of the
   `go.opentelemetry.io/otel` module, e.g. perhaps a critical bug fix in
   one of the contrib modules.

2. **Case 2** A release due to a breaking API change in
   `go.opentelemetry.io/otel`, which all modules in this repo
   depend on.

## Pre-Release

Update go.mod for submodules to depend on the upcoming new release of
the module in this repo, `go.opentelemetry.io/contrib`. Decide on the
next version of the semantic tag to apply to the contrib
module based on whether you fall into Case 1 or Case 2.

### Case 1

If the changes are all internal to this repo, then the new tag will
most often be a patch or minor version upgrade to the existing tag on
this module. Let's call this `<new_contrib_tag>`.

### Case 2

If a new release is required due to breaking changes in
`go.opentelemetry.io/otel`, then the new semantic tag for this repo
should be bumped to match the new `go.opentelemetry.io/otel` tag.
Let's call this `<new_otel_tag>`. The script checks that
`go.opentelemetry.io/otel@v<new_otel_tag>` is a valid tag, so you need
to wait until that tag has been pushed in the main repo.

In nearly all cases, `<new_contrib_tag>` should be the same as
`<new_otel_tag>`.

1. Run the `pre_release.sh` script to create a branch `pre_release_<new_contrib_tag>`.
   The script will also run `go mod tidy` and `make ci`.

   * **Case 1** `./pre_release.sh -t <new_contrib_tag>`
   * **Case 2** `./pre_release.sh -o <new_otel_tag> [-t <new_contrib_tag>]`

2. If you used `-o <new_otel_tag>` to rewrite the modules to depend on
   a new version of `go.opentelemetry.io/otel`, there will likely be
   breaking changes that require fixes to the files in this
   `contrib` repo. Make the appropriate fixes to address any API
   breaks and run through the

   ```
   git commit -m "fixes due to API changes"
   make precommit
   ```

   cycle until everything passes.

3. Push the changes to upstream.

   ```
   git diff main
   git push
   ```

4. Create a PR on GitHub and merge the PR once approved.

### Tag

Now create a `<new_contrib_tag>` on the commit hash of the changes made in the pre-release step.

1. Run the `tag.sh` script.

   ```
   ./tag.sh <new_contrib_tag> <commit-hash>
   ```

2. Push tags upstream. Make sure you push upstream for all the sub-module tags as well.

   ```
   git push upstream <new_contrib_tag>
   git push upstream <submodules-path/new_contrib_tag>
   ...
   ```

## Release

Now create a release for the new `<new_contrib_tag>` on GitHub.
The release body should include all the release notes in the Changelog for this release.
Additionally, the `tag.sh` script generates commit logs since the last release, which can be used to supplement the release notes.

<!-- ## Verify Examples -->
<!-- After releasing run following script to verify that examples build outside of the otel repo. -->
<!-- The script copies examples into a different directory and builds them. -->

<!-- ``` -->
<!-- ./verify_examples.sh -->
<!-- ``` -->
28
vendor/go.opentelemetry.io/contrib/contrib.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package contrib contains common values used across all
// instrumentation, exporter, and detector contributions.
package contrib // import "go.opentelemetry.io/contrib"

// Version is the current release version of OpenTelemetry Contrib in use.
func Version() string {
	return "0.21.0"
	// This string is updated by the pre_release.sh script during release
}

// SemVersion is the semantic version to be supplied to tracer/meter creation.
func SemVersion() string {
	return "semver:" + Version()
}
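`SemVersion()` is the value instrumentation packages pass as the tracer's instrumentation version; the `otelgrpc` interceptors vendored below do exactly this. A short illustration follows; the instrumentation name is hypothetical and only shows the intended call pattern.

```go
package example

import (
	otelcontrib "go.opentelemetry.io/contrib"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// newTracer obtains a tracer tagged with the contrib semantic version,
// mirroring how the vendored interceptors create theirs.
func newTracer() trace.Tracer {
	return otel.GetTracerProvider().Tracer(
		"example/instrumentation", // hypothetical instrumentation name
		trace.WithInstrumentationVersion(otelcontrib.SemVersion()),
	)
}
```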
20
vendor/go.opentelemetry.io/contrib/doc.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This package provides all of its functionality through its
// submodules. The submodules in the exporters directory provide
// implementations for trace and metric exporters for third-party
// collectors, and submodules in the instrumentation directory provide the
// instrumentation for the popular go libraries.
package contrib
3
vendor/go.opentelemetry.io/contrib/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
module go.opentelemetry.io/contrib

go 1.15
0
vendor/go.opentelemetry.io/contrib/go.sum
generated
vendored
Normal file
201
vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
16
vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/go.mod
generated
vendored
Normal file
@ -0,0 +1,16 @@
module go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc

go 1.15

replace go.opentelemetry.io/contrib => ../../../../

require (
	github.com/golang/protobuf v1.5.2
	github.com/stretchr/testify v1.7.0
	go.opentelemetry.io/contrib v0.21.0
	go.opentelemetry.io/otel v1.0.0-RC1
	go.opentelemetry.io/otel/oteltest v1.0.0-RC1
	go.opentelemetry.io/otel/trace v1.0.0-RC1
	go.uber.org/goleak v1.1.10
	google.golang.org/grpc v1.38.0
)
121
vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/go.sum
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
go.opentelemetry.io/otel v1.0.0-RC1 h1:4CeoX93DNTWt8awGK9JmNXzF9j7TyOu9upscEdtcdXc=
|
||||
go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I=
|
||||
go.opentelemetry.io/otel/oteltest v1.0.0-RC1 h1:G685iP3XiskCwk/z0eIabL55XUl2gk0cljhGk9sB0Yk=
|
||||
go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4=
|
||||
go.opentelemetry.io/otel/trace v1.0.0-RC1 h1:jrjqKJZEibFrDz+umEASeU3LvdVyWKlnTh7XEfwrT58=
|
||||
go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg=
|
||||
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 h1:Yq9t9jnGoR+dBuitxdo9l6Q7xh/zOyNnYUtDKaQ3x0E=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
130
vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otelgrpc

import (
	"context"

	"google.golang.org/grpc/metadata"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

const (
	// instrumentationName is the name of this instrumentation package.
	instrumentationName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	// GRPCStatusCodeKey is convention for numeric status code of a gRPC request.
	GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
)

// config is a group of options for this instrumentation.
type config struct {
	Propagators    propagation.TextMapPropagator
	TracerProvider trace.TracerProvider
}

// Option applies an option value for a config.
type Option interface {
	Apply(*config)
}

// newConfig returns a config configured with all the passed Options.
func newConfig(opts []Option) *config {
	c := &config{
		Propagators:    otel.GetTextMapPropagator(),
		TracerProvider: otel.GetTracerProvider(),
	}
	for _, o := range opts {
		o.Apply(c)
	}
	return c
}

type propagatorsOption struct{ p propagation.TextMapPropagator }

func (o propagatorsOption) Apply(c *config) {
	c.Propagators = o.p
}

// WithPropagators returns an Option to use the Propagators when extracting
// and injecting trace context from requests.
func WithPropagators(p propagation.TextMapPropagator) Option {
	return propagatorsOption{p: p}
}

type tracerProviderOption struct{ tp trace.TracerProvider }

func (o tracerProviderOption) Apply(c *config) {
	c.TracerProvider = o.tp
}

// WithTracerProvider returns an Option to use the TracerProvider when
// creating a Tracer.
func WithTracerProvider(tp trace.TracerProvider) Option {
	return tracerProviderOption{tp: tp}
}

type metadataSupplier struct {
	metadata *metadata.MD
}

// assert that metadataSupplier implements the TextMapCarrier interface
var _ propagation.TextMapCarrier = &metadataSupplier{}

func (s *metadataSupplier) Get(key string) string {
	values := s.metadata.Get(key)
	if len(values) == 0 {
		return ""
	}
	return values[0]
}

func (s *metadataSupplier) Set(key string, value string) {
	s.metadata.Set(key, value)
}

func (s *metadataSupplier) Keys() []string {
	out := make([]string, 0, len(*s.metadata))
	for key := range *s.metadata {
		out = append(out, key)
	}
	return out
}

// Inject injects correlation context and span context into the gRPC
// metadata object. This function is meant to be used on outgoing
// requests.
func Inject(ctx context.Context, metadata *metadata.MD, opts ...Option) {
	c := newConfig(opts)
	c.Propagators.Inject(ctx, &metadataSupplier{
		metadata: metadata,
	})
}

// Extract returns the correlation context and span context that
// another service encoded in the gRPC metadata object with Inject.
// This function is meant to be used on incoming requests.
func Extract(ctx context.Context, metadata *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
	c := newConfig(opts)
	ctx = c.Propagators.Extract(ctx, &metadataSupplier{
		metadata: metadata,
	})

	return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx)
}
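`Inject` and `Extract` above support manual context propagation over gRPC metadata when the interceptors are not used. A sketch of the receiving side under that assumption; the package, tracer name, and handler are hypothetical and only illustrate the call pattern.

```go
package example

import (
	"context"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
	"google.golang.org/grpc/metadata"
)

// handleIncoming continues a trace that a client encoded with Inject.
func handleIncoming(ctx context.Context) {
	// Pull the metadata the client attached to the RPC.
	md, _ := metadata.FromIncomingContext(ctx)

	// Recover the baggage and remote span context that Inject encoded.
	bags, spanCtx := otelgrpc.Extract(ctx, &md)
	_ = bags

	// Continue the trace as a child of the caller's span.
	ctx = trace.ContextWithRemoteSpanContext(ctx, spanCtx)
	_, span := otel.Tracer("example/server").Start(ctx, "handleIncoming")
	defer span.End()
}
```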
499
vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
generated
vendored
Normal file
@ -0,0 +1,499 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otelgrpc
|
||||
|
||||
// gRPC tracing middleware
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto" // nolint:staticcheck
|
||||
|
||||
"google.golang.org/grpc"
|
||||
grpc_codes "google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/baggage"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
otelcontrib "go.opentelemetry.io/contrib"
|
||||
)
|
||||
|
||||
type messageType attribute.KeyValue
|
||||
|
||||
// Event adds an event of the messageType to the span associated with the
|
||||
// passed context with id and size (if message is a proto message).
|
||||
func (m messageType) Event(ctx context.Context, id int, message interface{}) {
|
||||
span := trace.SpanFromContext(ctx)
|
||||
if p, ok := message.(proto.Message); ok {
|
||||
span.AddEvent("message", trace.WithAttributes(
|
||||
attribute.KeyValue(m),
|
||||
RPCMessageIDKey.Int(id),
|
||||
RPCMessageUncompressedSizeKey.Int(proto.Size(p)),
|
||||
))
|
||||
} else {
|
||||
span.AddEvent("message", trace.WithAttributes(
|
||||
attribute.KeyValue(m),
|
||||
RPCMessageIDKey.Int(id),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
messageSent = messageType(RPCMessageTypeSent)
|
||||
messageReceived = messageType(RPCMessageTypeReceived)
|
||||
)
|
||||
|
||||
// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
|
||||
// for use in a grpc.Dial call.
|
||||
func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
|
||||
return func(
|
||||
ctx context.Context,
|
||||
method string,
|
||||
req, reply interface{},
|
||||
cc *grpc.ClientConn,
|
||||
invoker grpc.UnaryInvoker,
|
||||
callOpts ...grpc.CallOption,
|
||||
) error {
|
||||
requestMetadata, _ := metadata.FromOutgoingContext(ctx)
|
||||
metadataCopy := requestMetadata.Copy()
|
||||
|
||||
tracer := newConfig(opts).TracerProvider.Tracer(
|
||||
instrumentationName,
|
||||
trace.WithInstrumentationVersion(otelcontrib.SemVersion()),
|
||||
)
|
||||
|
||||
name, attr := spanInfo(method, cc.Target())
|
||||
var span trace.Span
|
||||
ctx, span = tracer.Start(
|
||||
ctx,
|
||||
name,
|
||||
trace.WithSpanKind(trace.SpanKindClient),
|
||||
trace.WithAttributes(attr...),
|
||||
)
|
||||
defer span.End()
|
||||
|
||||
Inject(ctx, &metadataCopy, opts...)
|
||||
ctx = metadata.NewOutgoingContext(ctx, metadataCopy)
|
||||
|
||||
messageSent.Event(ctx, 1, req)
|
||||
|
||||
err := invoker(ctx, method, req, reply, cc, callOpts...)
|
||||
|
||||
messageReceived.Event(ctx, 1, reply)
|
||||
|
||||
if err != nil {
|
||||
s, _ := status.FromError(err)
|
||||
span.SetStatus(codes.Error, s.Message())
|
||||
span.SetAttributes(statusCodeAttr(s.Code()))
|
||||
} else {
|
||||
span.SetAttributes(statusCodeAttr(grpc_codes.OK))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}

type streamEventType int

type streamEvent struct {
	Type streamEventType
	Err  error
}

const (
	closeEvent streamEventType = iota
	receiveEndEvent
	errorEvent
)

// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and
// SendMsg method call.
type clientStream struct {
	grpc.ClientStream

	desc       *grpc.StreamDesc
	events     chan streamEvent
	eventsDone chan struct{}
	finished   chan error

	receivedMessageID int
	sentMessageID     int
}

var _ = proto.Marshal

func (w *clientStream) RecvMsg(m interface{}) error {
	err := w.ClientStream.RecvMsg(m)

	if err == nil && !w.desc.ServerStreams {
		w.sendStreamEvent(receiveEndEvent, nil)
	} else if err == io.EOF {
		w.sendStreamEvent(receiveEndEvent, nil)
	} else if err != nil {
		w.sendStreamEvent(errorEvent, err)
	} else {
		w.receivedMessageID++
		messageReceived.Event(w.Context(), w.receivedMessageID, m)
	}

	return err
}

func (w *clientStream) SendMsg(m interface{}) error {
	err := w.ClientStream.SendMsg(m)

	w.sentMessageID++
	messageSent.Event(w.Context(), w.sentMessageID, m)

	if err != nil {
		w.sendStreamEvent(errorEvent, err)
	}

	return err
}

func (w *clientStream) Header() (metadata.MD, error) {
	md, err := w.ClientStream.Header()

	if err != nil {
		w.sendStreamEvent(errorEvent, err)
	}

	return md, err
}

func (w *clientStream) CloseSend() error {
	err := w.ClientStream.CloseSend()

	if err != nil {
		w.sendStreamEvent(errorEvent, err)
	} else {
		w.sendStreamEvent(closeEvent, nil)
	}

	return err
}

const (
	clientClosedState byte = 1 << iota
	receiveEndedState
)

func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc) *clientStream {
	events := make(chan streamEvent)
	eventsDone := make(chan struct{})
	finished := make(chan error)

	go func() {
		defer close(eventsDone)

		// Both streams have to be closed
		state := byte(0)

		for event := range events {
			switch event.Type {
			case closeEvent:
				state |= clientClosedState
			case receiveEndEvent:
				state |= receiveEndedState
			case errorEvent:
				finished <- event.Err
				return
			}

			if state == clientClosedState|receiveEndedState {
				finished <- nil
				return
			}
		}
	}()

	return &clientStream{
		ClientStream: s,
		desc:         desc,
		events:       events,
		eventsDone:   eventsDone,
		finished:     finished,
	}
}

func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) {
	select {
	case <-w.eventsDone:
	case w.events <- streamEvent{Type: eventType, Err: err}:
	}
}

// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
// for use in a grpc.Dial call.
func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
	return func(
		ctx context.Context,
		desc *grpc.StreamDesc,
		cc *grpc.ClientConn,
		method string,
		streamer grpc.Streamer,
		callOpts ...grpc.CallOption,
	) (grpc.ClientStream, error) {
		requestMetadata, _ := metadata.FromOutgoingContext(ctx)
		metadataCopy := requestMetadata.Copy()

		tracer := newConfig(opts).TracerProvider.Tracer(
			instrumentationName,
			trace.WithInstrumentationVersion(otelcontrib.SemVersion()),
		)

		name, attr := spanInfo(method, cc.Target())
		var span trace.Span
		ctx, span = tracer.Start(
			ctx,
			name,
			trace.WithSpanKind(trace.SpanKindClient),
			trace.WithAttributes(attr...),
		)

		Inject(ctx, &metadataCopy, opts...)
		ctx = metadata.NewOutgoingContext(ctx, metadataCopy)

		s, err := streamer(ctx, desc, cc, method, callOpts...)
		if err != nil {
			grpcStatus, _ := status.FromError(err)
			span.SetStatus(codes.Error, grpcStatus.Message())
			span.SetAttributes(statusCodeAttr(grpcStatus.Code()))
			span.End()
			return s, err
		}
		stream := wrapClientStream(s, desc)

		go func() {
			err := <-stream.finished

			if err != nil {
				s, _ := status.FromError(err)
				span.SetStatus(codes.Error, s.Message())
				span.SetAttributes(statusCodeAttr(s.Code()))
			} else {
				span.SetAttributes(statusCodeAttr(grpc_codes.OK))
			}

			span.End()
		}()

		return stream, nil
	}
}
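
// Illustrative sketch (not part of the vendored file): the stream interceptor
// above is installed the same way, via a dial option. The address is a
// hypothetical placeholder.
func exampleDialWithStreamInterceptor() (*grpc.ClientConn, error) {
	return grpc.Dial(
		"127.0.0.1:7777", // hypothetical address
		grpc.WithInsecure(),
		grpc.WithStreamInterceptor(StreamClientInterceptor()),
	)
}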

// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable
// for use in a grpc.NewServer call.
func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
	return func(
		ctx context.Context,
		req interface{},
		info *grpc.UnaryServerInfo,
		handler grpc.UnaryHandler,
	) (interface{}, error) {
		requestMetadata, _ := metadata.FromIncomingContext(ctx)
		metadataCopy := requestMetadata.Copy()

		bags, spanCtx := Extract(ctx, &metadataCopy, opts...)
		ctx = baggage.ContextWithBaggage(ctx, bags)

		tracer := newConfig(opts).TracerProvider.Tracer(
			instrumentationName,
			trace.WithInstrumentationVersion(otelcontrib.SemVersion()),
		)

		name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx))
		ctx, span := tracer.Start(
			trace.ContextWithRemoteSpanContext(ctx, spanCtx),
			name,
			trace.WithSpanKind(trace.SpanKindServer),
			trace.WithAttributes(attr...),
		)
		defer span.End()

		messageReceived.Event(ctx, 1, req)

		resp, err := handler(ctx, req)
		if err != nil {
			s, _ := status.FromError(err)
			span.SetStatus(codes.Error, s.Message())
			span.SetAttributes(statusCodeAttr(s.Code()))
			messageSent.Event(ctx, 1, s.Proto())
		} else {
			span.SetAttributes(statusCodeAttr(grpc_codes.OK))
			messageSent.Event(ctx, 1, resp)
		}

		return resp, err
	}
}

// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and
// SendMsg method call.
type serverStream struct {
	grpc.ServerStream
	ctx context.Context

	receivedMessageID int
	sentMessageID     int
}

func (w *serverStream) Context() context.Context {
	return w.ctx
}

func (w *serverStream) RecvMsg(m interface{}) error {
	err := w.ServerStream.RecvMsg(m)

	if err == nil {
		w.receivedMessageID++
		messageReceived.Event(w.Context(), w.receivedMessageID, m)
	}

	return err
}

func (w *serverStream) SendMsg(m interface{}) error {
	err := w.ServerStream.SendMsg(m)

	w.sentMessageID++
	messageSent.Event(w.Context(), w.sentMessageID, m)

	return err
}

func wrapServerStream(ctx context.Context, ss grpc.ServerStream) *serverStream {
	return &serverStream{
		ServerStream: ss,
		ctx:          ctx,
	}
}

// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
// for use in a grpc.NewServer call.
func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
	return func(
		srv interface{},
		ss grpc.ServerStream,
		info *grpc.StreamServerInfo,
		handler grpc.StreamHandler,
	) error {
		ctx := ss.Context()

		requestMetadata, _ := metadata.FromIncomingContext(ctx)
		metadataCopy := requestMetadata.Copy()

		bags, spanCtx := Extract(ctx, &metadataCopy, opts...)
		ctx = baggage.ContextWithBaggage(ctx, bags)

		tracer := newConfig(opts).TracerProvider.Tracer(
			instrumentationName,
			trace.WithInstrumentationVersion(otelcontrib.SemVersion()),
		)

		name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx))
		ctx, span := tracer.Start(
			trace.ContextWithRemoteSpanContext(ctx, spanCtx),
			name,
			trace.WithSpanKind(trace.SpanKindServer),
			trace.WithAttributes(attr...),
		)
		defer span.End()

		err := handler(srv, wrapServerStream(ctx, ss))

		if err != nil {
			s, _ := status.FromError(err)
			span.SetStatus(codes.Error, s.Message())
			span.SetAttributes(statusCodeAttr(s.Code()))
		} else {
			span.SetAttributes(statusCodeAttr(grpc_codes.OK))
		}

		return err
	}
}
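
// Illustrative sketch (not part of the vendored file): on the server side both
// interceptors above are typically registered when constructing the gRPC server.
func exampleNewServerWithTracing() *grpc.Server {
	return grpc.NewServer(
		grpc.UnaryInterceptor(UnaryServerInterceptor()),
		grpc.StreamInterceptor(StreamServerInterceptor()),
	)
}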

// spanInfo returns a span name and all appropriate attributes from the gRPC
// method and peer address.
func spanInfo(fullMethod, peerAddress string) (string, []attribute.KeyValue) {
	attrs := []attribute.KeyValue{RPCSystemGRPC}
	name, mAttrs := parseFullMethod(fullMethod)
	attrs = append(attrs, mAttrs...)
	attrs = append(attrs, peerAttr(peerAddress)...)
	return name, attrs
}

// peerAttr returns attributes about the peer address.
func peerAttr(addr string) []attribute.KeyValue {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return []attribute.KeyValue(nil)
	}

	if host == "" {
		host = "127.0.0.1"
	}

	return []attribute.KeyValue{
		semconv.NetPeerIPKey.String(host),
		semconv.NetPeerPortKey.String(port),
	}
}

// peerFromCtx returns a peer address from a context, if one exists.
func peerFromCtx(ctx context.Context) string {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return ""
	}
	return p.Addr.String()
}

// parseFullMethod returns a span name following the OpenTelemetry semantic
// conventions as well as all applicable span attribute.KeyValue attributes based
// on a gRPC's FullMethod.
func parseFullMethod(fullMethod string) (string, []attribute.KeyValue) {
	name := strings.TrimLeft(fullMethod, "/")
	parts := strings.SplitN(name, "/", 2)
	if len(parts) != 2 {
		// Invalid format, does not follow `/package.service/method`.
		return name, []attribute.KeyValue(nil)
	}

	var attrs []attribute.KeyValue
	if service := parts[0]; service != "" {
		attrs = append(attrs, semconv.RPCServiceKey.String(service))
	}
	if method := parts[1]; method != "" {
		attrs = append(attrs, semconv.RPCMethodKey.String(method))
	}
	return name, attrs
}
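
// Illustrative sketch (not part of the vendored file): a containerd-style
// method string such as "/containerd.services.containers.v1.Containers/Get"
// yields the span name "containerd.services.containers.v1.Containers/Get"
// plus rpc.service and rpc.method attributes.
func exampleParseFullMethod() (string, []attribute.KeyValue) {
	return parseFullMethod("/containerd.services.containers.v1.Containers/Get")
}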

// statusCodeAttr returns status code attribute based on given gRPC code
func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue {
	return GRPCStatusCodeKey.Int64(int64(c))
}

52 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go generated vendored Normal file
@@ -0,0 +1,52 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otelgrpc

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)

// Semantic conventions for attribute keys for gRPC.
const (
	// Name of message transmitted or received.
	RPCNameKey = attribute.Key("name")

	// Type of message transmitted or received.
	RPCMessageTypeKey = attribute.Key("message.type")

	// Identifier of message transmitted or received.
	RPCMessageIDKey = attribute.Key("message.id")

	// The compressed size of the message transmitted or received in bytes.
	RPCMessageCompressedSizeKey = attribute.Key("message.compressed_size")

	// The uncompressed size of the message transmitted or received in
	// bytes.
	RPCMessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
)

// Semantic conventions for common RPC attributes.
var (
	// Semantic convention for gRPC as the remoting system.
	RPCSystemGRPC = semconv.RPCSystemKey.String("grpc")

	// Semantic convention for a message named message.
	RPCNameMessage = RPCNameKey.String("message")

	// Semantic conventions for RPC message types.
	RPCMessageTypeSent     = RPCMessageTypeKey.String("SENT")
	RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
)

158 vendor/go.opentelemetry.io/contrib/pre_release.sh generated vendored Normal file
@@ -0,0 +1,158 @@
#!/usr/bin/env bash

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#
# This script is used for
# a) creating a new tagged release of go.opentelemetry.io/contrib
# b) bumping the referenced version of go.opentelemetry.io/otel
#
# The options can be used together or individually.
#
set -e

declare CONTRIB_TAG
declare OTEL_TAG

help() {
	printf "\n"
	printf "Usage: %s [-o otel_tag] [-t tag]\n" "$0"
	printf "\t-o Otel release tag. Update all go.mod to reference go.opentelemetry.io/otel <otel_tag>.\n"
	printf "\t-t New Contrib unreleased tag. Update all go.mod files with this tag.\n"
	exit 1 # Exit script after printing help
}

while getopts "t:o:" opt
do
	case "$opt" in
		t ) CONTRIB_TAG="$OPTARG" ;;
		o ) OTEL_TAG="$OPTARG" ;;
		? ) help ;; # Print help
	esac
done

declare -r SEMVER_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$"

validate_tag() {
	local tag_=$1
	if [[ "${tag_}" =~ ${SEMVER_REGEX} ]]; then
		printf "%s is valid semver tag.\n" "${tag_}"
	else
		printf "%s is not a valid semver tag.\n" "${tag_}"
		return 1
	fi
}

# Print help in case parameters are empty
if [[ -z "$CONTRIB_TAG" && -z "$OTEL_TAG" ]]
then
	printf "At least one of '-o' or '-t' must be specified.\n"
	help
fi


## Validate tags first
if [ -n "${OTEL_TAG}" ]; then
	validate_tag "${OTEL_TAG}" || exit $?

	# check that OTEL_TAG is a currently released tag for go.opentelemetry.io/otel
	TMPDIR=$(mktemp -d "/tmp/otel-contrib.XXXXXX") || exit 1
	trap "rm -fr ${TMPDIR}" EXIT
	(cd "${TMPDIR}" && go mod init tagtest)
	# requires go 1.14 for support of '-modfile'
	if ! go get -modfile="${TMPDIR}/go.mod" -d -v "go.opentelemetry.io/otel@${OTEL_TAG}"; then
		printf "go.opentelemetry.io/otel %s does not exist. Please supply a valid tag\n" "${OTEL_TAG}"
		exit 1
	fi
fi
if [ -n "${CONTRIB_TAG}" ]; then
	validate_tag "${CONTRIB_TAG}" || exit $?
	TAG_FOUND=$(git tag --list "${CONTRIB_TAG}")
	if [[ ${TAG_FOUND} = "${CONTRIB_TAG}" ]] ; then
		printf "Tag %s already exists in this repo\n" "${CONTRIB_TAG}"
		exit 1
	fi
else
	CONTRIB_TAG=${OTEL_TAG} # if contrib_tag not specified, but OTEL_TAG is, then set it to OTEL_TAG
fi

# Get version for contrib.go
OTEL_CONTRIB_VERSION=$(echo "${CONTRIB_TAG}" | egrep -o "${SEMVER_REGEX}")
# Strip leading v
OTEL_CONTRIB_VERSION="${OTEL_CONTRIB_VERSION#v}"

cd "$(dirname "$0")"

if ! git diff --quiet; then \
	printf "Working tree is not clean, can't proceed\n"
	git status
	git diff
	exit 1
fi

# Update contrib.go version definition
cp contrib.go contrib.go.bak
sed "s/\(return \"\)[0-9]*\.[0-9]*\.[0-9]*\"/\1${OTEL_CONTRIB_VERSION}\"/" ./contrib.go.bak > ./contrib.go
rm -f ./contrib.go.bak

declare -r BRANCH_NAME=pre_release_${CONTRIB_TAG}

patch_gomods() {
	local pkg_=$1
	local tag_=$2
	# now do the same for all the directories underneath
	PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep -v 'tools' | sed 's|^\.\/||' | sort)
	# quote any '.' characters in the pkg name
	local quoted_pkg_=${pkg_//./\\.}
	for dir in $PACKAGE_DIRS; do
		cp "${dir}/go.mod" "${dir}/go.mod.bak"
		sed "s|${quoted_pkg_}\([^ ]*\) v[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*[^0-9]*.*$|${pkg_}\1 ${tag_}|" "${dir}/go.mod.bak" >"${dir}/go.mod"
		rm -f "${dir}/go.mod.bak"
	done
}

# branch off from existing main
git checkout -b "${BRANCH_NAME}" main

# Update go.mods
if [ -n "${OTEL_TAG}" ]; then
	# first update the top most module
	go get "go.opentelemetry.io/otel@${OTEL_TAG}"
	patch_gomods go.opentelemetry.io/otel "${OTEL_TAG}"
fi

if [ -n "${CONTRIB_TAG}" ]; then
	patch_gomods go.opentelemetry.io/contrib "${CONTRIB_TAG}"
fi

git diff
# Run lint to update go.sum
make lint

# Add changes and commit.
git add .
make ci
# Check whether registry links are up to date
make registry-links-check

declare COMMIT_MSG=""
if [ -n "${OTEL_TAG}" ]; then
	COMMIT_MSG+="Bumping otel version to ${OTEL_TAG}"
fi
COMMIT_MSG+=". Prepare for releasing ${CONTRIB_TAG}"
git commit -m "${COMMIT_MSG}"

printf "Now run following to verify the changes.\ngit diff main\n"
printf "\nThen push the changes to upstream\n"

178 vendor/go.opentelemetry.io/contrib/tag.sh generated vendored Normal file
@@ -0,0 +1,178 @@
#!/usr/bin/env bash

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

readonly PROGNAME=$(basename "$0")
readonly PROGDIR=$(readlink -m "$(dirname "$0")")

readonly EXCLUDE_PACKAGES="tools"
readonly SEMVER_REGEX="v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?"

usage() {
	cat <<- EOF
	Usage: $PROGNAME [OPTIONS] SEMVER_TAG COMMIT_HASH

	Creates git tag for all Go packages in project.

	OPTIONS:
	  -h --help        Show this help.

	ARGUMENTS:
	  SEMVER_TAG       Semantic version to tag with.
	  COMMIT_HASH      Git commit hash to tag.
	EOF
}

cmdline() {
	local arg commit

	for arg
	do
		local delim=""
		case "$arg" in
			# Translate long form options to short form.
			--help) args="${args}-h ";;
			# Pass through for everything else.
			*) [[ "${arg:0:1}" == "-" ]] || delim="\""
				args="${args}${delim}${arg}${delim} ";;
		esac
	done

	# Reset and process short form options.
	eval set -- "$args"

	while getopts "h" OPTION
	do
		case $OPTION in
		h)
			usage
			exit 0
			;;
		*)
			echo "unknown option: $OPTION"
			usage
			exit 1
			;;
		esac
	done

	# Positional arguments.
	shift $((OPTIND-1))
	readonly TAG="$1"
	if [ -z "$TAG" ]
	then
		echo "missing SEMVER_TAG"
		usage
		exit 1
	fi
	if [[ ! "$TAG" =~ $SEMVER_REGEX ]]
	then
		printf "invalid semantic version: %s\n" "$TAG"
		exit 2
	fi
	if [[ "$( git tag --list "$TAG" )" ]]
	then
		printf "tag already exists: %s\n" "$TAG"
		exit 2
	fi

	shift
	commit="$1"
	if [ -z "$commit" ]
	then
		echo "missing COMMIT_HASH"
		usage
		exit 1
	fi
	# Verify rev is for a commit and unify hashes into a complete SHA1.
	readonly SHA="$( git rev-parse --quiet --verify "${commit}^{commit}" )"
	if [ -z "$SHA" ]
	then
		printf "invalid commit hash: %s\n" "$commit"
		exit 2
	fi
	if [ "$( git merge-base "$SHA" HEAD )" != "$SHA" ]
	then
		printf "commit '%s' not found on this branch\n" "$commit"
		exit 2
	fi
}

package_dirs() {
	# Return a list of package directories in the form:
	#
	#  package/directory/a
	#  package/directory/b
	#  deeper/package/directory/a
	#  ...
	#
	# Making sure to exclude any packages in the EXCLUDE_PACKAGES regexp.
	find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; \
		| grep -E -v "$EXCLUDE_PACKAGES" \
		| sed 's/^\.\///' \
		| sort
}

git_tag() {
	local tag="$1"
	local commit="$2"

	git tag -a "$tag" -s -m "Version $tag" "$commit"
}

previous_version() {
	local current="$1"

	# Requires git > 2.0
	git tag -l --sort=v:refname \
		| grep -E "^${SEMVER_REGEX}$" \
		| grep -v "$current" \
		| tail -1
}

print_changes() {
	local tag="$1"
	local previous

	previous="$( previous_version "$tag" )"
	if [ -n "$previous" ]
	then
		printf "\nRaw changes made between %s and %s\n" "$previous" "$tag"
		printf "======================================\n"
		git --no-pager log --pretty=oneline "${previous}..$tag"
	fi
}

main() {
	local dir

	cmdline "$@"

	cd "$PROGDIR" || exit 3

	# Create tag for root package.
	git_tag "$TAG" "$SHA"
	printf "created tag: %s\n" "$TAG"

	# Create tag for all sub-packages.
	for dir in $( package_dirs )
	do
		git_tag "${dir}/$TAG" "$SHA"
		printf "created tag: %s\n" "${dir}/$TAG"
	done

	print_changes "$TAG"
}
main "$@"

3 vendor/go.opentelemetry.io/otel/.gitattributes generated vendored Normal file
@@ -0,0 +1,3 @@
* text=auto eol=lf
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf

20 vendor/go.opentelemetry.io/otel/.gitignore generated vendored Normal file
@@ -0,0 +1,20 @@
.DS_Store
Thumbs.db

.tools/
.idea/
.vscode/
*.iml
*.so
coverage.*

gen/

/example/jaeger/jaeger
/example/namedtracer/namedtracer
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/prom-collector/prom-collector
/example/zipkin/zipkin
/example/otel-collector/otel-collector

3 vendor/go.opentelemetry.io/otel/.gitmodules generated vendored Normal file
@@ -0,0 +1,3 @@
[submodule "opentelemetry-proto"]
	path = exporters/otlp/internal/opentelemetry-proto
	url = https://github.com/open-telemetry/opentelemetry-proto

Some files were not shown because too many files have changed in this diff.