Bump OpenTelemetry dependencies

Signed-off-by: Luca Comellini <luca.com@gmail.com>
Author: Luca Comellini
Date: 2022-05-11 17:54:56 -07:00
Parent: e85b5a0b81
Commit: c1140aef65
GPG Key ID: 4850B36E838507C6 (no known key found for this signature in database)
153 changed files with 5457 additions and 4134 deletions

go.mod (28 lines changed)

@@ -28,7 +28,7 @@ require (
  github.com/docker/go-units v0.4.0
  github.com/emicklei/go-restful/v3 v3.7.3
  github.com/fsnotify/fsnotify v1.5.1
- github.com/google/go-cmp v0.5.6
+ github.com/google/go-cmp v0.5.8
  github.com/google/uuid v1.2.0
  github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
  github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
@@ -49,18 +49,18 @@ require (
  github.com/pkg/errors v0.9.1
  github.com/prometheus/client_golang v1.12.1
  github.com/sirupsen/logrus v1.8.1
- github.com/stretchr/testify v1.7.0
+ github.com/stretchr/testify v1.7.1
  github.com/tchap/go-patricia/v2 v2.3.1
  github.com/urfave/cli v1.22.2
  github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
  go.etcd.io/bbolt v1.3.6
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0
- go.opentelemetry.io/otel v1.3.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0
- go.opentelemetry.io/otel/sdk v1.3.0
- go.opentelemetry.io/otel/trace v1.3.0
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.32.0
+ go.opentelemetry.io/otel v1.7.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0
+ go.opentelemetry.io/otel/sdk v1.7.0
+ go.opentelemetry.io/otel/trace v1.7.0
  golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
  golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
  google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46
@@ -80,21 +80,21 @@ require (
  github.com/beorn7/perks v1.0.1 // indirect
  github.com/blang/semver v3.5.1+incompatible // indirect
  github.com/blang/semver/v4 v4.0.0 // indirect
- github.com/cenkalti/backoff/v4 v4.1.2 // indirect
+ github.com/cenkalti/backoff/v4 v4.1.3 // indirect
  github.com/cespare/xxhash/v2 v2.1.2 // indirect
  github.com/cilium/ebpf v0.7.0 // indirect
  github.com/containernetworking/cni v1.1.0 // indirect
  github.com/containers/ocicrypt v1.1.3 // indirect
  github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
  github.com/emicklei/go-restful v2.9.5+incompatible // indirect
- github.com/go-logr/logr v1.2.2 // indirect
+ github.com/go-logr/logr v1.2.3 // indirect
  github.com/go-logr/stdr v1.2.2 // indirect
  github.com/godbus/dbus/v5 v5.0.6 // indirect
  github.com/gogo/protobuf v1.3.2 // indirect
  github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
  github.com/golang/protobuf v1.5.2 // indirect
  github.com/google/gofuzz v1.2.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
  github.com/hashicorp/errwrap v1.1.0 // indirect
  github.com/json-iterator/go v1.1.12 // indirect
  github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
@@ -118,8 +118,8 @@ require (
  github.com/xeipuuv/gojsonschema v1.2.0 // indirect
  go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
  go.opencensus.io v0.23.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 // indirect
- go.opentelemetry.io/proto/otlp v0.11.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 // indirect
+ go.opentelemetry.io/proto/otlp v0.16.0 // indirect
  golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect
  golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect
  golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
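The go.opentelemetry.io modules above are versioned together, which is why the API, SDK, trace package and OTLP trace exporters all move from v1.3.0 to v1.7.0 in one commit, with the contrib otelgrpc instrumentation following to its matching v0.32.0. As a rough illustration of how those pieces fit together, a minimal tracing setup might look like the sketch below; it is not code from this repository, and the endpoint and tracer name are placeholders.

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// OTLP/gRPC exporter from otlptracegrpc (bumped to v1.7.0 above);
	// the endpoint here is a placeholder.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Tracer provider from the SDK module, registered with the otel API package.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)

	// The trace module supplies the Tracer and Span types used here.
	_, span := tp.Tracer("example").Start(ctx, "demo-span")
	span.End()
}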

go.sum (47 lines changed)

@@ -135,8 +135,9 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n
  github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
  github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
  github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
- github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo=
  github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+ github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
+ github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
  github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
  github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
  github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
@@ -402,8 +403,9 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg
  github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
  github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
  github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
- github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
  github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+ github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+ github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
  github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
  github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
  github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
@@ -444,6 +446,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
  github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
  github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
  github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+ github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
+ github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
  github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
  github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
  github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -494,8 +498,10 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
  github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
  github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
  github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
- github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
  github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+ github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
  github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
  github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
  github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -544,8 +550,9 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
  github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
  github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
  github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
- github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
  github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
  github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
  github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
  github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -885,8 +892,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
  github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
  github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
  github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
- github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
  github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
  github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
  github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
  github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -960,34 +968,43 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
  go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
  go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
  go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0 h1:Ky1MObd188aGbgb5OgNnwGuEEwI9MVIcc7rBW6zk5Ak=
  go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.32.0 h1:WenoaOMNP71oq3KkMZ/jnxI9xU/JSCLw8yZILSI2lfU=
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.32.0/go.mod h1:J0dBVrt7dPS/lKJyQoW0xzQiUr4r2Ik1VwPjAUWnofI=
  go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
  go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
- go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y=
  go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
+ go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
+ go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
  go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 h1:R/OBkMoGgfy2fLhs2QhkCI1w4HLEQX92GCcJB6SSdNk=
  go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
+ go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 h1:7Yxsak1q4XrJ5y7XBnNwqWx9amMZvoidCctv62XOQ6Y=
+ go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04=
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 h1:giGm8w67Ja7amYNfYMdme7xSp2pIxThWopw8+QP51Yk=
  go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0 h1:cMDtmgJ5FpRvqx9x2Aq+Mm0O6K/zcUkH73SFz20TuBw=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE=
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0 h1:VQbUHoJqytHHSJ1OZodPH9tvZZSVzUHjPHpkO85sT6k=
  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0 h1:MFAyzUPrTwLOwCi+cltN0ZVyy4phU41lwH+lyMyQTS4=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o=
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 h1:Ydage/P0fRrSPpZeCVxzjqGcI6iVmG2xb43+IR8cjqM=
  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0 h1:pLP0MH4MAqeTEV0g/4flxw9O8Is48uAIauAnjznbW50=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0/go.mod h1:aFXT9Ng2seM9eizF+LfKiyPBGy8xIZKwhusC1gIu3hA=
  go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
  go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
  go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
- go.opentelemetry.io/otel/sdk v1.3.0 h1:3278edCoH89MEJ0Ky8WQXVmDQv3FX4ZJ3Pp+9fJreAI=
  go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
+ go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0=
+ go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
  go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
  go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
  go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
- go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY=
  go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
+ go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
+ go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
  go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
- go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU=
  go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
+ go.opentelemetry.io/proto/otlp v0.16.0 h1:WHzDWdXUvbc5bG2ObdrGfaNpQz7ft7QN9HHmJlbiB1E=
+ go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
  go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
  go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
  go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
@@ -1340,7 +1357,6 @@ golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom
  golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
  golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
  golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
- golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
  golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
  google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
  google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
@@ -1421,6 +1437,7 @@ google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6D
  google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
  google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
  google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
  google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
  google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
  google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46 h1:G1IeWbjrqEq9ChWxEuRPJu6laA67+XgTFHVSAvepr38=

Changed file (path not shown): a second go.mod in the repository

@@ -15,7 +15,7 @@ require (
  github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
  github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
  github.com/sirupsen/logrus v1.8.1
- github.com/stretchr/testify v1.7.0
+ github.com/stretchr/testify v1.7.1
  golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
  )
@@ -34,7 +34,7 @@ require (
  github.com/godbus/dbus/v5 v5.0.6 // indirect
  github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
  github.com/golang/protobuf v1.5.2 // indirect
- github.com/google/go-cmp v0.5.6 // indirect
+ github.com/google/go-cmp v0.5.8 // indirect
  github.com/google/uuid v1.2.0 // indirect
  github.com/hashicorp/errwrap v1.1.0 // indirect
  github.com/hashicorp/go-multierror v1.1.1 // indirect

Changed file (path not shown): the corresponding go.sum

@@ -96,7 +96,7 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
  github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
  github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
  github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
- github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+ github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
  github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
  github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
  github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
@@ -248,9 +248,8 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7
  github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
  github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
  github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
- github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
  github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+ github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
- github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
  github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
  github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
  github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
@@ -279,6 +278,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
  github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
  github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
  github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+ github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
  github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
  github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
  github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -327,8 +327,10 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
  github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
  github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
  github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
- github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
  github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+ github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
  github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
  github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
  github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -369,6 +371,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2
  github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
  github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
  github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
  github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
  github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
  github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -630,8 +633,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
  github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
  github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
  github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
- github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
  github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
  github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
  github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
  github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -684,25 +688,25 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
  go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
  go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
  go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.32.0/go.mod h1:J0dBVrt7dPS/lKJyQoW0xzQiUr4r2Ik1VwPjAUWnofI=
  go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
  go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
- go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
+ go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
  go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
+ go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o=
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0/go.mod h1:aFXT9Ng2seM9eizF+LfKiyPBGy8xIZKwhusC1gIu3hA=
  go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
  go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
  go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
- go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
+ go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
  go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
  go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
  go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
- go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
+ go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
  go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
- go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
+ go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
  go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
  go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
  go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
@@ -1018,7 +1022,6 @@ golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom
  golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
  golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
  golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
- golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
  golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
  google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
  google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -1095,6 +1098,7 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D
  google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
  google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
  google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
  google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
  google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46 h1:G1IeWbjrqEq9ChWxEuRPJu6laA67+XgTFHVSAvepr38=
  google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=

Changed file (path not shown): vendored github.com/cenkalti/backoff/v4

@@ -147,6 +147,9 @@ func (b *ExponentialBackOff) incrementCurrentInterval() {
  // Returns a random value from the following interval:
  // [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
  func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+ 	if randomizationFactor == 0 {
+ 		return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+ 	}
  	var delta = randomizationFactor * float64(currentInterval)
  	var minInterval = float64(currentInterval) - delta
  	var maxInterval = float64(currentInterval) + delta
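The v4.1.3 change above short-circuits jitter when randomizationFactor is 0, so the backoff becomes fully deterministic. For reference, the interval the comment describes works out as in this standalone sketch; the numbers are illustrative, not taken from this commit.

package main

import (
	"fmt"
	"time"
)

// jitter mirrors the computation shown in the diff: pick a value in
// [interval - factor*interval, interval + factor*interval] from a
// random number in [0, 1).
func jitter(factor, random float64, interval time.Duration) time.Duration {
	if factor == 0 {
		return interval // deterministic when no randomization is requested
	}
	delta := factor * float64(interval)
	min := float64(interval) - delta
	max := float64(interval) + delta
	return time.Duration(min + (random * (max - min + 1)))
}

func main() {
	// With factor 0.5 and a 1s interval the result lands in [500ms, 1.5s].
	fmt.Println(jitter(0.5, 0.25, time.Second)) // 750ms
	// With factor 0 the interval is returned unchanged.
	fmt.Println(jitter(0, 0.99, time.Second)) // 1s
}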

Changed file (path not shown): vendored github.com/go-logr/logr README

@@ -105,14 +105,18 @@ with higher verbosity means more (and less important) logs will be generated.
  There are implementations for the following logging libraries:
  - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+ - **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
  - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
  - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+ - **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
  - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
  - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
  - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
  - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
  - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
  - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+ - **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+ - **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
  ## FAQ
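As an aside (not part of the vendored README), the logr API that all of the bridges listed above implement looks roughly like this minimal funcr-based sketch; the verbosity threshold and messages are made up for illustration.

package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr turns a plain function into a logr.LogSink; Verbosity controls
	// which V(...) levels are emitted.
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 1})

	log.Info("starting", "port", 8080)     // always emitted
	log.V(1).Info("details", "retries", 3) // emitted because Verbosity >= 1
	log.V(2).Info("noise")                 // dropped
	log.Error(fmt.Errorf("boom"), "failed", "op", "dial")
}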

Changed file (path not shown): vendored github.com/go-logr/logr (funcr)

@@ -351,15 +351,15 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
  	if v, ok := value.(logr.Marshaler); ok {
  		// Replace the value with what the type wants to get logged.
  		// That then gets handled below via reflection.
- 		value = v.MarshalLog()
+ 		value = invokeMarshaler(v)
  	}
  	// Handle types that want to format themselves.
  	switch v := value.(type) {
  	case fmt.Stringer:
- 		value = v.String()
+ 		value = invokeStringer(v)
  	case error:
- 		value = v.Error()
+ 		value = invokeError(v)
  	}
  	// Handling the most common types without reflect is a small perf win.
@@ -408,8 +408,9 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
  			if i > 0 {
  				buf.WriteByte(',')
  			}
+ 			k, _ := v[i].(string) // sanitize() above means no need to check success
  			// arbitrary keys might need escaping
- 			buf.WriteString(prettyString(v[i].(string)))
+ 			buf.WriteString(prettyString(k))
  			buf.WriteByte(':')
  			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
  		}
@@ -596,6 +597,33 @@ func isEmpty(v reflect.Value) bool {
  	return false
  }
+ func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+ 	defer func() {
+ 		if r := recover(); r != nil {
+ 			ret = fmt.Sprintf("<panic: %s>", r)
+ 		}
+ 	}()
+ 	return m.MarshalLog()
+ }
+ func invokeStringer(s fmt.Stringer) (ret string) {
+ 	defer func() {
+ 		if r := recover(); r != nil {
+ 			ret = fmt.Sprintf("<panic: %s>", r)
+ 		}
+ 	}()
+ 	return s.String()
+ }
+ func invokeError(e error) (ret string) {
+ 	defer func() {
+ 		if r := recover(); r != nil {
+ 			ret = fmt.Sprintf("<panic: %s>", r)
+ 		}
+ 	}()
+ 	return e.Error()
+ }
  // Caller represents the original call site for a log line, after considering
  // logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
  // Line fields will always be provided, while the Func field is optional.
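The invoke* wrappers added above keep a panicking MarshalLog, String, or Error implementation from crashing the caller: the value is replaced with a "<panic: ...>" marker instead. A small self-contained illustration of the same pattern; the badStringer type and its message are invented for this example.

package main

import "fmt"

// safeString calls s.String() but converts a panic into a readable
// placeholder, mirroring the invokeStringer wrapper in the diff above.
func safeString(s fmt.Stringer) (ret string) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return s.String()
}

// badStringer is a hypothetical type whose String method panics.
type badStringer struct{}

func (badStringer) String() string { panic("nil dereference") }

func main() {
	fmt.Println(safeString(badStringer{})) // prints: <panic: nil dereference>
}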

Changed file (path not shown): vendored github.com/go-logr/logr package documentation

@@ -115,6 +115,15 @@ limitations under the License.
  // may be any Go value, but how the value is formatted is determined by the
  // LogSink implementation.
  //
+ // Logger instances are meant to be passed around by value. Code that receives
+ // such a value can call its methods without having to check whether the
+ // instance is ready for use.
+ //
+ // Calling methods with the null logger (Logger{}) as instance will crash
+ // because it has no LogSink. Therefore this null logger should never be passed
+ // around. For cases where passing a logger is optional, a pointer to Logger
+ // should be used.
+ //
  // Key Naming Conventions
  //
  // Keys are not strictly required to conform to any specification or regex, but
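The new doc text spells out the convention: pass logr.Logger by value, and reach for *logr.Logger only when logging is optional, so a zero-value Logger{} is never called. A hedged sketch of that pattern; the Server type and job names are invented for illustration.

package main

import (
	"github.com/go-logr/logr"
)

// Server takes its logger by value: callers always hand in a usable Logger.
type Server struct {
	log logr.Logger
}

// process takes an optional logger; nil means "don't log", which avoids ever
// invoking methods on a zero-value Logger{}.
func process(job string, log *logr.Logger) {
	if log != nil {
		log.Info("processing", "job", job)
	}
	// ... do the work ...
}

func main() {
	log := logr.Discard() // a no-op Logger, always safe to call
	s := Server{log: log}
	s.log.Info("server constructed")
	process("backup", &log)
	process("cleanup", nil) // logging skipped entirely
}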

Deleted file (path not shown): vendored github.com/golang/protobuf descriptor helpers

@@ -1,180 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package descriptor provides functions for obtaining the protocol buffer
// descriptors of generated Go types.
//
// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package
// for how to obtain an EnumDescriptor or MessageDescriptor in order to
// programatically interact with the protobuf type system.
package descriptor
import (
"bytes"
"compress/gzip"
"io/ioutil"
"sync"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)
// Message is proto.Message with a method to return its descriptor.
//
// Deprecated: The Descriptor method may not be generated by future
// versions of protoc-gen-go, meaning that this interface may not
// be implemented by many concrete message types.
type Message interface {
proto.Message
Descriptor() ([]byte, []int)
}
// ForMessage returns the file descriptor proto containing
// the message and the message descriptor proto for the message itself.
// The returned proto messages must not be mutated.
//
// Deprecated: Not all concrete message types satisfy the Message interface.
// Use MessageDescriptorProto instead. If possible, the calling code should
// be rewritten to use protobuf reflection instead.
// See package "google.golang.org/protobuf/reflect/protoreflect" for details.
func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
return MessageDescriptorProto(m)
}
type rawDesc struct {
fileDesc []byte
indexes []int
}
var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc
func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) {
// Fast-path: check whether raw descriptors are already cached.
origDesc := d
if v, ok := rawDescCache.Load(origDesc); ok {
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
}
// Slow-path: derive the raw descriptor from the v2 descriptor.
// Start with the leaf (a given enum or message declaration) and
// ascend upwards until we hit the parent file descriptor.
var idxs []int
for {
idxs = append(idxs, d.Index())
d = d.Parent()
if d == nil {
// TODO: We could construct a FileDescriptor stub for standalone
// descriptors to satisfy the API.
return nil, nil
}
if _, ok := d.(protoreflect.FileDescriptor); ok {
break
}
}
// Obtain the raw file descriptor.
fd := d.(protoreflect.FileDescriptor)
b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd))
file := protoimpl.X.CompressGZIP(b)
// Reverse the indexes, since we populated it in reverse.
for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 {
idxs[i], idxs[j] = idxs[j], idxs[i]
}
if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok {
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
}
return file, idxs
}
// EnumRawDescriptor returns the GZIP'd raw file descriptor representing
// the enum and the index path to reach the enum declaration.
// The returned slices must not be mutated.
func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) {
if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok {
return ev.EnumDescriptor()
}
ed := protoimpl.X.EnumTypeOf(e)
return deriveRawDescriptor(ed.Descriptor())
}
// MessageRawDescriptor returns the GZIP'd raw file descriptor representing
// the message and the index path to reach the message declaration.
// The returned slices must not be mutated.
func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) {
if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok {
return mv.Descriptor()
}
md := protoimpl.X.MessageTypeOf(m)
return deriveRawDescriptor(md.Descriptor())
}
var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto
func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto {
// Fast-path: check whether descriptor protos are already cached.
if v, ok := fileDescCache.Load(&rawDesc[0]); ok {
return v.(*descriptorpb.FileDescriptorProto)
}
// Slow-path: derive the descriptor proto from the GZIP'd message.
zr, err := gzip.NewReader(bytes.NewReader(rawDesc))
if err != nil {
panic(err)
}
b, err := ioutil.ReadAll(zr)
if err != nil {
panic(err)
}
fd := new(descriptorpb.FileDescriptorProto)
if err := proto.Unmarshal(b, fd); err != nil {
panic(err)
}
if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok {
return v.(*descriptorpb.FileDescriptorProto)
}
return fd
}
// EnumDescriptorProto returns the file descriptor proto representing
// the enum and the enum descriptor proto for the enum itself.
// The returned proto messages must not be mutated.
func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) {
rawDesc, idxs := EnumRawDescriptor(e)
if rawDesc == nil || idxs == nil {
return nil, nil
}
fd := deriveFileDescriptor(rawDesc)
if len(idxs) == 1 {
return fd, fd.EnumType[idxs[0]]
}
md := fd.MessageType[idxs[0]]
for _, i := range idxs[1 : len(idxs)-1] {
md = md.NestedType[i]
}
ed := md.EnumType[idxs[len(idxs)-1]]
return fd, ed
}
// MessageDescriptorProto returns the file descriptor proto representing
// the message and the message descriptor proto for the message itself.
// The returned proto messages must not be mutated.
func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
rawDesc, idxs := MessageRawDescriptor(m)
if rawDesc == nil || idxs == nil {
return nil, nil
}
fd := deriveFileDescriptor(rawDesc)
md := fd.MessageType[idxs[0]]
for _, i := range idxs[1:] {
md = md.NestedType[i]
}
return fd, md
}
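For orientation, a minimal sketch (not part of the vendored file) of how the deprecated helpers above can be called; empty.Empty is used only as a convenient generated message:
package main
import (
	"fmt"
	"github.com/golang/protobuf/descriptor"
	"github.com/golang/protobuf/ptypes/empty"
)
func main() {
	// Resolve the file descriptor proto and the message descriptor proto
	// for a generated message via the deprecated helper defined above.
	fd, md := descriptor.MessageDescriptorProto(&empty.Empty{})
	fmt.Println(fd.GetName(), md.GetName()) // google/protobuf/empty.proto Empty
}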

View File

@ -1,200 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
package descriptor
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/descriptor.proto.
type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
type FieldOptions_CType = descriptorpb.FieldOptions_CType
const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
type FileDescriptorSet = descriptorpb.FileDescriptorSet
type FileDescriptorProto = descriptorpb.FileDescriptorProto
type DescriptorProto = descriptorpb.DescriptorProto
type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
type FileOptions = descriptorpb.FileOptions
const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices
const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
type MessageOptions = descriptorpb.MessageOptions
const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
type FieldOptions = descriptorpb.FieldOptions
const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
type OneofOptions = descriptorpb.OneofOptions
type EnumOptions = descriptorpb.EnumOptions
const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
type EnumValueOptions = descriptorpb.EnumValueOptions
const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
type ServiceOptions = descriptorpb.ServiceOptions
const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
type MethodOptions = descriptorpb.MethodOptions
const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
type UninterpretedOption = descriptorpb.UninterpretedOption
type SourceCodeInfo = descriptorpb.SourceCodeInfo
type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x32,
}
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
}

View File

@ -1,71 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
package wrappers
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/wrappers.proto.
type DoubleValue = wrapperspb.DoubleValue
type FloatValue = wrapperspb.FloatValue
type Int64Value = wrapperspb.Int64Value
type UInt64Value = wrapperspb.UInt64Value
type Int32Value = wrapperspb.Int32Value
type UInt32Value = wrapperspb.UInt32Value
type BoolValue = wrapperspb.BoolValue
type StringValue = wrapperspb.StringValue
type BytesValue = wrapperspb.BytesValue
var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
}
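As a side note, the aliases above make the deprecated wrappers types interchangeable with wrapperspb; a small sketch, not part of the vendored file:
package main
import (
	"fmt"
	"github.com/golang/protobuf/ptypes/wrappers"
	"google.golang.org/protobuf/types/known/wrapperspb"
)
func main() {
	// Because wrappers.StringValue is a type alias for wrapperspb.StringValue,
	// values constructed with either package are the same Go type.
	var v *wrappers.StringValue = wrapperspb.String("hello")
	fmt.Println(v.GetValue()) // hello
}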

View File

@ -36,11 +36,12 @@ import (
"strings" "strings"
"github.com/google/go-cmp/cmp/internal/diff" "github.com/google/go-cmp/cmp/internal/diff"
"github.com/google/go-cmp/cmp/internal/flags"
"github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/function"
"github.com/google/go-cmp/cmp/internal/value" "github.com/google/go-cmp/cmp/internal/value"
) )
// TODO(≥go1.18): Use any instead of interface{}.
// Equal reports whether x and y are equal by recursively applying the // Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values: // following rules in the given order to x and y and all of their sub-values:
// //
@ -319,7 +320,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
} }
func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
v = sanitizeValue(v, f.Type().In(0))
if !s.dynChecker.Next() { if !s.dynChecker.Next() {
return f.Call([]reflect.Value{v})[0] return f.Call([]reflect.Value{v})[0]
} }
@ -343,8 +343,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
} }
func (s *state) callTTBFunc(f, x, y reflect.Value) bool { func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
x = sanitizeValue(x, f.Type().In(0))
y = sanitizeValue(y, f.Type().In(1))
if !s.dynChecker.Next() { if !s.dynChecker.Next() {
return f.Call([]reflect.Value{x, y})[0].Bool() return f.Call([]reflect.Value{x, y})[0].Bool()
} }
@ -372,19 +370,6 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
ret = f.Call(vs)[0] ret = f.Call(vs)[0]
} }
// sanitizeValue converts nil interfaces of type T to those of type R,
// assuming that T is assignable to R.
// Otherwise, it returns the input value as is.
func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
// TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143).
if !flags.AtLeastGo110 {
if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
return reflect.New(t).Elem()
}
}
return v
}
func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
var addr bool var addr bool
var vax, vay reflect.Value // Addressable versions of vx and vy var vax, vay reflect.Value // Addressable versions of vx and vy

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build purego
// +build purego // +build purego
package cmp package cmp

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build !purego
// +build !purego // +build !purego
package cmp package cmp

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build !cmp_debug
// +build !cmp_debug // +build !cmp_debug
package diff package diff

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build cmp_debug
// +build cmp_debug // +build cmp_debug
package diff package diff

View File

@ -1,10 +0,0 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.10
package flags
// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
const AtLeastGo110 = false

View File

@ -1,10 +0,0 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.10
package flags
// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
const AtLeastGo110 = true

View File

@ -9,6 +9,8 @@ import (
"strconv" "strconv"
) )
var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
// TypeString is nearly identical to reflect.Type.String, // TypeString is nearly identical to reflect.Type.String,
// but has an additional option to specify that full type names be used. // but has an additional option to specify that full type names be used.
func TypeString(t reflect.Type, qualified bool) string { func TypeString(t reflect.Type, qualified bool) string {
@ -20,6 +22,11 @@ func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte
// of the same name and within the same package, // of the same name and within the same package,
// but declared within the namespace of different functions. // but declared within the namespace of different functions.
// Use the "any" alias instead of "interface{}" for better readability.
if t == anyType {
return append(b, "any"...)
}
// Named type. // Named type.
if t.Name() != "" { if t.Name() != "" {
if qualified && t.PkgPath() != "" { if qualified && t.PkgPath() != "" {

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build purego
// +build purego // +build purego
package value package value

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build !purego
// +build !purego // +build !purego
package value package value

View File

@ -178,7 +178,7 @@ type structField struct {
unexported bool unexported bool
mayForce bool // Forcibly allow visibility mayForce bool // Forcibly allow visibility
paddr bool // Was parent addressable? paddr bool // Was parent addressable?
pvx, pvy reflect.Value // Parent values (always addressible) pvx, pvy reflect.Value // Parent values (always addressable)
field reflect.StructField // Field information field reflect.StructField // Field information
} }

View File

@ -116,7 +116,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out
} }
// For leaf nodes, format the value based on the reflect.Values alone. // For leaf nodes, format the value based on the reflect.Values alone.
if v.MaxDepth == 0 { // As a special case, treat equal []byte as a leaf nodes.
isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0))
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
if v.MaxDepth == 0 || isEqualBytes {
switch opts.DiffMode { switch opts.DiffMode {
case diffUnknown, diffIdentical: case diffUnknown, diffIdentical:
// Format Equal. // Format Equal.

View File

@ -207,10 +207,11 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
// Check whether this is a []byte of text data. // Check whether this is a []byte of text data.
if t.Elem() == reflect.TypeOf(byte(0)) { if t.Elem() == reflect.TypeOf(byte(0)) {
b := v.Bytes() b := v.Bytes()
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
out = opts.formatString("", string(b)) out = opts.formatString("", string(b))
return opts.WithTypeMode(emitType).FormatType(t, out) skipType = true
return opts.FormatType(t, out)
} }
} }
@ -281,7 +282,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
} }
defer ptrs.Pop() defer ptrs.Pop()
// Skip the name only if this is an unnamed pointer type.
// Otherwise taking the address of a value does not reproduce
// the named pointer type.
if v.Type().Name() == "" {
skipType = true // Let the underlying value print the type instead skipType = true // Let the underlying value print the type instead
}
out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
out = &textWrap{Prefix: "&", Value: out} out = &textWrap{Prefix: "&", Value: out}
@ -292,7 +298,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
} }
// Interfaces accept different concrete types, // Interfaces accept different concrete types,
// so configure the underlying value to explicitly print the type. // so configure the underlying value to explicitly print the type.
skipType = true // Print the concrete type instead
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
default: default:
panic(fmt.Sprintf("%v kind not handled", v.Kind())) panic(fmt.Sprintf("%v kind not handled", v.Kind()))

View File

@ -80,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
} }
// Use specialized string diffing for longer slices or strings. // Use specialized string diffing for longer slices or strings.
const minLength = 64 const minLength = 32
return vx.Len() >= minLength && vy.Len() >= minLength return vx.Len() >= minLength && vy.Len() >= minLength
} }
@ -563,10 +563,10 @@ func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []d
nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
ny := ds.NumIdentical + ds.NumInserted + ds.NumModified ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
var numLeadingIdentical, numTrailingIdentical int var numLeadingIdentical, numTrailingIdentical int
for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
numLeadingIdentical++ numLeadingIdentical++
} }
for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
numTrailingIdentical++ numTrailingIdentical++
} }
if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {

View File

@ -1,23 +0,0 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
package(default_visibility = ["//visibility:public"])
proto_library(
name = "internal_proto",
srcs = ["errors.proto"],
deps = ["@com_google_protobuf//:any_proto"],
)
go_proto_library(
name = "internal_go_proto",
importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
proto = ":internal_proto",
)
go_library(
name = "go_default_library",
embed = [":internal_go_proto"],
importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
)

View File

@ -1,189 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: internal/errors.proto
package internal
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Error is the generic error returned from unary RPCs.
type Error struct {
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
// This is to make the error more compatible with users that expect errors to be Status objects:
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
// It should be the exact same message as the Error field.
Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Error) Reset() { *m = Error{} }
func (m *Error) String() string { return proto.CompactTextString(m) }
func (*Error) ProtoMessage() {}
func (*Error) Descriptor() ([]byte, []int) {
return fileDescriptor_9b093362ca6d1e03, []int{0}
}
func (m *Error) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Error.Unmarshal(m, b)
}
func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Error.Marshal(b, m, deterministic)
}
func (m *Error) XXX_Merge(src proto.Message) {
xxx_messageInfo_Error.Merge(m, src)
}
func (m *Error) XXX_Size() int {
return xxx_messageInfo_Error.Size(m)
}
func (m *Error) XXX_DiscardUnknown() {
xxx_messageInfo_Error.DiscardUnknown(m)
}
var xxx_messageInfo_Error proto.InternalMessageInfo
func (m *Error) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *Error) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Error) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *Error) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
// StreamError is a response type which is returned when a
// streaming RPC returns an error.
type StreamError struct {
GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StreamError) Reset() { *m = StreamError{} }
func (m *StreamError) String() string { return proto.CompactTextString(m) }
func (*StreamError) ProtoMessage() {}
func (*StreamError) Descriptor() ([]byte, []int) {
return fileDescriptor_9b093362ca6d1e03, []int{1}
}
func (m *StreamError) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamError.Unmarshal(m, b)
}
func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
}
func (m *StreamError) XXX_Merge(src proto.Message) {
xxx_messageInfo_StreamError.Merge(m, src)
}
func (m *StreamError) XXX_Size() int {
return xxx_messageInfo_StreamError.Size(m)
}
func (m *StreamError) XXX_DiscardUnknown() {
xxx_messageInfo_StreamError.DiscardUnknown(m)
}
var xxx_messageInfo_StreamError proto.InternalMessageInfo
func (m *StreamError) GetGrpcCode() int32 {
if m != nil {
return m.GrpcCode
}
return 0
}
func (m *StreamError) GetHttpCode() int32 {
if m != nil {
return m.HttpCode
}
return 0
}
func (m *StreamError) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *StreamError) GetHttpStatus() string {
if m != nil {
return m.HttpStatus
}
return ""
}
func (m *StreamError) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error")
proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
}
func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) }
var fileDescriptor_9b093362ca6d1e03 = []byte{
// 252 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30,
0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52,
0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24,
0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c,
0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2,
0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1,
0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b,
0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c,
0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b,
0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72,
0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3,
0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6,
0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7,
0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae,
0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03,
0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00,
}

View File

@ -1,26 +0,0 @@
syntax = "proto3";
package grpc.gateway.runtime;
option go_package = "internal";
import "google/protobuf/any.proto";
// Error is the generic error returned from unary RPCs.
message Error {
string error = 1;
// This is to make the error more compatible with users that expect errors to be Status objects:
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
// It should be the exact same message as the Error field.
int32 code = 2;
string message = 3;
repeated google.protobuf.Any details = 4;
}
// StreamError is a response type which is returned when a
// streaming RPC returns an error.
message StreamError {
int32 grpc_code = 1;
int32 http_code = 2;
string message = 3;
string http_status = 4;
repeated google.protobuf.Any details = 5;
}

View File

@ -1,85 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(default_visibility = ["//visibility:public"])
go_library(
name = "go_default_library",
srcs = [
"context.go",
"convert.go",
"doc.go",
"errors.go",
"fieldmask.go",
"handler.go",
"marshal_httpbodyproto.go",
"marshal_json.go",
"marshal_jsonpb.go",
"marshal_proto.go",
"marshaler.go",
"marshaler_registry.go",
"mux.go",
"pattern.go",
"proto2_convert.go",
"proto_errors.go",
"query.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
deps = [
"//internal:go_default_library",
"//utilities:go_default_library",
"@com_github_golang_protobuf//descriptor:go_default_library_gen",
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
"@com_github_golang_protobuf//proto:go_default_library",
"@go_googleapis//google/api:httpbody_go_proto",
"@io_bazel_rules_go//proto/wkt:any_go_proto",
"@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//grpclog:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
],
)
go_test(
name = "go_default_test",
size = "small",
srcs = [
"context_test.go",
"convert_test.go",
"errors_test.go",
"fieldmask_test.go",
"handler_test.go",
"marshal_httpbodyproto_test.go",
"marshal_json_test.go",
"marshal_jsonpb_test.go",
"marshal_proto_test.go",
"marshaler_registry_test.go",
"mux_test.go",
"pattern_test.go",
"query_test.go",
],
embed = [":go_default_library"],
deps = [
"//internal:go_default_library",
"//runtime/internal/examplepb:go_default_library",
"//utilities:go_default_library",
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
"@com_github_golang_protobuf//proto:go_default_library",
"@com_github_golang_protobuf//ptypes:go_default_library_gen",
"@go_googleapis//google/api:httpbody_go_proto",
"@go_googleapis//google/rpc:errdetails_go_proto",
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@io_bazel_rules_go//proto/wkt:struct_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
],
)

View File

@ -1,186 +0,0 @@
package runtime
import (
"context"
"io"
"net/http"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/internal"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
func HTTPStatusFromCode(code codes.Code) int {
switch code {
case codes.OK:
return http.StatusOK
case codes.Canceled:
return http.StatusRequestTimeout
case codes.Unknown:
return http.StatusInternalServerError
case codes.InvalidArgument:
return http.StatusBadRequest
case codes.DeadlineExceeded:
return http.StatusGatewayTimeout
case codes.NotFound:
return http.StatusNotFound
case codes.AlreadyExists:
return http.StatusConflict
case codes.PermissionDenied:
return http.StatusForbidden
case codes.Unauthenticated:
return http.StatusUnauthorized
case codes.ResourceExhausted:
return http.StatusTooManyRequests
case codes.FailedPrecondition:
// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
return http.StatusBadRequest
case codes.Aborted:
return http.StatusConflict
case codes.OutOfRange:
return http.StatusBadRequest
case codes.Unimplemented:
return http.StatusNotImplemented
case codes.Internal:
return http.StatusInternalServerError
case codes.Unavailable:
return http.StatusServiceUnavailable
case codes.DataLoss:
return http.StatusInternalServerError
}
grpclog.Infof("Unknown gRPC error code: %v", code)
return http.StatusInternalServerError
}
var (
// HTTPError replies to the request with an error.
//
// HTTPError is called:
// - From generated per-endpoint gateway handler code, when calling the backend results in an error.
// - From gateway runtime code, when forwarding the response message results in an error.
//
// The default value for HTTPError calls the custom error handler configured on the ServeMux via the
// WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise.
//
// To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler
// serve option.
//
// To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve
// option, set GlobalHTTPErrorHandler to a custom function.
//
// Setting this variable directly to customize error format is deprecated.
HTTPError = MuxOrGlobalHTTPError
// GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the
// WithProtoErrorHandler serve option.
//
// You can set this variable to a custom function to customize the error format.
GlobalHTTPErrorHandler = DefaultHTTPError
// OtherErrorHandler handles gateway errors from parsing and routing client requests for all
// ServeMux instances not using the WithProtoErrorHandler serve option.
//
// It returns the following error codes: StatusMethodNotAllowed, StatusNotFound, and StatusBadRequest.
//
// To customize parsing and routing error handling of a particular ServeMux instance, use the
// WithProtoErrorHandler serve option.
//
// To customize parsing and routing error handling of all ServeMux instances not using the
// WithProtoErrorHandler serve option, set this variable to a custom function.
OtherErrorHandler = DefaultOtherErrorHandler
)
// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalHTTPErrorHandler.
func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
if mux.protoErrorHandler != nil {
mux.protoErrorHandler(ctx, mux, marshaler, w, r, err)
} else {
GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err)
}
}
// DefaultHTTPError is the default implementation of HTTPError.
// If "err" is an error from the gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// Otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a JSON object,
// which contains a member whose key is "error" and whose value is err.Error().
func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
const fallback = `{"error": "failed to marshal error message"}`
s, ok := status.FromError(err)
if !ok {
s = status.New(codes.Unknown, err.Error())
}
w.Header().Del("Trailer")
w.Header().Del("Transfer-Encoding")
contentType := marshaler.ContentType()
// Check the marshaler at run time in order to keep backwards compatibility.
// An interface param needs to be added to the ContentType() function on
// the Marshaler interface to be able to remove this check
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
pb := s.Proto()
contentType = typeMarshaler.ContentTypeFromMessage(pb)
}
w.Header().Set("Content-Type", contentType)
body := &internal.Error{
Error: s.Message(),
Message: s.Message(),
Code: int32(s.Code()),
Details: s.Proto().GetDetails(),
}
buf, merr := marshaler.Marshal(body)
if merr != nil {
grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
// Unless the request includes a TE header field indicating "trailers"
// is acceptable, as described in Section 4.3, a server SHOULD NOT
// generate trailer fields that it believes are necessary for the user
// agent to receive.
var wantsTrailers bool
if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") {
wantsTrailers = true
handleForwardResponseTrailerHeader(w, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
st := HTTPStatusFromCode(s.Code())
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
if wantsTrailers {
handleForwardResponseTrailer(w, md)
}
}
// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
// It simply writes a string representation of the given error into "w".
func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
http.Error(w, msg, code)
}
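For context on the hooks documented above (HTTPError, GlobalHTTPErrorHandler, OtherErrorHandler), a minimal sketch of how an application could override the global handler in this (now removed) v1 runtime; the hand-built JSON body is illustrative only:
package main
import (
	"context"
	"io"
	"net/http"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc/status"
)
func init() {
	// Replace the default error body for every ServeMux that does not use
	// the WithProtoErrorHandler serve option.
	runtime.GlobalHTTPErrorHandler = func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
		s := status.Convert(err)
		w.Header().Set("Content-Type", m.ContentType())
		w.WriteHeader(runtime.HTTPStatusFromCode(s.Code()))
		// Naive body for illustration; a real handler should marshal a proper message.
		io.WriteString(w, `{"message": "`+s.Message()+`"}`)
	}
}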

View File

@ -1,89 +0,0 @@
package runtime
import (
"encoding/json"
"io"
"strings"
descriptor2 "github.com/golang/protobuf/descriptor"
"github.com/golang/protobuf/protoc-gen-go/descriptor"
"google.golang.org/genproto/protobuf/field_mask"
)
func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) {
// TODO - should really gate this with a test that the marshaller has used json names
if md != nil {
for _, f := range md.Field {
if f.JsonName != nil && f.Name != nil && *f.JsonName == name {
var subType *descriptor.DescriptorProto
// If the field has a TypeName then we retrieve the nested type for translating the embedded message names.
if f.TypeName != nil {
typeSplit := strings.Split(*f.TypeName, ".")
typeName := typeSplit[len(typeSplit)-1]
for _, t := range md.NestedType {
if typeName == *t.Name {
subType = t
}
}
}
return *f.Name, subType
}
}
}
return name, nil
}
// FieldMaskFromRequestBody creates a FieldMask listing all complete paths from the JSON body.
func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) {
fm := &field_mask.FieldMask{}
var root interface{}
if err := json.NewDecoder(r).Decode(&root); err != nil {
if err == io.EOF {
return fm, nil
}
return nil, err
}
queue := []fieldMaskPathItem{{node: root, md: md}}
for len(queue) > 0 {
// dequeue an item
item := queue[0]
queue = queue[1:]
if m, ok := item.node.(map[string]interface{}); ok {
// if the item is an object, then enqueue all of its children
for k, v := range m {
protoName, subMd := translateName(k, item.md)
if subMsg, ok := v.(descriptor2.Message); ok {
_, subMd = descriptor2.ForMessage(subMsg)
}
var path string
if item.path == "" {
path = protoName
} else {
path = item.path + "." + protoName
}
queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd})
}
} else if len(item.path) > 0 {
// otherwise, it's a leaf node, so record its path
fm.Paths = append(fm.Paths, item.path)
}
}
return fm, nil
}
// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
type fieldMaskPathItem struct {
// the list of prior fields leading up to node connected by dots
path string
// a generic decoded JSON object: the current item to inspect for further path extraction
node interface{}
// descriptor for parent message
md *descriptor.DescriptorProto
}
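To illustrate the helper above, a minimal sketch (not part of the vendored file); with a nil DescriptorProto the JSON keys are used as the field-mask paths verbatim:
package main
import (
	"fmt"
	"strings"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)
func main() {
	// Derive a FieldMask from a PATCH-style JSON body.
	body := strings.NewReader(`{"name": "x", "settings": {"theme": "dark"}}`)
	fm, err := runtime.FieldMaskFromRequestBody(body, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(fm.Paths) // e.g. [name settings.theme]; order follows map iteration
}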

View File

@ -1,106 +0,0 @@
package runtime
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/ptypes/any"
"github.com/grpc-ecosystem/grpc-gateway/internal"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
// StreamErrorHandlerFunc accepts an error as a gRPC error generated via the status package and translates it into
// a proto struct used to represent the error at the end of a stream.
type StreamErrorHandlerFunc func(context.Context, error) *StreamError
// StreamError is the payload for the final message in a server stream in the event that the server returns an
// error after a response message has already been sent.
type StreamError internal.StreamError
// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
// If "err" is an error from the gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// Otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a Status message marshaled by a Marshaler.
//
// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
// return Internal when Marshal failed
const fallback = `{"code": 13, "message": "failed to marshal error message"}`
s, ok := status.FromError(err)
if !ok {
s = status.New(codes.Unknown, err.Error())
}
w.Header().Del("Trailer")
contentType := marshaler.ContentType()
// Check the marshaler at run time in order to keep backwards compatibility.
// An interface param needs to be added to the ContentType() function on
// the Marshaler interface to be able to remove this check
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
pb := s.Proto()
contentType = typeMarshaler.ContentTypeFromMessage(pb)
}
w.Header().Set("Content-Type", contentType)
buf, merr := marshaler.Marshal(s.Proto())
if merr != nil {
grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
handleForwardResponseTrailerHeader(w, md)
st := HTTPStatusFromCode(s.Code())
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
handleForwardResponseTrailer(w, md)
}
// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
// default logic.
//
// It extracts the gRPC status from err if possible. The fields of the status are
// used to populate the returned StreamError, and the HTTP status code is derived
// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
// gRPC status, an "Unknown" gRPC code and an "Internal Server Error" HTTP code are used.
func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
grpcCode := codes.Unknown
grpcMessage := err.Error()
var grpcDetails []*any.Any
if s, ok := status.FromError(err); ok {
grpcCode = s.Code()
grpcMessage = s.Message()
grpcDetails = s.Proto().GetDetails()
}
httpCode := HTTPStatusFromCode(grpcCode)
return &StreamError{
GrpcCode: int32(grpcCode),
HttpCode: int32(httpCode),
Message: grpcMessage,
HttpStatus: http.StatusText(httpCode),
Details: grpcDetails,
}
}
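As the comment on DefaultHTTPProtoErrorHandler notes, the handler is meant to be installed via the WithProtoErrorHandler serve option rather than assigned to HTTPError; a minimal sketch, not part of the vendored file:
package main
import "github.com/grpc-ecosystem/grpc-gateway/runtime"
func main() {
	// Opt a particular ServeMux into proto-based error rendering.
	mux := runtime.NewServeMux(
		runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
	)
	_ = mux // register handlers and serve as usual
}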

View File

@ -1,406 +0,0 @@
package runtime
import (
"encoding/base64"
"fmt"
"net/url"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc/grpclog"
)
var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$")
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
// QueryParameterParser defines interface for all query parameter parsers
type QueryParameterParser interface {
Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
}
// PopulateQueryParameters parses query parameters
// into "msg" using the current query parser
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
return currentQueryParser.Parse(msg, values, filter)
}
type defaultQueryParser struct{}
// Parse populates "values" into "msg".
// A value is ignored if its key starts with one of the elements in "filter".
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
for key, values := range values {
match := valuesKeyRegexp.FindStringSubmatch(key)
if len(match) == 3 {
key = match[1]
values = append([]string{match[2]}, values...)
}
fieldPath := strings.Split(key, ".")
if filter.HasCommonPrefix(fieldPath) {
continue
}
if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
return err
}
}
return nil
}
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
// It instantiates missing protobuf fields as it goes.
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
fieldPath := strings.Split(fieldPathString, ".")
return populateFieldValueFromPath(msg, fieldPath, []string{value})
}
func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
m := reflect.ValueOf(msg)
if m.Kind() != reflect.Ptr {
return fmt.Errorf("unexpected type %T: %v", msg, msg)
}
var props *proto.Properties
m = m.Elem()
for i, fieldName := range fieldPath {
isLast := i == len(fieldPath)-1
if !isLast && m.Kind() != reflect.Struct {
return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
}
var f reflect.Value
var err error
f, props, err = fieldByProtoName(m, fieldName)
if err != nil {
return err
} else if !f.IsValid() {
grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
return nil
}
switch f.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
if !isLast {
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
}
m = f
case reflect.Slice:
if !isLast {
return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
}
// Handle []byte
if f.Type().Elem().Kind() == reflect.Uint8 {
m = f
break
}
return populateRepeatedField(f, values, props)
case reflect.Ptr:
if f.IsNil() {
m = reflect.New(f.Type().Elem())
f.Set(m.Convert(f.Type()))
}
m = f.Elem()
continue
case reflect.Struct:
m = f
continue
case reflect.Map:
if !isLast {
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
}
return populateMapField(f, values, props)
default:
return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
}
}
switch len(values) {
case 0:
return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
case 1:
default:
grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
}
return populateField(m, values[0], props)
}
// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
// "m" must be a struct value. It returns a zero reflect.Value if no such field is found.
func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
props := proto.GetProperties(m.Type())
// look up field name in oneof map
for _, op := range props.OneofTypes {
if name == op.Prop.OrigName || name == op.Prop.JSONName {
v := reflect.New(op.Type.Elem())
field := m.Field(op.Field)
if !field.IsNil() {
return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
}
field.Set(v)
return v.Elem().Field(0), op.Prop, nil
}
}
for _, p := range props.Prop {
if p.OrigName == name {
return m.FieldByName(p.Name), p, nil
}
if p.JSONName == name {
return m.FieldByName(p.Name), p, nil
}
}
return reflect.Value{}, nil, nil
}
func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
if len(values) != 2 {
return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
}
key, value := values[0], values[1]
keyType := f.Type().Key()
valueType := f.Type().Elem()
if f.IsNil() {
f.Set(reflect.MakeMap(f.Type()))
}
keyConv, ok := convFromType[keyType.Kind()]
if !ok {
return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
}
valueConv, ok := convFromType[valueType.Kind()]
if !ok {
return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
}
keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
if err := keyV[1].Interface(); err != nil {
return err.(error)
}
valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
if err := valueV[1].Interface(); err != nil {
return err.(error)
}
f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
return nil
}
func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
elemType := f.Type().Elem()
// is the destination field a slice of an enumeration type?
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
return populateFieldEnumRepeated(f, values, enumValMap)
}
conv, ok := convFromType[elemType.Kind()]
if !ok {
return fmt.Errorf("unsupported field type %s", elemType)
}
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
for i, v := range values {
result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
if err := result[1].Interface(); err != nil {
return err.(error)
}
f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
}
return nil
}
func populateField(f reflect.Value, value string, props *proto.Properties) error {
i := f.Addr().Interface()
// Handle protobuf well known types
var name string
switch m := i.(type) {
case interface{ XXX_WellKnownType() string }:
name = m.XXX_WellKnownType()
case proto.Message:
const wktPrefix = "google.protobuf."
if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
name = fullName[len(wktPrefix):]
}
}
switch name {
case "Timestamp":
if value == "null" {
f.FieldByName("Seconds").SetInt(0)
f.FieldByName("Nanos").SetInt(0)
return nil
}
t, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
f.FieldByName("Seconds").SetInt(int64(t.Unix()))
f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
return nil
case "Duration":
if value == "null" {
f.FieldByName("Seconds").SetInt(0)
f.FieldByName("Nanos").SetInt(0)
return nil
}
d, err := time.ParseDuration(value)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
ns := d.Nanoseconds()
s := ns / 1e9
ns %= 1e9
f.FieldByName("Seconds").SetInt(s)
f.FieldByName("Nanos").SetInt(ns)
return nil
case "DoubleValue":
fallthrough
case "FloatValue":
float64Val, err := strconv.ParseFloat(value, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetFloat(float64Val)
return nil
case "Int64Value":
fallthrough
case "Int32Value":
int64Val, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetInt(int64Val)
return nil
case "UInt64Value":
fallthrough
case "UInt32Value":
uint64Val, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetUint(uint64Val)
return nil
case "BoolValue":
if value == "true" {
f.FieldByName("Value").SetBool(true)
} else if value == "false" {
f.FieldByName("Value").SetBool(false)
} else {
return fmt.Errorf("bad BoolValue: %s", value)
}
return nil
case "StringValue":
f.FieldByName("Value").SetString(value)
return nil
case "BytesValue":
bytesVal, err := base64.StdEncoding.DecodeString(value)
if err != nil {
return fmt.Errorf("bad BytesValue: %s", value)
}
f.FieldByName("Value").SetBytes(bytesVal)
return nil
case "FieldMask":
p := f.FieldByName("Paths")
for _, v := range strings.Split(value, ",") {
if v != "" {
p.Set(reflect.Append(p, reflect.ValueOf(v)))
}
}
return nil
}
// Handle Time and Duration stdlib types
switch t := i.(type) {
case *time.Time:
pt, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
*t = pt
return nil
case *time.Duration:
d, err := time.ParseDuration(value)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
*t = d
return nil
}
// is the destination field an enumeration type?
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
return populateFieldEnum(f, value, enumValMap)
}
conv, ok := convFromType[f.Kind()]
if !ok {
return fmt.Errorf("field type %T is not supported in query parameters", i)
}
result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
if err := result[1].Interface(); err != nil {
return err.(error)
}
f.Set(result[0].Convert(f.Type()))
return nil
}
func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
// see if it's an enumeration string
if enumVal, ok := enumValMap[value]; ok {
return reflect.ValueOf(enumVal).Convert(t), nil
}
// check for an integer that matches an enumeration value
eVal, err := strconv.Atoi(value)
if err != nil {
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
}
for _, v := range enumValMap {
if v == int32(eVal) {
return reflect.ValueOf(eVal).Convert(t), nil
}
}
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
}
func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
cval, err := convertEnum(value, f.Type(), enumValMap)
if err != nil {
return err
}
f.Set(cval)
return nil
}
func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
elemType := f.Type().Elem()
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
for i, v := range values {
result, err := convertEnum(v, elemType, enumValMap)
if err != nil {
return err
}
f.Index(i).Set(result)
}
return nil
}
var (
convFromType = map[reflect.Kind]reflect.Value{
reflect.String: reflect.ValueOf(String),
reflect.Bool: reflect.ValueOf(Bool),
reflect.Float64: reflect.ValueOf(Float64),
reflect.Float32: reflect.ValueOf(Float32),
reflect.Int64: reflect.ValueOf(Int64),
reflect.Int32: reflect.ValueOf(Int32),
reflect.Uint64: reflect.ValueOf(Uint64),
reflect.Uint32: reflect.ValueOf(Uint32),
reflect.Slice: reflect.ValueOf(Bytes),
}
)
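The convFromType table above is what drives populateField, populateRepeatedField and populateMapField: the destination field's reflect.Kind selects a converter, the raw query-string value is passed to it, and the converted result is assigned via reflection. A minimal standalone sketch of the same table-dispatch pattern (using strconv directly so it compiles outside this package; the struct and field names are illustrative only):

package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// conv mirrors the convFromType idea: one converter per destination kind.
var conv = map[reflect.Kind]func(string) (interface{}, error){
	reflect.String: func(s string) (interface{}, error) { return s, nil },
	reflect.Bool:   func(s string) (interface{}, error) { return strconv.ParseBool(s) },
	reflect.Int64:  func(s string) (interface{}, error) { return strconv.ParseInt(s, 10, 64) },
}

// setField converts value and stores it into the addressable field f.
func setField(f reflect.Value, value string) error {
	c, ok := conv[f.Kind()]
	if !ok {
		return fmt.Errorf("unsupported kind %s", f.Kind())
	}
	v, err := c(value)
	if err != nil {
		return err
	}
	f.Set(reflect.ValueOf(v).Convert(f.Type()))
	return nil
}

func main() {
	var req struct {
		Name  string
		Limit int64
	}
	rv := reflect.ValueOf(&req).Elem()
	_ = setField(rv.FieldByName("Name"), "books/1")
	_ = setField(rv.FieldByName("Limit"), "50")
	fmt.Printf("%+v\n", req) // {Name:books/1 Limit:50}
}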


@ -0,0 +1,35 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(default_visibility = ["//visibility:public"])
go_library(
name = "httprule",
srcs = [
"compile.go",
"parse.go",
"types.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
deps = ["//utilities"],
)
go_test(
name = "httprule_test",
size = "small",
srcs = [
"compile_test.go",
"parse_test.go",
"types_test.go",
],
embed = [":httprule"],
deps = [
"//utilities",
"@com_github_golang_glog//:glog",
],
)
alias(
name = "go_default_library",
actual = ":httprule",
visibility = ["//:__subpackages__"],
)


@ -0,0 +1,121 @@
package httprule
import (
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)
const (
opcodeVersion = 1
)
// Template is a compiled representation of path templates.
type Template struct {
// Version is the version number of the format.
Version int
// OpCodes is a sequence of operations.
OpCodes []int
// Pool is a constant pool
Pool []string
// Verb is a VERB part in the template.
Verb string
// Fields is a list of field paths bound in this template.
Fields []string
// Original template (example: /v1/a_bit_of_everything)
Template string
}
// Compiler compiles the utilities representation of path templates into marshallable operations,
// which can be unmarshalled by runtime.NewPattern.
type Compiler interface {
Compile() Template
}
type op struct {
// code is the opcode of the operation
code utilities.OpCode
// str is a string operand of the code.
// num is ignored if str is not empty.
str string
// num is a numeric operand of the code.
num int
}
func (w wildcard) compile() []op {
return []op{
{code: utilities.OpPush},
}
}
func (w deepWildcard) compile() []op {
return []op{
{code: utilities.OpPushM},
}
}
func (l literal) compile() []op {
return []op{
{
code: utilities.OpLitPush,
str: string(l),
},
}
}
func (v variable) compile() []op {
var ops []op
for _, s := range v.segments {
ops = append(ops, s.compile()...)
}
ops = append(ops, op{
code: utilities.OpConcatN,
num: len(v.segments),
}, op{
code: utilities.OpCapture,
str: v.path,
})
return ops
}
func (t template) Compile() Template {
var rawOps []op
for _, s := range t.segments {
rawOps = append(rawOps, s.compile()...)
}
var (
ops []int
pool []string
fields []string
)
consts := make(map[string]int)
for _, op := range rawOps {
ops = append(ops, int(op.code))
if op.str == "" {
ops = append(ops, op.num)
} else {
// eof segment literal represents the "/" path pattern
if op.str == eof {
op.str = ""
}
if _, ok := consts[op.str]; !ok {
consts[op.str] = len(pool)
pool = append(pool, op.str)
}
ops = append(ops, consts[op.str])
}
if op.code == utilities.OpCapture {
fields = append(fields, op.str)
}
}
return Template{
Version: opcodeVersion,
OpCodes: ops,
Pool: pool,
Verb: t.verb,
Fields: fields,
Template: t.template,
}
}
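Compile flattens the parsed segments into the opcode/constant-pool form that the runtime consumes. The sketch below shows how that hand-off looks; it has to live inside the grpc-gateway module (internal/httprule cannot be imported from other modules) and it assumes runtime.NewPattern keeps its v2 (version, opcodes, pool, verb) signature, which is not part of this diff.

// Illustrative only: must be placed inside the grpc-gateway module.
package patternsketch

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

// patternFor mirrors what the generated *.pb.gw.go files do at init time:
// parse the http annotation, compile it, and hand the opcodes to the runtime.
func patternFor(tmpl string) (runtime.Pattern, error) {
	c, err := httprule.Parse(tmpl)
	if err != nil {
		return runtime.Pattern{}, err
	}
	t := c.Compile()
	return runtime.NewPattern(t.Version, t.OpCodes, t.Pool, t.Verb)
}

// patternFor("/v1/{name=messages/*}:cancel") yields a Pattern whose String()
// reconstructs the original template.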


@ -0,0 +1,11 @@
// +build gofuzz
package httprule
func Fuzz(data []byte) int {
_, err := Parse(string(data))
if err != nil {
return 0
}
return 0
}


@ -0,0 +1,368 @@
package httprule
import (
"fmt"
"strings"
)
// InvalidTemplateError indicates that the path template is not valid.
type InvalidTemplateError struct {
tmpl string
msg string
}
func (e InvalidTemplateError) Error() string {
return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
}
// Parse parses the string representation of a path template.
func Parse(tmpl string) (Compiler, error) {
if !strings.HasPrefix(tmpl, "/") {
return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
}
tokens, verb := tokenize(tmpl[1:])
p := parser{tokens: tokens}
segs, err := p.topLevelSegments()
if err != nil {
return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
}
return template{
segments: segs,
verb: verb,
template: tmpl,
}, nil
}
func tokenize(path string) (tokens []string, verb string) {
if path == "" {
return []string{eof}, ""
}
const (
init = iota
field
nested
)
st := init
for path != "" {
var idx int
switch st {
case init:
idx = strings.IndexAny(path, "/{")
case field:
idx = strings.IndexAny(path, ".=}")
case nested:
idx = strings.IndexAny(path, "/}")
}
if idx < 0 {
tokens = append(tokens, path)
break
}
switch r := path[idx]; r {
case '/', '.':
case '{':
st = field
case '=':
st = nested
case '}':
st = init
}
if idx == 0 {
tokens = append(tokens, path[idx:idx+1])
} else {
tokens = append(tokens, path[:idx], path[idx:idx+1])
}
path = path[idx+1:]
}
l := len(tokens)
// See
// https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ;
// although normal and backwards-compat logic here is to use the last index
// of a colon, if the final segment is a variable followed by a colon, the
// part following the colon must be a verb. Hence if the previous token is
// an end var marker, we switch the index we're looking for to Index instead
// of LastIndex, so that we correctly grab the remaining part of the path as
// the verb.
var penultimateTokenIsEndVar bool
switch l {
case 0, 1:
// Not enough to be variable so skip this logic and don't result in an
// invalid index
default:
penultimateTokenIsEndVar = tokens[l-2] == "}"
}
t := tokens[l-1]
var idx int
if penultimateTokenIsEndVar {
idx = strings.Index(t, ":")
} else {
idx = strings.LastIndex(t, ":")
}
if idx == 0 {
tokens, verb = tokens[:l-1], t[1:]
} else if idx > 0 {
tokens[l-1], verb = t[:idx], t[idx+1:]
}
tokens = append(tokens, eof)
return tokens, verb
}
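A concrete trace of the tokenizer's state machine, written as an in-package sketch (tokenize and eof are unexported, so this only compiles as a test inside the httprule package); the expected values follow from the logic above, including the Index-vs-LastIndex verb handling for a trailing ":verb" after a variable.

package httprule

import (
	"reflect"
	"testing"
)

// TestTokenizeVerb checks that a trailing ":cancel" after the closing "}" of a
// variable is split off as the verb via strings.Index rather than LastIndex.
func TestTokenizeVerb(t *testing.T) {
	tokens, verb := tokenize("v1/{name=messages/*}:cancel")
	want := []string{"v1", "/", "{", "name", "=", "messages", "/", "*", "}", eof}
	if !reflect.DeepEqual(tokens, want) {
		t.Errorf("tokens = %q; want %q", tokens, want)
	}
	if verb != "cancel" {
		t.Errorf("verb = %q; want %q", verb, "cancel")
	}
}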
// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
type parser struct {
tokens []string
accepted []string
}
// topLevelSegments is the target of this parser.
func (p *parser) topLevelSegments() ([]segment, error) {
if _, err := p.accept(typeEOF); err == nil {
p.tokens = p.tokens[:0]
return []segment{literal(eof)}, nil
}
segs, err := p.segments()
if err != nil {
return nil, err
}
if _, err := p.accept(typeEOF); err != nil {
return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
}
return segs, nil
}
func (p *parser) segments() ([]segment, error) {
s, err := p.segment()
if err != nil {
return nil, err
}
segs := []segment{s}
for {
if _, err := p.accept("/"); err != nil {
return segs, nil
}
s, err := p.segment()
if err != nil {
return segs, err
}
segs = append(segs, s)
}
}
func (p *parser) segment() (segment, error) {
if _, err := p.accept("*"); err == nil {
return wildcard{}, nil
}
if _, err := p.accept("**"); err == nil {
return deepWildcard{}, nil
}
if l, err := p.literal(); err == nil {
return l, nil
}
v, err := p.variable()
if err != nil {
return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err)
}
return v, err
}
func (p *parser) literal() (segment, error) {
lit, err := p.accept(typeLiteral)
if err != nil {
return nil, err
}
return literal(lit), nil
}
func (p *parser) variable() (segment, error) {
if _, err := p.accept("{"); err != nil {
return nil, err
}
path, err := p.fieldPath()
if err != nil {
return nil, err
}
var segs []segment
if _, err := p.accept("="); err == nil {
segs, err = p.segments()
if err != nil {
return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err)
}
} else {
segs = []segment{wildcard{}}
}
if _, err := p.accept("}"); err != nil {
return nil, fmt.Errorf("unterminated variable segment: %s", path)
}
return variable{
path: path,
segments: segs,
}, nil
}
func (p *parser) fieldPath() (string, error) {
c, err := p.accept(typeIdent)
if err != nil {
return "", err
}
components := []string{c}
for {
if _, err = p.accept("."); err != nil {
return strings.Join(components, "."), nil
}
c, err := p.accept(typeIdent)
if err != nil {
return "", fmt.Errorf("invalid field path component: %v", err)
}
components = append(components, c)
}
}
// A termType is a type of terminal symbols.
type termType string
// These constants define some of the valid values of termType.
// They improve readability of parse functions.
//
// You can also use "/", "*", "**", "." or "=" as valid values.
const (
typeIdent = termType("ident")
typeLiteral = termType("literal")
typeEOF = termType("$")
)
const (
// eof is the terminal symbol which always appears at the end of token sequence.
eof = "\u0000"
)
// accept tries to accept a token in "p".
// This function consumes a token and returns it if it matches the specified "term".
// If it doesn't match, the function does not consume any tokens and returns an error.
func (p *parser) accept(term termType) (string, error) {
t := p.tokens[0]
switch term {
case "/", "*", "**", ".", "=", "{", "}":
if t != string(term) && t != "/" {
return "", fmt.Errorf("expected %q but got %q", term, t)
}
case typeEOF:
if t != eof {
return "", fmt.Errorf("expected EOF but got %q", t)
}
case typeIdent:
if err := expectIdent(t); err != nil {
return "", err
}
case typeLiteral:
if err := expectPChars(t); err != nil {
return "", err
}
default:
return "", fmt.Errorf("unknown termType %q", term)
}
p.tokens = p.tokens[1:]
p.accepted = append(p.accepted, t)
return t, nil
}
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
//
// https://www.ietf.org/rfc/rfc3986.txt, P.49
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
// / "*" / "+" / "," / ";" / "="
// pct-encoded = "%" HEXDIG HEXDIG
func expectPChars(t string) error {
const (
init = iota
pct1
pct2
)
st := init
for _, r := range t {
if st != init {
if !isHexDigit(r) {
return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
}
switch st {
case pct1:
st = pct2
case pct2:
st = init
}
continue
}
// unreserved
switch {
case 'A' <= r && r <= 'Z':
continue
case 'a' <= r && r <= 'z':
continue
case '0' <= r && r <= '9':
continue
}
switch r {
case '-', '.', '_', '~':
// unreserved
case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
// sub-delims
case ':', '@':
// rest of pchar
case '%':
// pct-encoded
st = pct1
default:
return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
}
}
if st != init {
return fmt.Errorf("invalid percent-encoding in %q", t)
}
return nil
}
// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
func expectIdent(ident string) error {
if ident == "" {
return fmt.Errorf("empty identifier")
}
for pos, r := range ident {
switch {
case '0' <= r && r <= '9':
if pos == 0 {
return fmt.Errorf("identifier starting with digit: %s", ident)
}
continue
case 'A' <= r && r <= 'Z':
continue
case 'a' <= r && r <= 'z':
continue
case r == '_':
continue
default:
return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
}
}
return nil
}
func isHexDigit(r rune) bool {
switch {
case '0' <= r && r <= '9':
return true
case 'A' <= r && r <= 'F':
return true
case 'a' <= r && r <= 'f':
return true
}
return false
}
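A few concrete cases for the two validators above, again as an in-package sketch since both functions are unexported; the expected outcomes follow directly from the pchar and identifier rules spelled out in their comments.

package httprule

// validatorExamples shows which literals and identifiers the parser accepts.
func validatorExamples() {
	_ = expectPChars("books%2Fshelf-1") // nil: letters, '-' and a complete pct-encoded triplet
	_ = expectPChars("a b")             // error: space is not a pchar
	_ = expectPChars("50%")             // error: truncated percent-encoding
	_ = expectIdent("shelf_id")         // nil
	_ = expectIdent("3d")               // error: identifier starting with a digit
}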


@ -0,0 +1,60 @@
package httprule
import (
"fmt"
"strings"
)
type template struct {
segments []segment
verb string
template string
}
type segment interface {
fmt.Stringer
compile() (ops []op)
}
type wildcard struct{}
type deepWildcard struct{}
type literal string
type variable struct {
path string
segments []segment
}
func (wildcard) String() string {
return "*"
}
func (deepWildcard) String() string {
return "**"
}
func (l literal) String() string {
return string(l)
}
func (v variable) String() string {
var segs []string
for _, s := range v.segments {
segs = append(segs, s.String())
}
return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
}
func (t template) String() string {
var segs []string
for _, s := range t.segments {
segs = append(segs, s.String())
}
str := strings.Join(segs, "/")
if t.verb != "" {
str = fmt.Sprintf("%s:%s", str, t.verb)
}
return "/" + str
}


@ -0,0 +1,91 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(default_visibility = ["//visibility:public"])
go_library(
name = "runtime",
srcs = [
"context.go",
"convert.go",
"doc.go",
"errors.go",
"fieldmask.go",
"handler.go",
"marshal_httpbodyproto.go",
"marshal_json.go",
"marshal_jsonpb.go",
"marshal_proto.go",
"marshaler.go",
"marshaler_registry.go",
"mux.go",
"pattern.go",
"proto2_convert.go",
"query.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
deps = [
"//internal/httprule",
"//utilities",
"@go_googleapis//google/api:httpbody_go_proto",
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//grpclog",
"@org_golang_google_grpc//metadata",
"@org_golang_google_grpc//status",
"@org_golang_google_protobuf//encoding/protojson",
"@org_golang_google_protobuf//proto",
"@org_golang_google_protobuf//reflect/protoreflect",
"@org_golang_google_protobuf//reflect/protoregistry",
"@org_golang_google_protobuf//types/known/durationpb",
"@org_golang_google_protobuf//types/known/timestamppb",
"@org_golang_google_protobuf//types/known/wrapperspb",
],
)
go_test(
name = "runtime_test",
size = "small",
srcs = [
"context_test.go",
"convert_test.go",
"errors_test.go",
"fieldmask_test.go",
"handler_test.go",
"marshal_httpbodyproto_test.go",
"marshal_json_test.go",
"marshal_jsonpb_test.go",
"marshal_proto_test.go",
"marshaler_registry_test.go",
"mux_test.go",
"pattern_test.go",
"query_test.go",
],
embed = [":runtime"],
deps = [
"//runtime/internal/examplepb",
"//utilities",
"@com_github_google_go_cmp//cmp",
"@com_github_google_go_cmp//cmp/cmpopts",
"@go_googleapis//google/api:httpbody_go_proto",
"@go_googleapis//google/rpc:errdetails_go_proto",
"@go_googleapis//google/rpc:status_go_proto",
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//metadata",
"@org_golang_google_grpc//status",
"@org_golang_google_protobuf//encoding/protojson",
"@org_golang_google_protobuf//proto",
"@org_golang_google_protobuf//testing/protocmp",
"@org_golang_google_protobuf//types/known/durationpb",
"@org_golang_google_protobuf//types/known/emptypb",
"@org_golang_google_protobuf//types/known/structpb",
"@org_golang_google_protobuf//types/known/timestamppb",
"@org_golang_google_protobuf//types/known/wrapperspb",
],
)
alias(
name = "go_default_library",
actual = ":runtime",
visibility = ["//visibility:public"],
)


@ -41,6 +41,19 @@ var (
DefaultContextTimeout = 0 * time.Second DefaultContextTimeout = 0 * time.Second
) )
type (
rpcMethodKey struct{}
httpPathPatternKey struct{}
AnnotateContextOption func(ctx context.Context) context.Context
)
func WithHTTPPathPattern(pattern string) AnnotateContextOption {
return func(ctx context.Context) context.Context {
return withHTTPPathPattern(ctx, pattern)
}
}
func decodeBinHeader(v string) ([]byte, error) { func decodeBinHeader(v string) ([]byte, error) {
if len(v)%4 == 0 { if len(v)%4 == 0 {
// Input was padded, or padding was not necessary. // Input was padded, or padding was not necessary.
@ -56,8 +69,8 @@ At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
except that the forwarded destination is not another HTTP service but rather except that the forwarded destination is not another HTTP service but rather
a gRPC service. a gRPC service.
*/ */
func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
ctx, md, err := annotateContext(ctx, mux, req) ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -70,8 +83,8 @@ func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (con
// AnnotateIncomingContext adds context information such as metadata from the request. // AnnotateIncomingContext adds context information such as metadata from the request.
// Attach metadata as incoming context. // Attach metadata as incoming context.
func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
ctx, md, err := annotateContext(ctx, mux, req) ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -82,7 +95,11 @@ func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Reque
return metadata.NewIncomingContext(ctx, md), nil return metadata.NewIncomingContext(ctx, md), nil
} }
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) { func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
ctx = withRPCMethod(ctx, rpcMethodName)
for _, o := range options {
ctx = o(ctx)
}
var pairs []string var pairs []string
timeout := DefaultContextTimeout timeout := DefaultContextTimeout
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
@ -132,6 +149,7 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (con
} }
if timeout != 0 { if timeout != 0 {
//nolint:govet // The context outlives this function
ctx, _ = context.WithTimeout(ctx, timeout) ctx, _ = context.WithTimeout(ctx, timeout)
} }
if len(pairs) == 0 { if len(pairs) == 0 {
@ -289,3 +307,39 @@ func isPermanentHTTPHeader(hdr string) bool {
} }
return false return false
} }
// RPCMethod returns the method string for the server context. The returned
// string is in the format of "/package.service/method".
func RPCMethod(ctx context.Context) (string, bool) {
m := ctx.Value(rpcMethodKey{})
if m == nil {
return "", false
}
ms, ok := m.(string)
if !ok {
return "", false
}
return ms, true
}
func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context {
return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName)
}
// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists.
// The format of the returned string is defined by the google.api.http path template type.
func HTTPPathPattern(ctx context.Context) (string, bool) {
m := ctx.Value(httpPathPatternKey{})
if m == nil {
return "", false
}
ms, ok := m.(string)
if !ok {
return "", false
}
return ms, true
}
func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
}
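RPCMethod and HTTPPathPattern are mainly useful inside ServeMux hooks that only receive a context, such as a metadata annotator. A hedged sketch; it assumes the v2 runtime.NewServeMux and runtime.WithMetadata options, which are not shown in this diff, and the header names are made up for illustration.

package gateway

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/metadata"
)

func newGatewayMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithMetadata(func(ctx context.Context, _ *http.Request) metadata.MD {
			md := metadata.MD{}
			if method, ok := runtime.RPCMethod(ctx); ok {
				// e.g. "/grpc.health.v1.Health/Check"
				md.Set("x-forwarded-method", method)
			}
			if pattern, ok := runtime.HTTPPathPattern(ctx); ok {
				// e.g. "/v1/{name=messages/*}"
				md.Set("x-forwarded-path-pattern", pattern)
			}
			return md
		}),
	)
}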


@ -6,10 +6,10 @@ import (
"strconv" "strconv"
"strings" "strings"
"github.com/golang/protobuf/jsonpb" "google.golang.org/protobuf/encoding/protojson"
"github.com/golang/protobuf/ptypes/duration" "google.golang.org/protobuf/types/known/durationpb"
"github.com/golang/protobuf/ptypes/timestamp" "google.golang.org/protobuf/types/known/timestamppb"
"github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/protobuf/types/known/wrapperspb"
) )
// String just returns the given string. // String just returns the given string.
@ -205,9 +205,11 @@ func BytesSlice(val, sep string) ([][]byte, error) {
} }
// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. // Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
func Timestamp(val string) (*timestamp.Timestamp, error) { func Timestamp(val string) (*timestamppb.Timestamp, error) {
var r timestamp.Timestamp var r timestamppb.Timestamp
err := jsonpb.UnmarshalString(val, &r) val = strconv.Quote(strings.Trim(val, `"`))
unmarshaler := &protojson.UnmarshalOptions{}
err := unmarshaler.Unmarshal([]byte(val), &r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -215,9 +217,11 @@ func Timestamp(val string) (*timestamp.Timestamp, error) {
} }
// Duration converts the given string into a timestamp.Duration. // Duration converts the given string into a timestamp.Duration.
func Duration(val string) (*duration.Duration, error) { func Duration(val string) (*durationpb.Duration, error) {
var r duration.Duration var r durationpb.Duration
err := jsonpb.UnmarshalString(val, &r) val = strconv.Quote(strings.Trim(val, `"`))
unmarshaler := &protojson.UnmarshalOptions{}
err := unmarshaler.Unmarshal([]byte(val), &r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -265,54 +269,54 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
*/ */
// StringValue well-known type support as wrapper around string type // StringValue well-known type support as wrapper around string type
func StringValue(val string) (*wrappers.StringValue, error) { func StringValue(val string) (*wrapperspb.StringValue, error) {
return &wrappers.StringValue{Value: val}, nil return &wrapperspb.StringValue{Value: val}, nil
} }
// FloatValue well-known type support as wrapper around float32 type // FloatValue well-known type support as wrapper around float32 type
func FloatValue(val string) (*wrappers.FloatValue, error) { func FloatValue(val string) (*wrapperspb.FloatValue, error) {
parsedVal, err := Float32(val) parsedVal, err := Float32(val)
return &wrappers.FloatValue{Value: parsedVal}, err return &wrapperspb.FloatValue{Value: parsedVal}, err
} }
// DoubleValue well-known type support as wrapper around float64 type // DoubleValue well-known type support as wrapper around float64 type
func DoubleValue(val string) (*wrappers.DoubleValue, error) { func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
parsedVal, err := Float64(val) parsedVal, err := Float64(val)
return &wrappers.DoubleValue{Value: parsedVal}, err return &wrapperspb.DoubleValue{Value: parsedVal}, err
} }
// BoolValue well-known type support as wrapper around bool type // BoolValue well-known type support as wrapper around bool type
func BoolValue(val string) (*wrappers.BoolValue, error) { func BoolValue(val string) (*wrapperspb.BoolValue, error) {
parsedVal, err := Bool(val) parsedVal, err := Bool(val)
return &wrappers.BoolValue{Value: parsedVal}, err return &wrapperspb.BoolValue{Value: parsedVal}, err
} }
// Int32Value well-known type support as wrapper around int32 type // Int32Value well-known type support as wrapper around int32 type
func Int32Value(val string) (*wrappers.Int32Value, error) { func Int32Value(val string) (*wrapperspb.Int32Value, error) {
parsedVal, err := Int32(val) parsedVal, err := Int32(val)
return &wrappers.Int32Value{Value: parsedVal}, err return &wrapperspb.Int32Value{Value: parsedVal}, err
} }
// UInt32Value well-known type support as wrapper around uint32 type // UInt32Value well-known type support as wrapper around uint32 type
func UInt32Value(val string) (*wrappers.UInt32Value, error) { func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
parsedVal, err := Uint32(val) parsedVal, err := Uint32(val)
return &wrappers.UInt32Value{Value: parsedVal}, err return &wrapperspb.UInt32Value{Value: parsedVal}, err
} }
// Int64Value well-known type support as wrapper around int64 type // Int64Value well-known type support as wrapper around int64 type
func Int64Value(val string) (*wrappers.Int64Value, error) { func Int64Value(val string) (*wrapperspb.Int64Value, error) {
parsedVal, err := Int64(val) parsedVal, err := Int64(val)
return &wrappers.Int64Value{Value: parsedVal}, err return &wrapperspb.Int64Value{Value: parsedVal}, err
} }
// UInt64Value well-known type support as wrapper around uint64 type // UInt64Value well-known type support as wrapper around uint64 type
func UInt64Value(val string) (*wrappers.UInt64Value, error) { func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
parsedVal, err := Uint64(val) parsedVal, err := Uint64(val)
return &wrappers.UInt64Value{Value: parsedVal}, err return &wrapperspb.UInt64Value{Value: parsedVal}, err
} }
// BytesValue well-known type support as wrapper around bytes[] type // BytesValue well-known type support as wrapper around bytes[] type
func BytesValue(val string) (*wrappers.BytesValue, error) { func BytesValue(val string) (*wrapperspb.BytesValue, error) {
parsedVal, err := Bytes(val) parsedVal, err := Bytes(val)
return &wrappers.BytesValue{Value: parsedVal}, err return &wrapperspb.BytesValue{Value: parsedVal}, err
} }
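With the move to protojson, the Timestamp and Duration converters quote the incoming value (after trimming any existing quotes), so they accept both bare and JSON-quoted query-string values. A small usage sketch:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	ts, err := runtime.Timestamp("2022-05-11T17:54:56Z") // bare RFC 3339 value
	if err != nil {
		panic(err)
	}
	d, err := runtime.Duration(`"1.5s"`) // already-quoted values are accepted too
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.GetSeconds(), d.AsDuration())
}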


@ -0,0 +1,180 @@
package runtime
import (
"context"
"errors"
"io"
"net/http"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
// ErrorHandlerFunc is the signature used to configure error handling.
type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
// StreamErrorHandlerFunc is the signature used to configure stream error handling.
type StreamErrorHandlerFunc func(context.Context, error) *status.Status
// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors.
type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int)
// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error
// passed to the DefaultRoutingErrorHandler.
type HTTPStatusError struct {
HTTPStatus int
Err error
}
func (e *HTTPStatusError) Error() string {
return e.Err.Error()
}
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
func HTTPStatusFromCode(code codes.Code) int {
switch code {
case codes.OK:
return http.StatusOK
case codes.Canceled:
return http.StatusRequestTimeout
case codes.Unknown:
return http.StatusInternalServerError
case codes.InvalidArgument:
return http.StatusBadRequest
case codes.DeadlineExceeded:
return http.StatusGatewayTimeout
case codes.NotFound:
return http.StatusNotFound
case codes.AlreadyExists:
return http.StatusConflict
case codes.PermissionDenied:
return http.StatusForbidden
case codes.Unauthenticated:
return http.StatusUnauthorized
case codes.ResourceExhausted:
return http.StatusTooManyRequests
case codes.FailedPrecondition:
// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
return http.StatusBadRequest
case codes.Aborted:
return http.StatusConflict
case codes.OutOfRange:
return http.StatusBadRequest
case codes.Unimplemented:
return http.StatusNotImplemented
case codes.Internal:
return http.StatusInternalServerError
case codes.Unavailable:
return http.StatusServiceUnavailable
case codes.DataLoss:
return http.StatusInternalServerError
}
grpclog.Infof("Unknown gRPC error code: %v", code)
return http.StatusInternalServerError
}
// HTTPError uses the mux-configured error handler.
func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
mux.errorHandler(ctx, mux, marshaler, w, r, err)
}
// DefaultHTTPErrorHandler is the default error handler.
// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler
// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode
// are insufficient for.
// If otherwise, it replies with http.StatusInternalServerError.
//
// The response body written by this function is a Status message marshaled by the Marshaler.
func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
// return Internal when Marshal failed
const fallback = `{"code": 13, "message": "failed to marshal error message"}`
var customStatus *HTTPStatusError
if errors.As(err, &customStatus) {
err = customStatus.Err
}
s := status.Convert(err)
pb := s.Proto()
w.Header().Del("Trailer")
w.Header().Del("Transfer-Encoding")
contentType := marshaler.ContentType(pb)
w.Header().Set("Content-Type", contentType)
if s.Code() == codes.Unauthenticated {
w.Header().Set("WWW-Authenticate", s.Message())
}
buf, merr := marshaler.Marshal(pb)
if merr != nil {
grpclog.Infof("Failed to marshal error message %q: %v", s, merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
// Unless the request includes a TE header field indicating "trailers"
// is acceptable, as described in Section 4.3, a server SHOULD NOT
// generate trailer fields that it believes are necessary for the user
// agent to receive.
doForwardTrailers := requestAcceptsTrailers(r)
if doForwardTrailers {
handleForwardResponseTrailerHeader(w, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
st := HTTPStatusFromCode(s.Code())
if customStatus != nil {
st = customStatus.HTTPStatus
}
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
if doForwardTrailers {
handleForwardResponseTrailer(w, md)
}
}
func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
return status.Convert(err)
}
// DefaultRoutingErrorHandler is our default handler for routing errors.
// By default, HTTP status codes are mapped to gRPC codes as follows:
// StatusNotFound -> codes.NotFound
// StatusBadRequest -> codes.InvalidArgument
// StatusMethodNotAllowed -> codes.Unimplemented
// anything else -> codes.Internal; this handler is not expected to be called for other statuses
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
sterr := status.Error(codes.Internal, "Unexpected routing error")
switch httpStatus {
case http.StatusBadRequest:
sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus))
case http.StatusMethodNotAllowed:
sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus))
case http.StatusNotFound:
sterr = status.Error(codes.NotFound, http.StatusText(httpStatus))
}
mux.errorHandler(ctx, mux, marshaler, w, r, sterr)
}
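A hedged sketch of plugging into these hooks. It assumes the v2 runtime.WithRoutingErrorHandler ServeMux option (not part of this diff) to install a RoutingErrorHandlerFunc, and uses HTTPStatusError so that DefaultHTTPErrorHandler emits a specific HTTP status while keeping its normal response body handling.

package gateway

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithRoutingErrorHandler(func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
			if httpStatus == http.StatusMethodNotAllowed {
				err := &runtime.HTTPStatusError{
					HTTPStatus: http.StatusMethodNotAllowed,
					Err:        status.Error(codes.Unimplemented, "method not allowed"),
				}
				// DefaultHTTPErrorHandler honors HTTPStatusError.HTTPStatus.
				runtime.DefaultHTTPErrorHandler(ctx, mux, m, w, r, err)
				return
			}
			runtime.DefaultRoutingErrorHandler(ctx, mux, m, w, r, httpStatus)
		}),
	)
}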


@ -0,0 +1,165 @@
package runtime
import (
"encoding/json"
"fmt"
"io"
"sort"
"google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
)
func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
fd := fields.ByName(protoreflect.Name(name))
if fd != nil {
return fd
}
return fields.ByJSONName(name)
}
// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) {
fm := &field_mask.FieldMask{}
var root interface{}
if err := json.NewDecoder(r).Decode(&root); err != nil {
if err == io.EOF {
return fm, nil
}
return nil, err
}
queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}}
for len(queue) > 0 {
// dequeue an item
item := queue[0]
queue = queue[1:]
m, ok := item.node.(map[string]interface{})
switch {
case ok:
// if the item is an object, then enqueue all of its children
for k, v := range m {
if item.msg == nil {
return nil, fmt.Errorf("JSON structure did not match request type")
}
fd := getFieldByName(item.msg.Descriptor().Fields(), k)
if fd == nil {
return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName())
}
if isDynamicProtoMessage(fd.Message()) {
for _, p := range buildPathsBlindly(k, v) {
newPath := p
if item.path != "" {
newPath = item.path + "." + newPath
}
queue = append(queue, fieldMaskPathItem{path: newPath})
}
continue
}
if isProtobufAnyMessage(fd.Message()) {
_, hasTypeField := v.(map[string]interface{})["@type"]
if hasTypeField {
queue = append(queue, fieldMaskPathItem{path: k})
continue
} else {
return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName())
}
}
child := fieldMaskPathItem{
node: v,
}
if item.path == "" {
child.path = string(fd.FullName().Name())
} else {
child.path = item.path + "." + string(fd.FullName().Name())
}
switch {
case fd.IsList(), fd.IsMap():
// As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86
// Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop.
fm.Paths = append(fm.Paths, child.path)
case fd.Message() != nil:
child.msg = item.msg.Get(fd).Message()
fallthrough
default:
queue = append(queue, child)
}
}
case len(item.path) > 0:
// otherwise, it's a leaf node so print its path
fm.Paths = append(fm.Paths, item.path)
}
}
// Sort for deterministic output in the presence
// of repeated fields.
sort.Strings(fm.Paths)
return fm, nil
}
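A minimal sketch of deriving a FieldMask from a PATCH body using the function above; timestamppb.Timestamp is only a stand-in for a real update-request message.

package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	body := strings.NewReader(`{"seconds": 1652316896, "nanos": 0}`)
	fm, err := runtime.FieldMaskFromRequestBody(body, &timestamppb.Timestamp{})
	if err != nil {
		panic(err)
	}
	fmt.Println(fm.Paths) // [nanos seconds] (sorted for deterministic output)
}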
func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool {
return md != nil && (md.FullName() == "google.protobuf.Any")
}
func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool {
return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value")
}
// buildPathsBlindly does not attempt to match proto field names to the
// json value keys. Instead it relies completely on the structure of
// the unmarshalled json contained within "in".
// It returns a slice containing all subpaths rooted at the
// passed-in name and json value.
func buildPathsBlindly(name string, in interface{}) []string {
m, ok := in.(map[string]interface{})
if !ok {
return []string{name}
}
var paths []string
queue := []fieldMaskPathItem{{path: name, node: m}}
for len(queue) > 0 {
cur := queue[0]
queue = queue[1:]
m, ok := cur.node.(map[string]interface{})
if !ok {
// This should never happen since we should always check that we only add
// nodes of type map[string]interface{} to the queue.
continue
}
for k, v := range m {
if mi, ok := v.(map[string]interface{}); ok {
queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi})
} else {
// This is not a struct, so there are no more levels to descend.
curPath := cur.path + "." + k
paths = append(paths, curPath)
}
}
}
return paths
}
// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
type fieldMaskPathItem struct {
// the list of prior fields leading up to node connected by dots
path string
// a generic decoded json object for the current item, inspected for further path extraction
node interface{}
// parent message
msg protoreflect.Message
}


@ -2,19 +2,19 @@ package runtime
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"net/textproto" "net/textproto"
"strings"
"github.com/golang/protobuf/proto" "google.golang.org/genproto/googleapis/api/httpbody"
"github.com/grpc-ecosystem/grpc-gateway/internal" "google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
) )
var errEmptyResponse = errors.New("empty response")
// ForwardResponseStream forwards the stream from gRPC server to REST client. // ForwardResponseStream forwards the stream from gRPC server to REST client.
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
f, ok := w.(http.Flusher) f, ok := w.(http.Flusher)
@ -33,7 +33,6 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
handleForwardResponseServerMetadata(w, mux, md) handleForwardResponseServerMetadata(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked") w.Header().Set("Transfer-Encoding", "chunked")
w.Header().Set("Content-Type", marshaler.ContentType())
if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
HTTPError(ctx, mux, marshaler, w, req, err) HTTPError(ctx, mux, marshaler, w, req, err)
return return
@ -61,10 +60,17 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
return return
} }
if !wroteHeader {
w.Header().Set("Content-Type", marshaler.ContentType(resp))
}
var buf []byte var buf []byte
httpBody, isHTTPBody := resp.(*httpbody.HttpBody)
switch { switch {
case resp == nil: case resp == nil:
buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse))) buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
case isHTTPBody:
buf = httpBody.GetData()
default: default:
result := map[string]interface{}{"result": resp} result := map[string]interface{}{"result": resp}
if rb, ok := resp.(responseBody); ok { if rb, ok := resp.(responseBody); ok {
@ -132,15 +138,22 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
} }
handleForwardResponseServerMetadata(w, mux, md) handleForwardResponseServerMetadata(w, mux, md)
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
// Unless the request includes a TE header field indicating "trailers"
// is acceptable, as described in Section 4.3, a server SHOULD NOT
// generate trailer fields that it believes are necessary for the user
// agent to receive.
doForwardTrailers := requestAcceptsTrailers(req)
if doForwardTrailers {
handleForwardResponseTrailerHeader(w, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
handleForwardResponseTrailerHeader(w, md) handleForwardResponseTrailerHeader(w, md)
contentType := marshaler.ContentType() contentType := marshaler.ContentType(resp)
// Check marshaler on run time in order to keep backwards compatibility
// An interface param needs to be added to the ContentType() function on
// the Marshal interface to be able to remove this check
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
contentType = typeMarshaler.ContentTypeFromMessage(resp)
}
w.Header().Set("Content-Type", contentType) w.Header().Set("Content-Type", contentType)
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
@ -164,8 +177,15 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
grpclog.Infof("Failed to write response: %v", err) grpclog.Infof("Failed to write response: %v", err)
} }
if doForwardTrailers {
handleForwardResponseTrailer(w, md) handleForwardResponseTrailer(w, md)
} }
}
func requestAcceptsTrailers(req *http.Request) bool {
te := req.Header.Get("TE")
return strings.Contains(strings.ToLower(te), "trailers")
}
func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
if len(opts) == 0 { if len(opts) == 0 {
@ -181,11 +201,13 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re
} }
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
serr := streamError(ctx, mux.streamErrorHandler, err) st := mux.streamErrorHandler(ctx, err)
msg := errorChunk(st)
if !wroteHeader { if !wroteHeader {
w.WriteHeader(int(serr.HttpCode)) w.Header().Set("Content-Type", marshaler.ContentType(msg))
w.WriteHeader(HTTPStatusFromCode(st.Code()))
} }
buf, merr := marshaler.Marshal(errorChunk(serr)) buf, merr := marshaler.Marshal(msg)
if merr != nil { if merr != nil {
grpclog.Infof("Failed to marshal an error: %v", merr) grpclog.Infof("Failed to marshal an error: %v", merr)
return return
@ -196,17 +218,6 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar
} }
} }
// streamError returns the payload for the final message in a response stream func errorChunk(st *status.Status) map[string]proto.Message {
// that represents the given err. return map[string]proto.Message{"error": st.Proto()}
func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
serr := errHandler(ctx, err)
if serr != nil {
return serr
}
// TODO: log about misbehaving stream error handler?
return DefaultHTTPStreamErrorHandler(ctx, err)
}
func errorChunk(err *StreamError) map[string]proto.Message {
return map[string]proto.Message{"error": (*internal.StreamError)(err)}
} }


@ -4,13 +4,6 @@ import (
"google.golang.org/genproto/googleapis/api/httpbody" "google.golang.org/genproto/googleapis/api/httpbody"
) )
// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler
func SetHTTPBodyMarshaler(serveMux *ServeMux) {
serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
Marshaler: &JSONPb{OrigName: true},
}
}
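With SetHTTPBodyMarshaler removed in v2, the equivalent is to register the marshaler explicitly when constructing the mux. A hedged sketch; it assumes the v2 runtime.WithMarshalerOption option and runtime.MIMEWildcard constant, which are not shown in this diff.

package gateway

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func newHTTPBodyMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.HTTPBodyMarshaler{
			// A plain JSONPb replaces the v1 &JSONPb{OrigName: true} default.
			Marshaler: &runtime.JSONPb{},
		}),
	)
}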
// HTTPBodyMarshaler is a Marshaler which supports marshaling of a // HTTPBodyMarshaler is a Marshaler which supports marshaling of a
// google.api.HttpBody message as the full response body if it is // google.api.HttpBody message as the full response body if it is
// the actual message used as the response. If not, then this will // the actual message used as the response. If not, then this will
@ -19,18 +12,14 @@ type HTTPBodyMarshaler struct {
Marshaler Marshaler
} }
// ContentType implementation to keep backwards compatibility with marshal interface // ContentType returns its specified content type in case v is a
func (h *HTTPBodyMarshaler) ContentType() string { // google.api.HttpBody message, otherwise it will fall back to the default Marshalers
return h.ContentTypeFromMessage(nil) // content type.
} func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns
// its specified content type otherwise fall back to the default Marshaler.
func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
if httpBody, ok := v.(*httpbody.HttpBody); ok { if httpBody, ok := v.(*httpbody.HttpBody); ok {
return httpBody.GetContentType() return httpBody.GetContentType()
} }
return h.Marshaler.ContentType() return h.Marshaler.ContentType(v)
} }
// Marshal marshals "v" by returning the body bytes if v is a // Marshal marshals "v" by returning the body bytes if v is a


@ -15,7 +15,7 @@ import (
type JSONBuiltin struct{} type JSONBuiltin struct{}
// ContentType always Returns "application/json". // ContentType always Returns "application/json".
func (*JSONBuiltin) ContentType() string { func (*JSONBuiltin) ContentType(_ interface{}) string {
return "application/json" return "application/json"
} }


@ -6,21 +6,25 @@ import (
"fmt" "fmt"
"io" "io"
"reflect" "reflect"
"strconv"
"github.com/golang/protobuf/jsonpb" "google.golang.org/protobuf/encoding/protojson"
"github.com/golang/protobuf/proto" "google.golang.org/protobuf/proto"
) )
// JSONPb is a Marshaler which marshals/unmarshals into/from JSON // JSONPb is a Marshaler which marshals/unmarshals into/from JSON
// with the "github.com/golang/protobuf/jsonpb". // with the "google.golang.org/protobuf/encoding/protojson" marshaler.
// It supports fully functionality of protobuf unlike JSONBuiltin. // It supports the full functionality of protobuf unlike JSONBuiltin.
// //
// The NewDecoder method returns a DecoderWrapper, so the underlying // The NewDecoder method returns a DecoderWrapper, so the underlying
// *json.Decoder methods can be used. // *json.Decoder methods can be used.
type JSONPb jsonpb.Marshaler type JSONPb struct {
protojson.MarshalOptions
protojson.UnmarshalOptions
}
// ContentType always returns "application/json". // ContentType always returns "application/json".
func (*JSONPb) ContentType() string { func (*JSONPb) ContentType(_ interface{}) string {
return "application/json" return "application/json"
} }
@ -47,7 +51,13 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
_, err = w.Write(buf) _, err = w.Write(buf)
return err return err
} }
return (*jsonpb.Marshaler)(j).Marshal(w, p) b, err := j.MarshalOptions.Marshal(p)
if err != nil {
return err
}
_, err = w.Write(b)
return err
} }
var ( var (
@ -56,8 +66,8 @@ var (
) )
// marshalNonProto marshals a non-message field of a protobuf message. // marshalNonProto marshals a non-message field of a protobuf message.
// This function does not correctly marshals arbitrary data structure into JSON, // This function does not correctly marshal arbitrary data structures into JSON,
// but it is only capable of marshaling non-message field values of protobuf, // it is only capable of marshaling non-message field values of protobuf,
// i.e. primitive types, enums; pointers to primitives or enums; maps from // i.e. primitive types, enums; pointers to primitives or enums; maps from
// integer/string types to primitives/enums/pointers to messages. // integer/string types to primitives/enums/pointers to messages.
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
@ -74,7 +84,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
if rv.Kind() == reflect.Slice { if rv.Kind() == reflect.Slice {
if rv.IsNil() { if rv.IsNil() {
if j.EmitDefaults { if j.EmitUnpopulated {
return []byte("[]"), nil return []byte("[]"), nil
} }
return []byte("null"), nil return []byte("null"), nil
@ -93,7 +103,37 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
return nil, err return nil, err
} }
} }
if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
return nil, err
}
}
err = buf.WriteByte(']')
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
if rv.Type().Elem().Implements(typeProtoEnum) {
var buf bytes.Buffer
err := buf.WriteByte('[')
if err != nil {
return nil, err
}
for i := 0; i < rv.Len(); i++ {
if i != 0 {
err = buf.WriteByte(',')
if err != nil {
return nil, err
}
}
if j.UseEnumNumbers {
_, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
} else {
_, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"")
}
if err != nil {
return nil, err return nil, err
} }
} }
@ -120,7 +160,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
} }
return json.Marshal(m) return json.Marshal(m)
} }
if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
return json.Marshal(enum.String()) return json.Marshal(enum.String())
} }
return json.Marshal(rv.Interface()) return json.Marshal(rv.Interface())
@ -128,25 +168,29 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
// Unmarshal unmarshals JSON "data" into "v" // Unmarshal unmarshals JSON "data" into "v"
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
return unmarshalJSONPb(data, v) return unmarshalJSONPb(data, j.UnmarshalOptions, v)
} }
// NewDecoder returns a Decoder which reads JSON stream from "r". // NewDecoder returns a Decoder which reads JSON stream from "r".
func (j *JSONPb) NewDecoder(r io.Reader) Decoder { func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
d := json.NewDecoder(r) d := json.NewDecoder(r)
return DecoderWrapper{Decoder: d} return DecoderWrapper{
Decoder: d,
UnmarshalOptions: j.UnmarshalOptions,
}
} }
// DecoderWrapper is a wrapper around a *json.Decoder that adds // DecoderWrapper is a wrapper around a *json.Decoder that adds
// support for protos to the Decode method. // support for protos to the Decode method.
type DecoderWrapper struct { type DecoderWrapper struct {
*json.Decoder *json.Decoder
protojson.UnmarshalOptions
} }
// Decode wraps the embedded decoder's Decode method to support // Decode wraps the embedded decoder's Decode method to support
// protos using a jsonpb.Unmarshaler. // protos using a jsonpb.Unmarshaler.
func (d DecoderWrapper) Decode(v interface{}) error { func (d DecoderWrapper) Decode(v interface{}) error {
return decodeJSONPb(d.Decoder, v) return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v)
} }
// NewEncoder returns an Encoder which writes JSON stream into "w". // NewEncoder returns an Encoder which writes JSON stream into "w".
@ -162,21 +206,28 @@ func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
}) })
} }
func unmarshalJSONPb(data []byte, v interface{}) error { func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
d := json.NewDecoder(bytes.NewReader(data)) d := json.NewDecoder(bytes.NewReader(data))
return decodeJSONPb(d, v) return decodeJSONPb(d, unmarshaler, v)
} }
func decodeJSONPb(d *json.Decoder, v interface{}) error { func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
p, ok := v.(proto.Message) p, ok := v.(proto.Message)
if !ok { if !ok {
return decodeNonProtoField(d, v) return decodeNonProtoField(d, unmarshaler, v)
}
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
return unmarshaler.UnmarshalNext(d, p)
} }
func decodeNonProtoField(d *json.Decoder, v interface{}) error { // Decode into bytes for marshalling
var b json.RawMessage
err := d.Decode(&b)
if err != nil {
return err
}
return unmarshaler.Unmarshal([]byte(b), p)
}
func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr { if rv.Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer", v) return fmt.Errorf("%T is not a pointer", v)
@ -186,8 +237,14 @@ func decodeNonProtoField(d *json.Decoder, v interface{}) error {
rv.Set(reflect.New(rv.Type().Elem())) rv.Set(reflect.New(rv.Type().Elem()))
} }
if rv.Type().ConvertibleTo(typeProtoMessage) { if rv.Type().ConvertibleTo(typeProtoMessage) {
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} // Decode into bytes for marshalling
return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) var b json.RawMessage
err := d.Decode(&b)
if err != nil {
return err
}
return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message))
} }
rv = rv.Elem() rv = rv.Elem()
} }
@ -211,24 +268,45 @@ func decodeNonProtoField(d *json.Decoder, v interface{}) error {
} }
bk := result[0] bk := result[0]
bv := reflect.New(rv.Type().Elem()) bv := reflect.New(rv.Type().Elem())
if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { if v == nil {
null := json.RawMessage("null")
v = &null
}
if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil {
return err return err
} }
rv.SetMapIndex(bk, bv.Elem()) rv.SetMapIndex(bk, bv.Elem())
} }
return nil return nil
} }
if rv.Kind() == reflect.Slice {
var sl []json.RawMessage
if err := d.Decode(&sl); err != nil {
return err
}
if sl != nil {
rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
}
for _, item := range sl {
bv := reflect.New(rv.Type().Elem())
if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil {
return err
}
rv.Set(reflect.Append(rv, bv.Elem()))
}
return nil
}
if _, ok := rv.Interface().(protoEnum); ok {
var repr interface{}
if err := d.Decode(&repr); err != nil {
return err
}
switch v := repr.(type) {
case string:
// TODO(yugui) Should use proto.StructProperties?
return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
case float64:
rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
return nil
default:
return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
@@ -242,6 +320,8 @@ type protoEnum interface {
EnumDescriptor() ([]byte, []int)
}
var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()
var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
// Delimiter for newline encoded JSON streams.
@@ -249,14 +329,16 @@ func (j *JSONPb) Delimiter() []byte {
return []byte("\n")
}
var (
convFromType = map[reflect.Kind]reflect.Value{
reflect.String:  reflect.ValueOf(String),
reflect.Bool:    reflect.ValueOf(Bool),
reflect.Float64: reflect.ValueOf(Float64),
reflect.Float32: reflect.ValueOf(Float32),
reflect.Int64:   reflect.ValueOf(Int64),
reflect.Int32:   reflect.ValueOf(Int32),
reflect.Uint64:  reflect.ValueOf(Uint64),
reflect.Uint32:  reflect.ValueOf(Uint32),
reflect.Slice:   reflect.ValueOf(Bytes),
}
)
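The block above is the v2 decode path, driven entirely by protojson options. As an illustrative sketch only (not part of this commit; the package and function names are invented), the old OrigName and allowUnknownFields switches are now expressed through the marshaler's option structs:

package example

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/encoding/protojson"
)

// newGatewayMux registers a JSONPb marshaler whose behavior is controlled by
// protojson options instead of the removed package-level flags.
func newGatewayMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{
			MarshalOptions: protojson.MarshalOptions{
				UseProtoNames:   true, // roughly what OrigName: true used to do
				EmitUnpopulated: true,
			},
			UnmarshalOptions: protojson.UnmarshalOptions{
				DiscardUnknown: true, // replaces the old allowUnknownFields switch
			},
		}),
	)
}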

View File

@@ -4,15 +4,16 @@ import (
"io"
"errors"
"io/ioutil"
"google.golang.org/protobuf/proto"
)
// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes
type ProtoMarshaller struct{}
// ContentType always returns "application/octet-stream".
func (*ProtoMarshaller) ContentType(_ interface{}) string {
return "application/octet-stream"
}

View File

@@ -16,14 +16,9 @@ type Marshaler interface {
// NewEncoder returns an Encoder which writes bytes sequence into "w".
NewEncoder(w io.Writer) Encoder
// ContentType returns the Content-Type which this marshaler is responsible for.
// The parameter describes the type which is being marshalled, which can sometimes
// affect the content type returned.
ContentType(v interface{}) string
}
// Decoder decodes a byte sequence
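Because ContentType now receives the value being marshalled, a custom marshaler can vary the response header per message. A hedged sketch (names invented; this only mirrors what the library's HTTPBodyMarshaler does, it is not its implementation):

package example

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/genproto/googleapis/api/httpbody"
)

// dynamicJSON embeds the stock JSONPb marshaler and only overrides ContentType.
type dynamicJSON struct {
	runtime.JSONPb
}

var _ runtime.Marshaler = (*dynamicJSON)(nil)

// ContentType inspects the value: google.api.HttpBody responses carry their
// own content type, everything else stays JSON.
func (m *dynamicJSON) ContentType(v interface{}) string {
	if hb, ok := v.(*httpbody.HttpBody); ok && hb.GetContentType() != "" {
		return hb.GetContentType()
	}
	return "application/json"
}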

View File

@@ -6,6 +6,7 @@ import (
"net/http"
"google.golang.org/grpc/grpclog"
"google.golang.org/protobuf/encoding/protojson"
)
// MIMEWildcard is the fallback MIME type used for requests which do not match
@@ -16,7 +17,16 @@ var (
acceptHeader = http.CanonicalHeaderKey("Accept")
contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
defaultMarshaler = &HTTPBodyMarshaler{
Marshaler: &JSONPb{
MarshalOptions: protojson.MarshalOptions{
EmitUnpopulated: true,
},
UnmarshalOptions: protojson.UnmarshalOptions{
DiscardUnknown: true,
},
},
}
)
// MarshalerForRequest returns the inbound/outbound marshalers for this request.
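For illustration only (not part of this commit): the registry keys marshalers by MIME type, so additional encodings can be mounted next to the new HTTPBodyMarshaler default, e.g. raw proto bytes for clients that send application/octet-stream.

package example

import "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"

func newBinaryAwareMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		// Selected when Content-Type / Accept is "application/octet-stream".
		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
		// Everything else keeps using a JSON marshaler.
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{}),
	)
}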

View File

@@ -2,28 +2,47 @@ package runtime
import (
"context"
"errors"
"fmt"
"net/http"
"net/textproto"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
type UnescapingMode int
const (
// UnescapingModeLegacy is the default V2 behavior, which escapes the entire
// path string before doing any routing.
UnescapingModeLegacy UnescapingMode = iota
// EscapingTypeExceptReserved unescapes all path parameters except RFC 6570
// reserved characters.
UnescapingModeAllExceptReserved
// EscapingTypeExceptSlash unescapes URL path parameters except path
// separators, which will be left as "%2F".
UnescapingModeAllExceptSlash
// URL path parameters will be fully decoded.
UnescapingModeAllCharacters
// UnescapingModeDefault is the default escaping type.
// TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
// reference implementation
UnescapingModeDefault = UnescapingModeLegacy
)
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
// a request is received with a URI path that does not match any registered
// service method.
//
// Since gRPC servers return an "Unimplemented" code for requests with an
// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
// ServeMux is a request multiplexer for grpc-gateway.
// It matches http requests to patterns and invokes the corresponding handler.
type ServeMux struct {
@@ -34,10 +53,11 @@ type ServeMux struct {
incomingHeaderMatcher HeaderMatcherFunc
outgoingHeaderMatcher HeaderMatcherFunc
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
errorHandler ErrorHandlerFunc
streamErrorHandler StreamErrorHandlerFunc
routingErrorHandler RoutingErrorHandlerFunc
disablePathLengthFallback bool
unescapingMode UnescapingMode
}
// ServeMuxOption is an option that can be given to a ServeMux on construction.
@@ -55,8 +75,16 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http.
}
}
// WithEscapingType sets the escaping type. See the definitions of UnescapingMode
// for more information.
func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.unescapingMode = mode
}
}
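A hypothetical usage sketch (not from this commit; the constructor name is invented): selecting a non-legacy mode so single-segment path parameters are percent-decoded while RFC 6570 reserved characters such as "%2F" stay escaped.

package example

import "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"

func newUnescapingMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithUnescapingMode(runtime.UnescapingModeAllExceptReserved),
	)
}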
// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
// done with careful consideration.
func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
return func(serveMux *ServeMux) {
@@ -111,14 +139,36 @@ func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) Se
}
}
// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
//
// This can be used to configure a custom error response.
func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.errorHandler = fn
}
}
// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
// error handler, which allows for customizing the error trailer for server-streaming
// calls.
//
// For stream errors that occur before any response has been written, the mux's
// ErrorHandler will be invoked. However, once data has been written, the errors must
// be handled differently: they must be included in the response body. The response body's
// final message will include the error details returned by the stream error handler.
func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.streamErrorHandler = fn
}
}
// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
//
// Method called for errors which can happen before gRPC route selected or executed.
// The following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest
func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.routingErrorHandler = fn
} }
}
}
@@ -129,36 +179,16 @@ func WithDisablePathLengthFallback() ServeMuxOption {
}
}
// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
// error handler, which allows for customizing the error trailer for server-streaming
// calls.
//
// For stream errors that occur before any response has been written, the mux's
// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
// be handled differently: they must be included in the response body. The response body's
// final message will include the error details returned by the stream error handler.
func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.streamErrorHandler = fn
}
}
// WithLastMatchWins returns a ServeMuxOption that will enable "last
// match wins" behavior, where if multiple path patterns match a
// request path, the last one defined in the .proto file will be used.
func WithLastMatchWins() ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.lastMatchWins = true
}
}
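The v1 options above (WithProtoErrorHandler, the old WithStreamErrorHandler, WithLastMatchWins) are gone in this version. A hedged sketch (handler bodies invented) of how the v2 hooks compose: ErrorHandlerFunc takes over from ProtoErrorHandlerFunc, and routing failures (400/404/405) flow through the separate RoutingErrorHandlerFunc.

package example

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func newMuxWithErrorHooks() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithErrorHandler(runtime.DefaultHTTPErrorHandler),
		runtime.WithRoutingErrorHandler(func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
			if httpStatus == http.StatusNotFound {
				// Render routing 404s as a gRPC-style NotFound error body.
				runtime.DefaultHTTPErrorHandler(ctx, mux, m, w, r, status.Error(codes.NotFound, "no route for "+r.URL.Path))
				return
			}
			runtime.DefaultRoutingErrorHandler(ctx, mux, m, w, r, httpStatus)
		}),
	)
}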
// NewServeMux returns a new ServeMux whose internal mapping is empty.
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
serveMux := &ServeMux{
handlers: make(map[string][]handler),
forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
marshalers: makeMarshalerMIMERegistry(),
errorHandler: DefaultHTTPErrorHandler,
streamErrorHandler: DefaultStreamErrorHandler,
routingErrorHandler: DefaultRoutingErrorHandler,
unescapingMode: UnescapingModeDefault,
}
for _, opt := range opts {
@@ -180,11 +210,23 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux {
// Handle associates "h" to the pair of HTTP method and path pattern.
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
}
// HandlePath allows users to configure custom path handlers.
// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
compiler, err := httprule.Parse(pathPattern)
if err != nil {
return fmt.Errorf("parsing path pattern: %w", err)
}
tp := compiler.Compile()
pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
if err != nil {
return fmt.Errorf("creating new pattern: %w", err)
}
s.Handle(meth, pattern, h)
return nil
}
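HandlePath is new in this vendored version; an illustrative sketch (endpoint and helper name invented) of mounting a plain HTTP handler that bypasses gRPC:

package example

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func registerHealthz(mux *runtime.ServeMux) error {
	return mux.HandlePath("GET", "/healthz", func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("ok"))
	})
}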
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
@@ -193,48 +235,66 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
if !strings.HasPrefix(path, "/") {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
return
}
// TODO(v3): remove UnescapingModeLegacy
if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
path = r.URL.RawPath
}
components := strings.Split(path[1:], "/")
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
r.Method = strings.ToUpper(override)
if err := r.ParseForm(); err != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
return
}
}
// Verb out here is to memoize for the fallback case below
var verb string
for _, h := range s.handlers[r.Method] {
// If the pattern has a verb, explicitly look for a suffix in the last
// component that matches a colon plus the verb. This allows us to
// handle some cases that otherwise can't be correctly handled by the
// former LastIndex case, such as when the verb literal itself contains
// a colon. This should work for all cases that have run through the
// parser because we know what verb we're looking for, however, there
// are still some cases that the parser itself cannot disambiguate. See
// the comment there if interested.
patVerb := h.pat.Verb()
l := len(components)
lastComponent := components[l-1]
var idx int = -1
if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
idx = len(lastComponent) - len(patVerb) - 1
}
if idx == 0 {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
return
}
if idx > 0 {
components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:]
}
pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
if err != nil {
var mse MalformedSequenceError
if ok := errors.As(err, &mse); ok {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
HTTPStatus: http.StatusBadRequest,
Err: mse,
})
}
continue
}
h.h(w, r, pathParams)
@@ -242,47 +302,43 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// lookup other methods to handle fallback from GET to POST and
// to determine if it is NotImplemented or NotFound.
for m, handlers := range s.handlers {
if m == r.Method {
continue
}
for _, h := range handlers {
pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
if err != nil {
var mse MalformedSequenceError
if ok := errors.As(err, &mse); ok {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
HTTPStatus: http.StatusBadRequest,
Err: mse,
})
}
continue
}
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
if s.isPathLengthFallback(r) {
if err := r.ParseForm(); err != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
return
}
h.h(w, r, pathParams)
return
}
_, outboundMarshaler := MarshalerForRequest(s, r)
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
return
}
}
_, outboundMarshaler := MarshalerForRequest(s, r)
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
}
// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.

View File

@@ -3,9 +3,10 @@ package runtime
import (
"errors"
"fmt"
"strconv"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc/grpclog"
)
@@ -14,14 +15,23 @@ var (
ErrNotMatch = errors.New("not match to the path pattern")
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
ErrInvalidPattern = errors.New("invalid pattern")
// ErrMalformedSequence indicates that an escape sequence was malformed.
ErrMalformedSequence = errors.New("malformed escape sequence")
)
type MalformedSequenceError string
func (e MalformedSequenceError) Error() string {
return "malformed path escape " + strconv.Quote(string(e))
}
type op struct {
code utilities.OpCode
operand int
}
// Pattern is a template pattern of http request paths defined in
// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
type Pattern struct {
// ops is a list of operations
ops []op
@@ -35,31 +45,14 @@ type Pattern struct {
tailLen int
// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
verb string
}
type patternOptions struct {
assumeColonVerb bool
}
// PatternOpt is an option for creating Patterns.
type PatternOpt func(*patternOptions)
// NewPattern returns a new Pattern from the given definition values.
// "ops" is a sequence of op codes. "pool" is a constant pool.
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
// "version" must be 1 for now.
// It returns an error if the given definition is invalid.
func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
options := patternOptions{
assumeColonVerb: true,
}
for _, o := range opts {
o(&options)
}
if version != 1 {
grpclog.Infof("unsupported version: %d", version)
return Pattern{}, ErrInvalidPattern
@@ -111,7 +104,7 @@ func NewPattern(version int, ops []int, pool []string, verb string, opts ...Patt
}
stack -= op.operand
if stack < 0 {
grpclog.Info("stack underflow")
return Pattern{}, ErrInvalidPattern
}
stack++
@@ -145,7 +138,6 @@ func NewPattern(version int, ops []int, pool []string, verb string, opts ...Patt
stacksize: maxstack,
tailLen: tailLen,
verb: verb,
}, nil
}
@@ -157,12 +149,13 @@ func MustPattern(p Pattern, err error) Pattern {
return p
}
// MatchAndEscape examines components to determine if they match to a Pattern.
// MatchAndEscape will return an error if no Patterns matched or if a pattern
// matched but contained malformed escape sequences. If successful, the function
// returns a mapping from field paths to their captured values.
func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
if p.verb != verb {
if p.verb != "" {
return nil, ErrNotMatch
}
if len(components) == 0 {
@@ -171,7 +164,6 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err
components = append([]string{}, components...)
components[len(components)-1] += ":" + verb
}
}
var pos int
@@ -179,6 +171,8 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err
captured := make([]string, len(p.vars))
l := len(components)
for _, op := range p.ops {
var err error
switch op.code {
case utilities.OpNop:
continue
@@ -191,6 +185,10 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err
if lit := p.pool[op.operand]; c != lit {
return nil, ErrNotMatch
}
} else if op.code == utilities.OpPush {
if c, err = unescape(c, unescapingMode, false); err != nil {
return nil, err
}
}
stack = append(stack, c)
pos++
@@ -200,7 +198,11 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err
return nil, ErrNotMatch
}
end -= p.tailLen
c := strings.Join(components[pos:end], "/")
if c, err = unescape(c, unescapingMode, true); err != nil {
return nil, err
}
stack = append(stack, c)
pos = end
case utilities.OpConcatN:
n := op.operand
@@ -222,6 +224,16 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err
return bindings, nil
}
// MatchAndEscape examines components to determine if they match to a Pattern.
// It will never perform per-component unescaping (see: UnescapingModeLegacy).
// MatchAndEscape will return an error if no Patterns matched. If successful,
// the function returns a mapping from field paths to their captured values.
//
// Deprecated: Use MatchAndEscape.
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
return p.MatchAndEscape(components, verb, UnescapingModeDefault)
}
// Verb returns the verb part of the Pattern.
func (p Pattern) Verb() string { return p.verb }
@@ -253,10 +265,119 @@ func (p Pattern) String() string {
return "/" + segs
}
/*
 * The following code is adopted and modified from Go's standard library
 * and carries the attached license.
 *
 * Copyright 2009 The Go Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file.
 */
// ishex returns whether or not the given byte is a valid hex character
func ishex(c byte) bool {
switch {
case '0' <= c && c <= '9':
return true
case 'a' <= c && c <= 'f':
return true
case 'A' <= c && c <= 'F':
return true
}
return false
}
func isRFC6570Reserved(c byte) bool {
switch c {
case '!', '#', '$', '&', '\'', '(', ')', '*',
'+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
return true
default:
return false
}
}
// unhex converts a hex point to the bit representation
func unhex(c byte) byte {
switch {
case '0' <= c && c <= '9':
return c - '0'
case 'a' <= c && c <= 'f':
return c - 'a' + 10
case 'A' <= c && c <= 'F':
return c - 'A' + 10
}
return 0
}
// shouldUnescapeWithMode returns true if the character is escapable with the
// given mode
func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
switch mode {
case UnescapingModeAllExceptReserved:
if isRFC6570Reserved(c) {
return false
}
case UnescapingModeAllExceptSlash:
if c == '/' {
return false
}
case UnescapingModeAllCharacters:
return true
}
return true
}
// unescape unescapes a path string using the provided mode
func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
// TODO(v3): remove UnescapingModeLegacy
if mode == UnescapingModeLegacy {
return s, nil
}
if !multisegment {
mode = UnescapingModeAllCharacters
}
// Count %, check that they're well-formed.
n := 0
for i := 0; i < len(s); {
if s[i] == '%' {
n++
if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
s = s[i:]
if len(s) > 3 {
s = s[:3]
}
return "", MalformedSequenceError(s)
}
i += 3
} else {
i++
}
}
if n == 0 {
return s, nil
}
var t strings.Builder
t.Grow(len(s))
for i := 0; i < len(s); i++ {
switch s[i] {
case '%':
c := unhex(s[i+1])<<4 | unhex(s[i+2])
if shouldUnescapeWithMode(c, mode) {
t.WriteByte(c)
i += 2
continue
}
fallthrough
default:
t.WriteByte(s[i])
}
}
return t.String(), nil
}
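For comparison only (illustrative, not part of this commit): the standard library always decodes every escape, which is what UnescapingModeAllCharacters does; the modes above differ only in which bytes they leave percent-encoded, and malformed sequences surface as MalformedSequenceError (reported as HTTP 400 by ServeHTTP) rather than url.EscapeError.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Full decoding, equivalent to UnescapingModeAllCharacters; under
	// UnescapingModeAllExceptReserved the "%2F" would be kept as-is.
	s, err := url.PathUnescape("shelves%2F1")
	fmt.Println(s, err) // shelves/1 <nil>

	// A malformed sequence; the gateway's unescape returns MalformedSequenceError here.
	_, err = url.PathUnescape("book%zz")
	fmt.Println(err) // invalid URL escape "%zz"
}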

View File

@@ -1,7 +1,7 @@
package runtime
import (
"google.golang.org/protobuf/proto"
)
// StringP returns a pointer to a string whose pointee is same as the given string value.

View File

@ -0,0 +1,329 @@
package runtime
import (
"encoding/base64"
"errors"
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/grpc/grpclog"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
)
var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
// QueryParameterParser defines interface for all query parameter parsers
type QueryParameterParser interface {
Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
}
// PopulateQueryParameters parses query parameters
// into "msg" using current query parser
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
return currentQueryParser.Parse(msg, values, filter)
}
type defaultQueryParser struct{}
// Parse populates "values" into "msg".
// A value is ignored if its key starts with one of the elements in "filter".
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
for key, values := range values {
match := valuesKeyRegexp.FindStringSubmatch(key)
if len(match) == 3 {
key = match[1]
values = append([]string{match[2]}, values...)
}
fieldPath := strings.Split(key, ".")
if filter.HasCommonPrefix(fieldPath) {
continue
}
if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil {
return err
}
}
return nil
}
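A hypothetical caller-side sketch of the parser above (the helper name is invented); any proto.Message with matching field or JSON names can be populated this way:

package example

import (
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	"google.golang.org/protobuf/proto"
)

// fillFromQuery populates msg from a raw query string. Nested fields use dots
// ("author.name") and map entries use the bracket form handled above ("labels[env]=prod").
func fillFromQuery(msg proto.Message, rawQuery string) error {
	values, err := url.ParseQuery(rawQuery)
	if err != nil {
		return err
	}
	// A nil filter list means no field paths are excluded (none were already
	// bound from the URL path or the request body).
	return runtime.PopulateQueryParameters(msg, values, utilities.NewDoubleArray(nil))
}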
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
fieldPath := strings.Split(fieldPathString, ".")
return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
}
func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
if len(fieldPath) < 1 {
return errors.New("no field path")
}
if len(values) < 1 {
return errors.New("no value provided")
}
var fieldDescriptor protoreflect.FieldDescriptor
for i, fieldName := range fieldPath {
fields := msgValue.Descriptor().Fields()
// Get field by name
fieldDescriptor = fields.ByName(protoreflect.Name(fieldName))
if fieldDescriptor == nil {
fieldDescriptor = fields.ByJSONName(fieldName)
if fieldDescriptor == nil {
// We're not returning an error here because this could just be
// an extra query parameter that isn't part of the request.
grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, "."))
return nil
}
}
// If this is the last element, we're done
if i == len(fieldPath)-1 {
break
}
// Only singular message fields are allowed
if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated {
return fmt.Errorf("invalid path: %q is not a message", fieldName)
}
// Get the nested message
msgValue = msgValue.Mutable(fieldDescriptor).Message()
}
// Check if oneof already set
if of := fieldDescriptor.ContainingOneof(); of != nil {
if f := msgValue.WhichOneof(of); f != nil {
return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
}
}
switch {
case fieldDescriptor.IsList():
return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
case fieldDescriptor.IsMap():
return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values)
}
if len(values) > 1 {
return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", "))
}
return populateField(fieldDescriptor, msgValue, values[0])
}
func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error {
v, err := parseField(fieldDescriptor, value)
if err != nil {
return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err)
}
msgValue.Set(fieldDescriptor, v)
return nil
}
func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error {
for _, value := range values {
v, err := parseField(fieldDescriptor, value)
if err != nil {
return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err)
}
list.Append(v)
}
return nil
}
func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error {
if len(values) != 2 {
return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName())
}
key, err := parseField(fieldDescriptor.MapKey(), values[0])
if err != nil {
return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err)
}
value, err := parseField(fieldDescriptor.MapValue(), values[1])
if err != nil {
return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err)
}
mp.Set(key.MapKey(), value)
return nil
}
func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) {
switch fieldDescriptor.Kind() {
case protoreflect.BoolKind:
v, err := strconv.ParseBool(value)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfBool(v), nil
case protoreflect.EnumKind:
enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
switch {
case errors.Is(err, protoregistry.NotFound):
return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
case err != nil:
return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
}
// Look for enum by name
v := enum.Descriptor().Values().ByName(protoreflect.Name(value))
if v == nil {
i, err := strconv.Atoi(value)
if err != nil {
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
}
// Look for enum by number
v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i))
if v == nil {
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
}
}
return protoreflect.ValueOfEnum(v.Number()), nil
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
v, err := strconv.ParseInt(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfInt32(int32(v)), nil
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
v, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfInt64(v), nil
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
v, err := strconv.ParseUint(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfUint32(uint32(v)), nil
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
v, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfUint64(v), nil
case protoreflect.FloatKind:
v, err := strconv.ParseFloat(value, 32)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfFloat32(float32(v)), nil
case protoreflect.DoubleKind:
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfFloat64(v), nil
case protoreflect.StringKind:
return protoreflect.ValueOfString(value), nil
case protoreflect.BytesKind:
v, err := base64.URLEncoding.DecodeString(value)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfBytes(v), nil
case protoreflect.MessageKind, protoreflect.GroupKind:
return parseMessage(fieldDescriptor.Message(), value)
default:
panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind()))
}
}
func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) {
var msg proto.Message
switch msgDescriptor.FullName() {
case "google.protobuf.Timestamp":
if value == "null" {
break
}
t, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return protoreflect.Value{}, err
}
msg = timestamppb.New(t)
case "google.protobuf.Duration":
if value == "null" {
break
}
d, err := time.ParseDuration(value)
if err != nil {
return protoreflect.Value{}, err
}
msg = durationpb.New(d)
case "google.protobuf.DoubleValue":
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.DoubleValue{Value: v}
case "google.protobuf.FloatValue":
v, err := strconv.ParseFloat(value, 32)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.FloatValue{Value: float32(v)}
case "google.protobuf.Int64Value":
v, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.Int64Value{Value: v}
case "google.protobuf.Int32Value":
v, err := strconv.ParseInt(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.Int32Value{Value: int32(v)}
case "google.protobuf.UInt64Value":
v, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.UInt64Value{Value: v}
case "google.protobuf.UInt32Value":
v, err := strconv.ParseUint(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.UInt32Value{Value: uint32(v)}
case "google.protobuf.BoolValue":
v, err := strconv.ParseBool(value)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.BoolValue{Value: v}
case "google.protobuf.StringValue":
msg = &wrapperspb.StringValue{Value: value}
case "google.protobuf.BytesValue":
v, err := base64.URLEncoding.DecodeString(value)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.BytesValue{Value: v}
case "google.protobuf.FieldMask":
fm := &field_mask.FieldMask{}
fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
msg = fm
default:
return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
}
return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil
}

View File

@@ -3,19 +3,25 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(default_visibility = ["//visibility:public"])
go_library(
name = "utilities",
srcs = [
"doc.go",
"pattern.go",
"readerfactory.go",
"trie.go",
],
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
)
go_test(
name = "utilities_test",
size = "small",
srcs = ["trie_test.go"],
deps = [":utilities"],
)
alias(
name = "go_default_library",
actual = ":utilities",
visibility = ["//visibility:public"],
)

View File

@@ -145,10 +145,7 @@ func (l byLex) Less(i, j int) bool {
return false
}
}
return k < len(sj)
}
// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.

View File

@@ -3,6 +3,7 @@ package assert
import (
"fmt"
"reflect"
"time"
)
type CompareType int
@@ -30,6 +31,8 @@ var (
float64Type = reflect.TypeOf(float64(1))
stringType = reflect.TypeOf("")
timeType = reflect.TypeOf(time.Time{})
)
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
@@ -299,6 +302,27 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
return compareLess, true
}
}
// Check for known struct types we can check for compare results.
case reflect.Struct:
{
// All structs enter here. We're not interested in most types.
if !canConvert(obj1Value, timeType) {
break
}
// time.Time can be compared!
timeObj1, ok := obj1.(time.Time)
if !ok {
timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
}
timeObj2, ok := obj2.(time.Time)
if !ok {
timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
}
return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
}
}
return compareEqual, false
@@ -310,7 +334,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
// assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
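With the struct case added above, the ordered assertions accept time.Time values directly. A hypothetical test sketch:

package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestDeadlineOrdering(t *testing.T) {
	start := time.Now()
	deadline := start.Add(5 * time.Second)
	assert.Greater(t, deadline, start)
	assert.LessOrEqual(t, start, deadline)
}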
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -320,7 +347,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
// assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// Less asserts that the first element is less than the second
@@ -329,7 +359,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
// assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -339,7 +372,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
// assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
// Positive asserts that the specified element is positive
@@ -347,8 +383,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
// assert.Positive(t, 1)
// assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
}
// Negative asserts that the specified element is negative
@@ -356,8 +395,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
// assert.Negative(t, -1)
// assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {

View File

@ -0,0 +1,16 @@
//go:build go1.17
// +build go1.17
// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_legacy.go
package assert
import "reflect"
// Wrapper around reflect.Value.CanConvert, for compatibility
// reasons.
func canConvert(value reflect.Value, to reflect.Type) bool {
return value.CanConvert(to)
}

View File

@ -0,0 +1,16 @@
//go:build !go1.17
// +build !go1.17
// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_can_convert.go
package assert
import "reflect"
// Older versions of Go do not have the reflect.Value.CanConvert
// method.
func canConvert(value reflect.Value, to reflect.Type) bool {
return false
}

View File

@@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
}
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
}
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {

View File

@@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ..
return ErrorAsf(a.t, err, target, msg, args...)
}
// ErrorContains asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// a.ErrorContains(err, expectedErrorSubString)
func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return ErrorContains(a.t, theError, contains, msgAndArgs...)
}
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return ErrorContainsf(a.t, theError, contains, msg, args...)
}
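A hypothetical test sketch (names invented) of the new ErrorContains family, which asserts both that the error is non-nil and that its message includes a substring:

package example_test

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func dial() error {
	return fmt.Errorf("dial tcp 10.0.0.1:443: connection refused")
}

func TestDial(t *testing.T) {
	err := dial()
	assert.ErrorContains(t, err, "connection refused")
	assert.ErrorContainsf(t, err, "connection refused", "unexpected error from %s", "dial")
}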
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {

View File

@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
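An illustrative test sketch: the ordering helpers walk a slice and apply the pairwise comparisons shown above.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestLatencyBucketsAreSorted(t *testing.T) {
	buckets := []float64{0.005, 0.01, 0.025, 0.05, 0.1}
	assert.IsIncreasing(t, buckets)
	assert.IsNonDecreasing(t, buckets)
}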

View File

@@ -718,10 +718,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte
// return (false, false) if impossible.
// return (true, false) if element was not found.
// return (true, true) if element was found.
func containsElement(list interface{}, element interface{}) (ok, found bool) {
listValue := reflect.ValueOf(list)
listType := reflect.TypeOf(list)
if listType == nil {
return false, false
}
listKind := listType.Kind()
defer func() {
if e := recover(); e != nil {
ok = false
@@ -764,7 +768,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
h.Helper()
}
ok, found := containsElement(s, contains)
if !ok {
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
}
@@ -787,7 +791,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
h.Helper()
}
ok, found := containsElement(s, contains)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
}
@ -831,7 +835,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
for i := 0; i < subsetValue.Len(); i++ { for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface() element := subsetValue.Index(i).Interface()
ok, found := includeElement(list, element) ok, found := containsElement(list, element)
if !ok { if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
} }
@ -852,7 +856,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
h.Helper() h.Helper()
} }
if subset == nil { if subset == nil {
return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
} }
subsetValue := reflect.ValueOf(subset) subsetValue := reflect.ValueOf(subset)
@ -875,7 +879,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
for i := 0; i < subsetValue.Len(); i++ { for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface() element := subsetValue.Index(i).Interface()
ok, found := includeElement(list, element) ok, found := containsElement(list, element)
if !ok { if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
} }
@ -1000,27 +1004,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
type PanicTestFunc func() type PanicTestFunc func()
// didPanic returns true if the function passed to it panics. Otherwise, it returns false. // didPanic returns true if the function passed to it panics. Otherwise, it returns false.
func didPanic(f PanicTestFunc) (bool, interface{}, string) { func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) {
didPanic = true
didPanic := false
var message interface{}
var stack string
func() {
defer func() { defer func() {
if message = recover(); message != nil { message = recover()
didPanic = true if didPanic {
stack = string(debug.Stack()) stack = string(debug.Stack())
} }
}() }()
// call the target function // call the target function
f() f()
didPanic = false
}() return
return didPanic, message, stack
} }
// Panics asserts that the code inside the specified PanicTestFunc panics. // Panics asserts that the code inside the specified PanicTestFunc panics.
@ -1161,11 +1159,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs
bf, bok := toFloat(actual) bf, bok := toFloat(actual)
if !aok || !bok { if !aok || !bok {
return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) return Fail(t, "Parameters must be numerical", msgAndArgs...)
}
if math.IsNaN(af) && math.IsNaN(bf) {
return true
} }
if math.IsNaN(af) { if math.IsNaN(af) {
return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) return Fail(t, "Expected must not be NaN", msgAndArgs...)
} }
if math.IsNaN(bf) { if math.IsNaN(bf) {
@ -1188,7 +1190,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn
if expected == nil || actual == nil || if expected == nil || actual == nil ||
reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(actual).Kind() != reflect.Slice ||
reflect.TypeOf(expected).Kind() != reflect.Slice { reflect.TypeOf(expected).Kind() != reflect.Slice {
return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) return Fail(t, "Parameters must be slice", msgAndArgs...)
} }
actualSlice := reflect.ValueOf(actual) actualSlice := reflect.ValueOf(actual)
@ -1250,8 +1252,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m
func calcRelativeError(expected, actual interface{}) (float64, error) { func calcRelativeError(expected, actual interface{}) (float64, error) {
af, aok := toFloat(expected) af, aok := toFloat(expected)
if !aok { bf, bok := toFloat(actual)
return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) if !aok || !bok {
return 0, fmt.Errorf("Parameters must be numerical")
}
if math.IsNaN(af) && math.IsNaN(bf) {
return 0, nil
} }
if math.IsNaN(af) { if math.IsNaN(af) {
return 0, errors.New("expected value must not be NaN") return 0, errors.New("expected value must not be NaN")
@ -1259,10 +1265,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if af == 0 { if af == 0 {
return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
} }
bf, bok := toFloat(actual)
if !bok {
return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
}
if math.IsNaN(bf) { if math.IsNaN(bf) {
return 0, errors.New("actual value must not be NaN") return 0, errors.New("actual value must not be NaN")
} }
@ -1298,7 +1300,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
if expected == nil || actual == nil || if expected == nil || actual == nil ||
reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(actual).Kind() != reflect.Slice ||
reflect.TypeOf(expected).Kind() != reflect.Slice { reflect.TypeOf(expected).Kind() != reflect.Slice {
return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) return Fail(t, "Parameters must be slice", msgAndArgs...)
} }
actualSlice := reflect.ValueOf(actual) actualSlice := reflect.ValueOf(actual)
@ -1375,6 +1377,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte
return true return true
} }
// ErrorContains asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// assert.ErrorContains(t, err, expectedErrorSubString)
func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if !Error(t, theError, msgAndArgs...) {
return false
}
actual := theError.Error()
if !strings.Contains(actual, contains) {
return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...)
}
return true
}
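For context, a minimal usage sketch of the new `ErrorContains` assertion (illustrative only and not part of this diff; `loadConfig` and the error text are hypothetical):

```go
package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// loadConfig is a hypothetical helper that always fails.
func loadConfig(path string) error {
	return fmt.Errorf("load %s: %w", path, errors.New("permission denied"))
}

func TestLoadConfig(t *testing.T) {
	err := loadConfig("/etc/app.yaml")
	// Passes as long as err is non-nil and its message contains the substring.
	assert.ErrorContains(t, err, "permission denied")
}
```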
// matchRegexp return true if a specified regexp matches a string. // matchRegexp return true if a specified regexp matches a string.
func matchRegexp(rx interface{}, str interface{}) bool { func matchRegexp(rx interface{}, str interface{}) bool {
@ -1588,12 +1611,17 @@ func diff(expected interface{}, actual interface{}) string {
} }
var e, a string var e, a string
if et != reflect.TypeOf("") {
e = spewConfig.Sdump(expected) switch et {
a = spewConfig.Sdump(actual) case reflect.TypeOf(""):
} else {
e = reflect.ValueOf(expected).String() e = reflect.ValueOf(expected).String()
a = reflect.ValueOf(actual).String() a = reflect.ValueOf(actual).String()
case reflect.TypeOf(time.Time{}):
e = spewConfigStringerEnabled.Sdump(expected)
a = spewConfigStringerEnabled.Sdump(actual)
default:
e = spewConfig.Sdump(expected)
a = spewConfig.Sdump(actual)
} }
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
@ -1625,6 +1653,14 @@ var spewConfig = spew.ConfigState{
MaxDepth: 10, MaxDepth: 10,
} }
var spewConfigStringerEnabled = spew.ConfigState{
Indent: " ",
DisablePointerAddresses: true,
DisableCapacities: true,
SortKeys: true,
MaxDepth: 10,
}
type tHelper interface { type tHelper interface {
Helper() Helper()
} }


@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
t.FailNow() t.FailNow()
} }
// ErrorContains asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// assert.ErrorContains(t, err, expectedErrorSubString)
func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.ErrorContains(t, theError, contains, msgAndArgs...) {
return
}
t.FailNow()
}
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.ErrorContainsf(t, theError, contains, msg, args...) {
return
}
t.FailNow()
}
// ErrorIs asserts that at least one of the errors in err's chain matches target. // ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is. // This is a wrapper for errors.Is.
func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {


@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ..
ErrorAsf(a.t, err, target, msg, args...) ErrorAsf(a.t, err, target, msg, args...)
} }
// ErrorContains asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// a.ErrorContains(err, expectedErrorSubString)
func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
ErrorContains(a.t, theError, contains, msgAndArgs...)
}
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
ErrorContainsf(a.t, theError, contains, msg, args...)
}
// ErrorIs asserts that at least one of the errors in err's chain matches target. // ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is. // This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) {


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package otelgrpc package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
import ( import (
"context" "context"


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package otelgrpc package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
// gRPC tracing middleware // gRPC tracing middleware
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md
@ -33,7 +33,7 @@ import (
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/baggage"
"go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0" semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
) )


@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package internal package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
import ( import (
"strings" "strings"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0" semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
) )
// ParseFullMethod returns a span name following the OpenTelemetry semantic // ParseFullMethod returns a span name following the OpenTelemetry semantic


@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package otelgrpc package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
import ( import (
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0" semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
) )
// Semantic conventions for attribute keys for gRPC. // Semantic conventions for attribute keys for gRPC.


@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package otelgrpc package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
// Version is the current release version of the gRPC instrumentation. // Version is the current release version of the gRPC instrumentation.
func Version() string { func Version() string {
return "0.27.0" return "0.32.0"
// This string is updated by the pre_release.sh script during release // This string is updated by the pre_release.sh script during release
} }


@ -16,6 +16,5 @@ gen/
/example/opencensus/opencensus /example/opencensus/opencensus
/example/passthrough/passthrough /example/passthrough/passthrough
/example/prometheus/prometheus /example/prometheus/prometheus
/example/prom-collector/prom-collector
/example/zipkin/zipkin /example/zipkin/zipkin
/example/otel-collector/otel-collector /example/otel-collector/otel-collector


@ -4,11 +4,27 @@ run:
tests: true #Default tests: true #Default
linters: linters:
# Disable everything by default so upgrades to not include new "default
# enabled" linters.
disable-all: true
# Specifically enable linters we want to use.
enable: enable:
- misspell - deadcode
- goimports - errcheck
- revive
- gofmt - gofmt
- goimports
- gosimple
- govet
- godot
- ineffassign
- misspell
- revive
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
issues: issues:
exclude-rules: exclude-rules:
@ -30,3 +46,9 @@ linters-settings:
- cancelled - cancelled
goimports: goimports:
local-prefixes: go.opentelemetry.io local-prefixes: go.opentelemetry.io
godot:
exclude:
# Exclude sentence fragments for lists.
- '^[ ]*[-•]'
# Exclude sentences prefixing a list.
- ':$'

vendor/go.opentelemetry.io/otel/.lycheeignore generated vendored Normal file

@ -0,0 +1,2 @@
http://localhost
http://jaeger-collector


@ -1,16 +0,0 @@
{
"ignorePatterns": [
{
"pattern": "^http(s)?://localhost"
}
],
"replacementPatterns": [
{
"pattern": "^/registry",
"replacement": "https://opentelemetry.io/registry"
}
],
"retryOn429": true,
"retryCount": 5,
"fallbackRetryDelay": "30s"
}


@ -8,6 +8,219 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased] ## [Unreleased]
## [1.7.0/0.30.0] - 2022-04-28
### Added
- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
### Fixed
- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
### Changed
- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790)
### Deprecated
- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
Use the equivalent `Iterator.Attribute` method instead. (#2790)
- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
### Removed
- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
## [0.29.0] - 2022-04-11
### Added
- The metrics global package was added back into several test files. (#2764)
- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
### Removed
- Removed module the `go.opentelemetry.io/otel/sdk/export/metric`.
Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
### Changed
- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
## [1.6.3] - 2022-04-07
### Fixed
- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
## [1.6.2] - 2022-04-06
### Changed
- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
## [1.6.1] - 2022-03-28
### Fixed
- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)
### Security
- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
## [1.6.0/0.28.0] - 2022-03-23
### ⚠️ Notice ⚠️
This update is a breaking change of the unstable Metrics API.
Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified.
### Added
- Add metrics exponential histogram support.
New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
- Add Go 1.18 to our compatibility tests. (#2679)
- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660)
### Changed
- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
High-level changes include:
- Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
These `InstrumentProvider`s are managed with a `Meter`.
- Synchronous and asynchronous instruments are grouped into their own packages based on value types.
- Asynchronous callbacks can now be registered with a `Meter`.
Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
### Fixed
- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677)
## [1.5.0] - 2022-03-16
### Added
- Log the Exporters configuration in the TracerProviders message. (#2578)
- Added support to configure the span limits with environment variables.
The following environment variables are supported. (#2606, #2637)
- `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
- `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
- `OTEL_SPAN_EVENT_COUNT_LIMIT`
- `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
- `OTEL_SPAN_LINK_COUNT_LIMIT`
- `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
If the provided environment variables are invalid (negative), the default values would be used.
- Rename the `gc` runtime name to `go` (#2560)
- Add resource container ID detection. (#2418)
- Add span attribute value length limit.
The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
The default limit for this resource is "unlimited". (#2637)
- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
This option replaces the `WithSpanLimits` option.
Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
### Changed
- Drop oldest tracestate `Member` when capacity is reached. (#2592)
- Add event and link drop counts to the exported data from the `oltptrace` exporter. (#2601)
- Unify path cleaning functionally in the `otlpmetric` and `otlptrace` configuration. (#2639)
- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
### Fixed
- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
- Unlimited span limits are now supported (negative values). (#2636, #2637)
### Deprecated
- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
Use `WithRawSpanLimits` instead.
That option allows setting unlimited and zero limits, this option does not.
This option will be kept until the next major version incremented release. (#2637)
## [1.4.1] - 2022-02-16
### Fixed
- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
## [1.4.0] - 2022-02-11
### Added
- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490)
- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging.
To enable use a logger with Verbosity (V level) `>=1`. (#2500)
- Added support to configure the batch span-processor with environment variables.
The following environment variables are used. (#2515)
- `OTEL_BSP_SCHEDULE_DELAY`
- `OTEL_BSP_EXPORT_TIMEOUT`
- `OTEL_BSP_MAX_QUEUE_SIZE`.
- `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
### Changed
- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
### Deprecated
- Deprecate module the `go.opentelemetry.io/otel/sdk/export/metric`.
Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
### Fixed
- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
- W3C baggage will now decode urlescaped values. (#2529)
- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522)
- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
This drop order still only applies to attributes with unique keys not already contained in the span.
If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
### Removed
- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
- [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
- [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
- [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
## [1.3.0] - 2021-12-10 ## [1.3.0] - 2021-12-10
### ⚠️ Notice ⚠️ ### ⚠️ Notice ⚠️
@ -1639,7 +1852,16 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files. - CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project. - CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.3.0...HEAD [Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.7.0...HEAD
[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0
[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0
[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3
[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2
[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1
[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0
[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0
[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 [1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 [1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0
[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 [1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0


@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners # https://help.github.com/en/articles/about-code-owners
# #
* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod @pellared * @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod @pellared @hanyuancheung
CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod


@ -228,11 +228,11 @@ all options to create a configured `config`.
```go ```go
// newConfig returns an appropriately configured config. // newConfig returns an appropriately configured config.
func newConfig([]Option) config { func newConfig(options ...Option) config {
// Set default values for config. // Set default values for config.
config := config{/* […] */} config := config{/* […] */}
for _, option := range options { for _, option := range options {
option.apply(&config) config = option.apply(config)
} }
// Preform any validation here. // Preform any validation here.
return config return config
@ -253,7 +253,7 @@ To set the value of the options a `config` contains, a corresponding
```go ```go
type Option interface { type Option interface {
apply(*config) apply(config) config
} }
``` ```
@ -261,6 +261,9 @@ Having `apply` unexported makes sure that it will not be used externally.
Moreover, the interface becomes sealed so the user cannot easily implement Moreover, the interface becomes sealed so the user cannot easily implement
the interface on its own. the interface on its own.
The `apply` method should return a modified version of the passed config.
This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
The name of the interface should be prefixed in the same way the The name of the interface should be prefixed in the same way the
corresponding `config` is (if at all). corresponding `config` is (if at all).
@ -283,8 +286,9 @@ func With*(…) Option { … }
```go ```go
type defaultFalseOption bool type defaultFalseOption bool
func (o defaultFalseOption) apply(c *config) { func (o defaultFalseOption) apply(c config) config {
c.Bool = bool(o) c.Bool = bool(o)
return c
} }
// WithOption sets a T to have an option included. // WithOption sets a T to have an option included.
@ -296,8 +300,9 @@ func WithOption() Option {
```go ```go
type defaultTrueOption bool type defaultTrueOption bool
func (o defaultTrueOption) apply(c *config) { func (o defaultTrueOption) apply(c config) config {
c.Bool = bool(o) c.Bool = bool(o)
return c
} }
// WithoutOption sets a T to have Bool option excluded. // WithoutOption sets a T to have Bool option excluded.
@ -313,8 +318,9 @@ type myTypeOption struct {
MyType MyType MyType MyType
} }
func (o myTypeOption) apply(c *config) { func (o myTypeOption) apply(c config) config {
c.MyType = o.MyType c.MyType = o.MyType
return c
} }
// WithMyType sets T to have include MyType. // WithMyType sets T to have include MyType.
@ -326,16 +332,17 @@ func WithMyType(t MyType) Option {
##### Functional Options ##### Functional Options
```go ```go
type optionFunc func(*config) type optionFunc func(config) config
func (fn optionFunc) apply(c *config) { func (fn optionFunc) apply(c config) config {
fn(c) return fn(c)
} }
// WithMyType sets t as MyType. // WithMyType sets t as MyType.
func WithMyType(t MyType) Option { func WithMyType(t MyType) Option {
return optionFunc(func(c *config) { return optionFunc(func(c config) config {
c.MyType = t c.MyType = t
return c
}) })
} }
``` ```
@ -370,12 +377,12 @@ type config struct {
// DogOption apply Dog specific options. // DogOption apply Dog specific options.
type DogOption interface { type DogOption interface {
applyDog(*config) applyDog(config) config
} }
// BirdOption apply Bird specific options. // BirdOption apply Bird specific options.
type BirdOption interface { type BirdOption interface {
applyBird(*config) applyBird(config) config
} }
// Option apply options for all animals. // Option apply options for all animals.
@ -385,16 +392,35 @@ type Option interface {
} }
type weightOption float64 type weightOption float64
func (o weightOption) applyDog(c *config) { c.Weight = float64(o) }
func (o weightOption) applyBird(c *config) { c.Weight = float64(o) } func (o weightOption) applyDog(c config) config {
c.Weight = float64(o)
return c
}
func (o weightOption) applyBird(c config) config {
c.Weight = float64(o)
return c
}
func WithWeight(w float64) Option { return weightOption(w) } func WithWeight(w float64) Option { return weightOption(w) }
type furColorOption string type furColorOption string
func (o furColorOption) applyDog(c *config) { c.Color = string(o) }
func (o furColorOption) applyDog(c config) config {
c.Color = string(o)
return c
}
func WithFurColor(c string) DogOption { return furColorOption(c) } func WithFurColor(c string) DogOption { return furColorOption(c) }
type maxAltitudeOption float64 type maxAltitudeOption float64
func (o maxAltitudeOption) applyBird(c *config) { c.MaxAltitude = float64(o) }
func (o maxAltitudeOption) applyBird(c config) config {
c.MaxAltitude = float64(o)
return c
}
func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
func NewDog(name string, o ...DogOption) Dog {…} func NewDog(name string, o ...DogOption) Dog {…}
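// A hedged usage sketch for the constructors above (not part of the original
// guide; the constructor bodies are elided here, so this only shows the
// intended call pattern, assuming the elided Option interface also satisfies
// DogOption):
//
//	dog := NewDog("Rex", WithWeight(12.5), WithFurColor("brown"))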
@ -478,10 +504,11 @@ Approvers:
- [Evan Torrie](https://github.com/evantorrie), Verizon Media - [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Josh MacDonald](https://github.com/jmacd), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep
- [Sam Xie](https://github.com/XSAM) - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [David Ashpole](https://github.com/dashpole), Google - [David Ashpole](https://github.com/dashpole), Google
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Robert Pająk](https://github.com/pellared), Splunk - [Robert Pająk](https://github.com/pellared), Splunk
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
Maintainers: Maintainers:


@ -12,13 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
EXAMPLES := $(shell ./get_main_pkgs.sh ./example)
TOOLS_MOD_DIR := ./internal/tools TOOLS_MOD_DIR := ./internal/tools
# All source code and documents. Used in spell check.
ALL_DOCS := $(shell find . -name '*.md' -type f | sort) ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
# All directories with go.mod files related to opentelemetry library. Used for building, testing and linting. ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example' | sort)) $(shell find ./example -type f -name 'go.mod' -exec dirname {} \; | sort) OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort) ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
GO = go GO = go
@ -27,8 +25,8 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit .DEFAULT_GOAL := precommit
.PHONY: precommit ci .PHONY: precommit ci
precommit: dependabot-check license-check lint build examples test-default precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default
ci: precommit check-clean-work-tree test-coverage ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
# Tools # Tools
@ -49,6 +47,12 @@ $(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
CROSSLINK = $(TOOLS)/crosslink CROSSLINK = $(TOOLS)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink
SEMCONVKIT = $(TOOLS)/semconvkit
$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
DBOTCONF = $(TOOLS)/dbotconf
$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
GOLANGCI_LINT = $(TOOLS)/golangci-lint GOLANGCI_LINT = $(TOOLS)/golangci-lint
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
@ -68,55 +72,51 @@ GOJQ = $(TOOLS)/gojq
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq $(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
.PHONY: tools .PHONY: tools
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT)
# Build # Build
.PHONY: examples generate build .PHONY: generate build
examples:
@set -e; for dir in $(EXAMPLES); do \
echo "$(GO) build $${dir}/..."; \
(cd "$${dir}" && \
$(GO) build .); \
done
generate: $(STRINGER) $(PORTO) generate: $(OTEL_GO_MOD_DIRS:%=generate/%)
set -e; for dir in $(ALL_GO_MOD_DIRS); do \ generate/%: DIR=$*
echo "$(GO) generate $${dir}/..."; \ generate/%: | $(STRINGER) $(PORTO)
(cd "$${dir}" && \ @echo "$(GO) generate $(DIR)/..." \
PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && \ && cd $(DIR) \
$(PORTO) -w .); \ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w .
done
build: generate build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
# Build all package code including testing code. build/%: DIR=$*
set -e; for dir in $(ALL_GO_MOD_DIRS); do \ build/%:
echo "$(GO) build $${dir}/..."; \ @echo "$(GO) build $(DIR)/..." \
(cd "$${dir}" && \ && cd $(DIR) \
$(GO) build ./... && \ && $(GO) build ./...
$(GO) list ./... \
build-tests/%: DIR=$*
build-tests/%:
@echo "$(GO) build tests $(DIR)/..." \
&& cd $(DIR) \
&& $(GO) list ./... \
| grep -v third_party \ | grep -v third_party \
| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null); \ | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
done
# Tests # Tests
TEST_TARGETS := test-default test-bench test-short test-verbose test-race TEST_TARGETS := test-default test-bench test-short test-verbose test-race
.PHONY: $(TEST_TARGETS) test .PHONY: $(TEST_TARGETS) test
test-default: ARGS=-v -race test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short test-short: ARGS=-short
test-verbose: ARGS=-v test-verbose: ARGS=-v -race
test-race: ARGS=-race
$(TEST_TARGETS): test $(TEST_TARGETS): test
test: test: $(OTEL_GO_MOD_DIRS:%=test/%)
@set -e; for dir in $(ALL_GO_MOD_DIRS); do \ test/%: DIR=$*
echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $${dir}/..."; \ test/%:
(cd "$${dir}" && \ @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
$(GO) list ./... \ && cd $(DIR) \
&& $(GO) list ./... \
| grep -v third_party \ | grep -v third_party \
| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)); \ | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
done
COVERAGE_MODE = atomic COVERAGE_MODE = atomic
COVERAGE_PROFILE = coverage.out COVERAGE_PROFILE = coverage.out
@ -129,37 +129,52 @@ test-coverage: | $(GOCOVMERGE)
(cd "$${dir}" && \ (cd "$${dir}" && \
$(GO) list ./... \ $(GO) list ./... \
| grep -v third_party \ | grep -v third_party \
| grep -v 'semconv/v.*' \
| xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
$(GO) tool cover -html=coverage.out -o coverage.html); \ $(GO) tool cover -html=coverage.out -o coverage.html); \
done; \ done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
.PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint
golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
golangci-lint/%: DIR=$*
golangci-lint/%: | $(GOLANGCI_LINT)
@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
&& cd $(DIR) \
&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
.PHONY: crosslink
crosslink: | $(CROSSLINK)
@echo "cross-linking all go modules" \
&& $(CROSSLINK)
.PHONY: go-mod-tidy
go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
go-mod-tidy/%: DIR=$*
go-mod-tidy/%: | crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
&& $(GO) mod tidy
.PHONY: lint-modules
lint-modules: go-mod-tidy
.PHONY: lint .PHONY: lint
lint: misspell lint-modules | $(GOLANGCI_LINT) lint: misspell lint-modules golangci-lint
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "golangci-lint in $${dir}"; \
(cd "$${dir}" && \
$(GOLANGCI_LINT) run --fix && \
$(GOLANGCI_LINT) run); \
done
.PHONY: vanity-import-check .PHONY: vanity-import-check
vanity-import-check: | $(PORTO) vanity-import-check: | $(PORTO)
$(PORTO) --include-internal -l . @$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)"
.PHONY: vanity-import-fix
vanity-import-fix: | $(PORTO)
@$(PORTO) --include-internal -w .
.PHONY: misspell .PHONY: misspell
misspell: | $(MISSPELL) misspell: | $(MISSPELL)
$(MISSPELL) -w $(ALL_DOCS) @$(MISSPELL) -w $(ALL_DOCS)
.PHONY: lint-modules
lint-modules: | $(CROSSLINK)
set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
echo "$(GO) mod tidy in $${dir}"; \
(cd "$${dir}" && \
$(GO) mod tidy); \
done
echo "cross-linking all go modules"
$(CROSSLINK)
.PHONY: license-check .PHONY: license-check
license-check: license-check:
@ -171,19 +186,14 @@ license-check:
exit 1; \ exit 1; \
fi fi
DEPENDABOT_CONFIG = .github/dependabot.yml
.PHONY: dependabot-check .PHONY: dependabot-check
dependabot-check: dependabot-check: | $(DBOTCONF)
@result=$$( \ @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)"
for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.//' ); \
do grep -q "directory: \+$$f" .github/dependabot.yml \ .PHONY: dependabot-generate
|| echo "$$f"; \ dependabot-generate: | $(DBOTCONF)
done; \ @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
); \
if [ -n "$$result" ]; then \
echo "missing go.mod dependabot check:"; echo "$$result"; \
echo "new modules need to be added to the .github/dependabot.yml file"; \
exit 1; \
fi
.PHONY: check-clean-work-tree .PHONY: check-clean-work-tree
check-clean-work-tree: check-clean-work-tree:
@ -195,6 +205,15 @@ check-clean-work-tree:
exit 1; \ exit 1; \
fi fi
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
@[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
@[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
@$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/trace" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
@$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/resource" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
@$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: prerelease .PHONY: prerelease
prerelease: | $(MULTIMOD) prerelease: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )


@ -41,20 +41,28 @@ This project is tested on the following systems.
| OS | Go Version | Architecture | | OS | Go Version | Architecture |
| ------- | ---------- | ------------ | | ------- | ---------- | ------------ |
| Ubuntu | 1.18 | amd64 |
| Ubuntu | 1.17 | amd64 | | Ubuntu | 1.17 | amd64 |
| Ubuntu | 1.16 | amd64 | | Ubuntu | 1.16 | amd64 |
| Ubuntu | 1.18 | 386 |
| Ubuntu | 1.17 | 386 | | Ubuntu | 1.17 | 386 |
| Ubuntu | 1.16 | 386 | | Ubuntu | 1.16 | 386 |
| MacOS | 1.18 | amd64 |
| MacOS | 1.17 | amd64 | | MacOS | 1.17 | amd64 |
| MacOS | 1.16 | amd64 | | MacOS | 1.16 | amd64 |
| Windows | 1.18 | amd64 |
| Windows | 1.17 | amd64 | | Windows | 1.17 | amd64 |
| Windows | 1.16 | amd64 | | Windows | 1.16 | amd64 |
| Windows | 1.18 | 386 |
| Windows | 1.17 | 386 | | Windows | 1.17 | 386 |
| Windows | 1.16 | 386 | | Windows | 1.16 | 386 |
While this project should work for other systems, no compatibility guarantees While this project should work for other systems, no compatibility guarantees
are made for those systems currently. are made for those systems currently.
Go 1.18 was added in March of 2022.
Go 1.16 will be removed around June 2022.
## Getting Started ## Getting Started
You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/). You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
@ -76,7 +84,7 @@ libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/
If you need to extend the telemetry an instrumentation library provides or want If you need to extend the telemetry an instrumentation library provides or want
to build your own instrumentation for your application directly you will need to build your own instrumentation for your application directly you will need
to use the to use the
[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api) [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
package. The included [examples](./example/) are a good way to see some package. The included [examples](./example/) are a good way to see some
practical uses of this process. practical uses of this process.
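For a flavor of the manual instrumentation this paragraph refers to, here is a minimal sketch using only the public `otel` API (illustrative, not part of this README; the tracer and span names are arbitrary):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func doWork(ctx context.Context) {
	// Obtain a named Tracer from the globally registered TracerProvider
	// (a no-op provider unless the application installs an SDK).
	tracer := otel.Tracer("example.com/app")

	// Start a span, do the work, then end the span. Pass ctx downstream so
	// any child spans nest under this one.
	ctx, span := tracer.Start(ctx, "doWork")
	defer span.End()
	_ = ctx
}

func main() {
	doWork(context.Background())
}
```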
@ -95,8 +103,6 @@ All officially supported exporters for the OpenTelemetry project are contained i
| [stdout](./exporters/stdout/) | ✓ | ✓ | | [stdout](./exporters/stdout/) | ✓ | ✓ |
| [Zipkin](./exporters/zipkin/) | | ✓ | | [Zipkin](./exporters/zipkin/) | | ✓ |
Additionally, OpenTelemetry community supported exporters can be found in the [contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters).
## Contributing ## Contributing
See the [contributing documentation](CONTRIBUTING.md). See the [contributing documentation](CONTRIBUTING.md).


@ -2,35 +2,23 @@
## Semantic Convention Generation ## Semantic Convention Generation
If a new version of the OpenTelemetry Specification has been released it will be necessary to generate a new New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated.
semantic convention package from the YAML definitions in the specification repository. There is a `semconvgen` utility The `semconv-generate` make target is used for this.
installed by `make tools` that can be used to generate the a package with the name matching the specification
version number under the `semconv` package. This will ideally be done soon after the specification release is
tagged. Make sure that the specification repo contains a checkout of the the latest tagged release so that the
generated files match the released semantic conventions.
There are currently two categories of semantic conventions that must be generated, `resource` and `trace`. 1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag.
2. Run the `make semconv-generate ...` target from this repository.
``` For example,
.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/resource -t semconv/template.j2
.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/trace -t semconv/template.j2 ```sh
export TAG="v1.7.0" # Change to the release version you are generating.
export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification"
git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG"
make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO.
``` ```
Using default values for all options other than `input` will result in using the `template.j2` template to This should create a new sub-package of [`semconv`](./semconv).
generate `resource.go` and `trace.go` in `/path/to/otelgo/repo/semconv/<version>`. Ensure things look correct before submitting a pull request to include the addition.
There are several ancillary files that are not generated and should be copied into the new package from the
prior package, with updates made as appropriate to canonical import path statements and constant values.
These files include:
* doc.go
* exception.go
* http(_test)?.go
* schema.go
Uses of the previous schema version in this repository should be updated to use the newly generated version.
No tooling for this exists at present, so use find/replace in your editor of choice or craft a `grep | sed`
pipeline if you like living on the edge.
## Pre-Release ## Pre-Release
@ -108,7 +96,6 @@ It is critical you make sure the version you push upstream is correct.
Finally create a Release for the new `<new tag>` on GitHub. Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release. The release body should include all the release notes from the Changelog for this release.
Additionally, the `tag.sh` script generates commit logs since last release which can be used to supplement the release notes.
## Verify Examples ## Verify Examples
@ -131,3 +118,5 @@ Once verified be sure to [make a release for the `contrib` repository](https://g
Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification


@ -21,19 +21,17 @@ import (
) )
type ( type (
// Encoder is a mechanism for serializing a label set into a // Encoder is a mechanism for serializing an attribute set into a specific
// specific string representation that supports caching, to // string representation that supports caching, to avoid repeated
// avoid repeated serialization. An example could be an // serialization. An example could be an exporter encoding the attribute
// exporter encoding the label set into a wire representation. // set into a wire representation.
Encoder interface { Encoder interface {
// Encode returns the serialized encoding of the label // Encode returns the serialized encoding of the attribute set using
// set using its Iterator. This result may be cached // its Iterator. This result may be cached by a attribute.Set.
// by a attribute.Set.
Encode(iterator Iterator) string Encode(iterator Iterator) string
// ID returns a value that is unique for each class of // ID returns a value that is unique for each class of attribute
// label encoder. Label encoders allocate these using // encoder. Attribute encoders allocate these using `NewEncoderID`.
// `NewEncoderID`.
ID() EncoderID ID() EncoderID
} }
@ -43,54 +41,53 @@ type (
value uint64 value uint64
} }
// defaultLabelEncoder uses a sync.Pool of buffers to reduce // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
// the number of allocations used in encoding labels. This // allocations used in encoding attributes. This implementation encodes a
// implementation encodes a comma-separated list of key=value, // comma-separated list of key=value, with '/'-escaping of '=', ',', and
// with '/'-escaping of '=', ',', and '\'. // '\'.
defaultLabelEncoder struct { defaultAttrEncoder struct {
// pool is a pool of labelset builders. The buffers in this // pool is a pool of attribute set builders. The buffers in this pool
// pool grow to a size that most label encodings will not // grow to a size that most attribute encodings will not allocate new
// allocate new memory. // memory.
pool sync.Pool // *bytes.Buffer pool sync.Pool // *bytes.Buffer
} }
) )
// escapeChar is used to ensure uniqueness of the label encoding where // escapeChar is used to ensure uniqueness of the attribute encoding where
// keys or values contain either '=' or ','. Since there is no parser // keys or values contain either '=' or ','. Since there is no parser needed
// needed for this encoding and its only requirement is to be unique, // for this encoding and its only requirement is to be unique, this choice is
// this choice is arbitrary. Users will see these in some exporters // arbitrary. Users will see these in some exporters (e.g., stdout), so the
// (e.g., stdout), so the backslash ('\') is used as a conventional choice. // backslash ('\') is used as a conventional choice.
const escapeChar = '\\' const escapeChar = '\\'
var ( var (
_ Encoder = &defaultLabelEncoder{} _ Encoder = &defaultAttrEncoder{}
// encoderIDCounter is for generating IDs for other label // encoderIDCounter is for generating IDs for other attribute encoders.
// encoders.
encoderIDCounter uint64 encoderIDCounter uint64
defaultEncoderOnce sync.Once defaultEncoderOnce sync.Once
defaultEncoderID = NewEncoderID() defaultEncoderID = NewEncoderID()
defaultEncoderInstance *defaultLabelEncoder defaultEncoderInstance *defaultAttrEncoder
) )
// NewEncoderID returns a unique label encoder ID. It should be // NewEncoderID returns a unique attribute encoder ID. It should be called
// called once per each type of label encoder. Preferably in init() or // once per each type of attribute encoder. Preferably in init() or in var
// in var definition. // definition.
func NewEncoderID() EncoderID { func NewEncoderID() EncoderID {
return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
} }
// DefaultEncoder returns a label encoder that encodes labels // DefaultEncoder returns an attribute encoder that encodes attributes in such
// in such a way that each escaped label's key is followed by an equal // a way that each escaped attribute's key is followed by an equal sign and
// sign and then by an escaped label's value. All key-value pairs are // then by an escaped attribute's value. All key-value pairs are separated by
// separated by a comma. // a comma.
// //
// Escaping is done by prepending a backslash before either a // Escaping is done by prepending a backslash before either a backslash, equal
// backslash, equal sign or a comma. // sign or a comma.
func DefaultEncoder() Encoder { func DefaultEncoder() Encoder {
defaultEncoderOnce.Do(func() { defaultEncoderOnce.Do(func() {
defaultEncoderInstance = &defaultLabelEncoder{ defaultEncoderInstance = &defaultAttrEncoder{
pool: sync.Pool{ pool: sync.Pool{
New: func() interface{} { New: func() interface{} {
return &bytes.Buffer{} return &bytes.Buffer{}
@ -101,15 +98,14 @@ func DefaultEncoder() Encoder {
return defaultEncoderInstance return defaultEncoderInstance
} }
// Encode is a part of an implementation of the LabelEncoder // Encode is a part of an implementation of the AttributeEncoder interface.
// interface. func (d *defaultAttrEncoder) Encode(iter Iterator) string {
func (d *defaultLabelEncoder) Encode(iter Iterator) string {
buf := d.pool.Get().(*bytes.Buffer) buf := d.pool.Get().(*bytes.Buffer)
defer d.pool.Put(buf) defer d.pool.Put(buf)
buf.Reset() buf.Reset()
for iter.Next() { for iter.Next() {
i, keyValue := iter.IndexedLabel() i, keyValue := iter.IndexedAttribute()
if i > 0 { if i > 0 {
_, _ = buf.WriteRune(',') _, _ = buf.WriteRune(',')
} }
@ -126,8 +122,8 @@ func (d *defaultLabelEncoder) Encode(iter Iterator) string {
return buf.String() return buf.String()
} }
// ID is a part of an implementation of the LabelEncoder interface. // ID is a part of an implementation of the AttributeEncoder interface.
func (*defaultLabelEncoder) ID() EncoderID { func (*defaultAttrEncoder) ID() EncoderID {
return defaultEncoderID return defaultEncoderID
} }
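
For orientation, here is a minimal sketch of how the renamed default attribute encoder is exercised through the public `attribute` API; the expected output in the comment assumes the key sorting and backslash escaping described above.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Attributes are sorted by key inside the Set. Values containing
	// '\', '=' or ',' are escaped with a backslash by the default encoder.
	s := attribute.NewSet(
		attribute.String("service", "checkout"),
		attribute.String("expr", "a=b,c"),
	)
	fmt.Println(s.Encoded(attribute.DefaultEncoder()))
	// Expected output (given the escaping above): expr=a\=b\,c,service=checkout
}
```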

View File

@ -14,16 +14,16 @@
package attribute // import "go.opentelemetry.io/otel/attribute" package attribute // import "go.opentelemetry.io/otel/attribute"
// Iterator allows iterating over the set of labels in order, // Iterator allows iterating over the set of attributes in order, sorted by
// sorted by key. // key.
type Iterator struct { type Iterator struct {
storage *Set storage *Set
idx int idx int
} }
// MergeIterator supports iterating over two sets of labels while // MergeIterator supports iterating over two sets of attributes while
// eliminating duplicate values from the combined set. The first // eliminating duplicate values from the combined set. The first iterator
// iterator value takes precedence. // value takes precedence.
type MergeIterator struct { type MergeIterator struct {
one oneIterator one oneIterator
two oneIterator two oneIterator
@ -33,11 +33,11 @@ type MergeIterator struct {
type oneIterator struct { type oneIterator struct {
iter Iterator iter Iterator
done bool done bool
label KeyValue attr KeyValue
} }
// Next moves the iterator to the next position. Returns false if there // Next moves the iterator to the next position. Returns false if there are no
// are no more labels. // more attributes.
func (i *Iterator) Next() bool { func (i *Iterator) Next() bool {
i.idx++ i.idx++
return i.idx < i.Len() return i.idx < i.Len()
@ -45,30 +45,41 @@ func (i *Iterator) Next() bool {
// Label returns current KeyValue. Must be called only after Next returns // Label returns current KeyValue. Must be called only after Next returns
// true. // true.
//
// Deprecated: Use Attribute instead.
func (i *Iterator) Label() KeyValue { func (i *Iterator) Label() KeyValue {
return i.Attribute()
}
// Attribute returns the current KeyValue of the Iterator. It must be called
// only after Next returns true.
func (i *Iterator) Attribute() KeyValue {
kv, _ := i.storage.Get(i.idx) kv, _ := i.storage.Get(i.idx)
return kv return kv
} }
// Attribute is a synonym for Label().
func (i *Iterator) Attribute() KeyValue {
return i.Label()
}
// IndexedLabel returns current index and attribute. Must be called only // IndexedLabel returns current index and attribute. Must be called only
// after Next returns true. // after Next returns true.
//
// Deprecated: Use IndexedAttribute instead.
func (i *Iterator) IndexedLabel() (int, KeyValue) { func (i *Iterator) IndexedLabel() (int, KeyValue) {
return i.idx, i.Label() return i.idx, i.Attribute()
} }
// Len returns a number of labels in the iterator's `*Set`. // IndexedAttribute returns current index and attribute. Must be called only
// after Next returns true.
func (i *Iterator) IndexedAttribute() (int, KeyValue) {
return i.idx, i.Attribute()
}
// Len returns a number of attributes in the iterated set.
func (i *Iterator) Len() int { func (i *Iterator) Len() int {
return i.storage.Len() return i.storage.Len()
} }
// ToSlice is a convenience function that creates a slice of labels // ToSlice is a convenience function that creates a slice of attributes from
// from the passed iterator. The iterator is set up to start from the // the passed iterator. The iterator is set up to start from the beginning
// beginning before creating the slice. // before creating the slice.
func (i *Iterator) ToSlice() []KeyValue { func (i *Iterator) ToSlice() []KeyValue {
l := i.Len() l := i.Len()
if l == 0 { if l == 0 {
@ -77,12 +88,12 @@ func (i *Iterator) ToSlice() []KeyValue {
i.idx = -1 i.idx = -1
slice := make([]KeyValue, 0, l) slice := make([]KeyValue, 0, l)
for i.Next() { for i.Next() {
slice = append(slice, i.Label()) slice = append(slice, i.Attribute())
} }
return slice return slice
} }
// NewMergeIterator returns a MergeIterator for merging two label sets // NewMergeIterator returns a MergeIterator for merging two attribute sets.
// Duplicates are resolved by taking the value from the first set. // Duplicates are resolved by taking the value from the first set.
func NewMergeIterator(s1, s2 *Set) MergeIterator { func NewMergeIterator(s1, s2 *Set) MergeIterator {
mi := MergeIterator{ mi := MergeIterator{
@ -102,42 +113,49 @@ func makeOne(iter Iterator) oneIterator {
func (oi *oneIterator) advance() { func (oi *oneIterator) advance() {
if oi.done = !oi.iter.Next(); !oi.done { if oi.done = !oi.iter.Next(); !oi.done {
oi.label = oi.iter.Label() oi.attr = oi.iter.Attribute()
} }
} }
// Next returns true if there is another label available. // Next returns true if there is another attribute available.
func (m *MergeIterator) Next() bool { func (m *MergeIterator) Next() bool {
if m.one.done && m.two.done { if m.one.done && m.two.done {
return false return false
} }
if m.one.done { if m.one.done {
m.current = m.two.label m.current = m.two.attr
m.two.advance() m.two.advance()
return true return true
} }
if m.two.done { if m.two.done {
m.current = m.one.label m.current = m.one.attr
m.one.advance() m.one.advance()
return true return true
} }
if m.one.label.Key == m.two.label.Key { if m.one.attr.Key == m.two.attr.Key {
m.current = m.one.label // first iterator label value wins m.current = m.one.attr // first iterator attribute value wins
m.one.advance() m.one.advance()
m.two.advance() m.two.advance()
return true return true
} }
if m.one.label.Key < m.two.label.Key { if m.one.attr.Key < m.two.attr.Key {
m.current = m.one.label m.current = m.one.attr
m.one.advance() m.one.advance()
return true return true
} }
m.current = m.two.label m.current = m.two.attr
m.two.advance() m.two.advance()
return true return true
} }
// Label returns the current value after Next() returns true. // Label returns the current value after Next() returns true.
//
// Deprecated: Use Attribute instead.
func (m *MergeIterator) Label() KeyValue { func (m *MergeIterator) Label() KeyValue {
return m.current return m.current
} }
// Attribute returns the current value after Next() returns true.
func (m *MergeIterator) Attribute() KeyValue {
return m.current
}
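
A short example of the renamed iterator methods; it assumes the merge precedence described in the comments (the first set wins on duplicate keys), and the attribute names are made up.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	one := attribute.NewSet(attribute.String("env", "prod"), attribute.Int("replicas", 3))
	two := attribute.NewSet(attribute.String("env", "dev"), attribute.Bool("canary", true))

	// Duplicate keys resolve to the first iterator's value, so "env" is "prod".
	mi := attribute.NewMergeIterator(&one, &two)
	for mi.Next() {
		kv := mi.Attribute() // Label() still works but is deprecated.
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```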

View File

@ -26,7 +26,7 @@ type KeyValue struct {
// Valid returns if kv is a valid OpenTelemetry attribute. // Valid returns if kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool { func (kv KeyValue) Valid() bool {
return kv.Key != "" && kv.Value.Type() != INVALID return kv.Key.Defined() && kv.Value.Type() != INVALID
} }
// Bool creates a KeyValue with a BOOL Value type. // Bool creates a KeyValue with a BOOL Value type.
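
A tiny illustration of the Valid change: the zero-value KeyValue has an undefined key and an INVALID value type, so it reports false, while constructed attributes report true.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	var zero attribute.KeyValue
	fmt.Println(zero.Valid())                       // false: key undefined, value INVALID
	fmt.Println(attribute.String("k", "v").Valid()) // true
}
```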

View File

@ -21,49 +21,42 @@ import (
) )
type ( type (
// Set is the representation for a distinct label set. It // Set is the representation for a distinct attribute set. It manages an
// manages an immutable set of labels, with an internal cache // immutable set of attributes, with an internal cache for storing
// for storing label encodings. // attribute encodings.
// //
// This type supports the `Equivalent` method of comparison // This type supports the Equivalent method of comparison using values of
// using values of type `Distinct`. // type Distinct.
//
// This type is used to implement:
// 1. Metric labels
// 2. Resource sets
// 3. Correlation map (TODO)
Set struct { Set struct {
equivalent Distinct equivalent Distinct
} }
// Distinct wraps a variable-size array of `KeyValue`, // Distinct wraps a variable-size array of KeyValue, constructed with keys
// constructed with keys in sorted order. This can be used as // in sorted order. This can be used as a map key or for equality checking
// a map key or for equality checking between Sets. // between Sets.
Distinct struct { Distinct struct {
iface interface{} iface interface{}
} }
// Filter supports removing certain labels from label sets. // Filter supports removing certain attributes from attribute sets. When
// When the filter returns true, the label will be kept in // the filter returns true, the attribute will be kept in the filtered
// the filtered label set. When the filter returns false, the // attribute set. When the filter returns false, the attribute is excluded
// label is excluded from the filtered label set, and the // from the filtered attribute set, and the attribute instead appears in
// label instead appears in the `removed` list of excluded labels. // the removed list of excluded attributes.
Filter func(KeyValue) bool Filter func(KeyValue) bool
// Sortable implements `sort.Interface`, used for sorting // Sortable implements sort.Interface, used for sorting KeyValue. This is
// `KeyValue`. This is an exported type to support a // an exported type to support a memory optimization. A pointer to one of
// memory optimization. A pointer to one of these is needed // these is needed for the call to sort.Stable(), which the caller may
// for the call to `sort.Stable()`, which the caller may // provide in order to avoid an allocation. See NewSetWithSortable().
// provide in order to avoid an allocation. See
// `NewSetWithSortable()`.
Sortable []KeyValue Sortable []KeyValue
) )
var ( var (
// keyValueType is used in `computeDistinctReflect`. // keyValueType is used in computeDistinctReflect.
keyValueType = reflect.TypeOf(KeyValue{}) keyValueType = reflect.TypeOf(KeyValue{})
// emptySet is returned for empty label sets. // emptySet is returned for empty attribute sets.
emptySet = &Set{ emptySet = &Set{
equivalent: Distinct{ equivalent: Distinct{
iface: [0]KeyValue{}, iface: [0]KeyValue{},
@ -78,17 +71,17 @@ func EmptySet() *Set {
return emptySet return emptySet
} }
// reflect abbreviates `reflect.ValueOf`. // reflect abbreviates reflect.ValueOf.
func (d Distinct) reflect() reflect.Value { func (d Distinct) reflect() reflect.Value {
return reflect.ValueOf(d.iface) return reflect.ValueOf(d.iface)
} }
// Valid returns true if this value refers to a valid `*Set`. // Valid returns true if this value refers to a valid Set.
func (d Distinct) Valid() bool { func (d Distinct) Valid() bool {
return d.iface != nil return d.iface != nil
} }
// Len returns the number of labels in this set. // Len returns the number of attributes in this set.
func (l *Set) Len() int { func (l *Set) Len() int {
if l == nil || !l.equivalent.Valid() { if l == nil || !l.equivalent.Valid() {
return 0 return 0
@ -96,7 +89,7 @@ func (l *Set) Len() int {
return l.equivalent.reflect().Len() return l.equivalent.reflect().Len()
} }
// Get returns the KeyValue at ordered position `idx` in this set. // Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) { func (l *Set) Get(idx int) (KeyValue, bool) {
if l == nil { if l == nil {
return KeyValue{}, false return KeyValue{}, false
@ -142,7 +135,7 @@ func (l *Set) HasValue(k Key) bool {
return ok return ok
} }
// Iter returns an iterator for visiting the labels in this set. // Iter returns an iterator for visiting the attributes in this set.
func (l *Set) Iter() Iterator { func (l *Set) Iter() Iterator {
return Iterator{ return Iterator{
storage: l, storage: l,
@ -150,18 +143,17 @@ func (l *Set) Iter() Iterator {
} }
} }
// ToSlice returns the set of labels belonging to this set, sorted, // ToSlice returns the set of attributes belonging to this set, sorted, where
// where keys appear no more than once. // keys appear no more than once.
func (l *Set) ToSlice() []KeyValue { func (l *Set) ToSlice() []KeyValue {
iter := l.Iter() iter := l.Iter()
return iter.ToSlice() return iter.ToSlice()
} }
// Equivalent returns a value that may be used as a map key. The // Equivalent returns a value that may be used as a map key. The Distinct type
// Distinct type guarantees that the result will equal the equivalent // guarantees that the result will equal the equivalent Distinct value of any
// Distinct value of any label set with the same elements as this, // attribute set with the same elements as this, where sets are made unique by
// where sets are made unique by choosing the last value in the input // choosing the last value in the input for any given key.
// for any given key.
func (l *Set) Equivalent() Distinct { func (l *Set) Equivalent() Distinct {
if l == nil || !l.equivalent.Valid() { if l == nil || !l.equivalent.Valid() {
return emptySet.equivalent return emptySet.equivalent
@ -174,8 +166,7 @@ func (l *Set) Equals(o *Set) bool {
return l.Equivalent() == o.Equivalent() return l.Equivalent() == o.Equivalent()
} }
// Encoded returns the encoded form of this set, according to // Encoded returns the encoded form of this set, according to encoder.
// `encoder`.
func (l *Set) Encoded(encoder Encoder) string { func (l *Set) Encoded(encoder Encoder) string {
if l == nil || encoder == nil { if l == nil || encoder == nil {
return "" return ""
@ -190,11 +181,11 @@ func empty() Set {
} }
} }
// NewSet returns a new `Set`. See the documentation for // NewSet returns a new Set. See the documentation for
// `NewSetWithSortableFiltered` for more details. // NewSetWithSortableFiltered for more details.
// //
// Except for empty sets, this method adds an additional allocation // Except for empty sets, this method adds an additional allocation compared
// compared with calls that include a `*Sortable`. // with calls that include a Sortable.
func NewSet(kvs ...KeyValue) Set { func NewSet(kvs ...KeyValue) Set {
// Check for empty set. // Check for empty set.
if len(kvs) == 0 { if len(kvs) == 0 {
@ -204,10 +195,10 @@ func NewSet(kvs ...KeyValue) Set {
return s return s
} }
// NewSetWithSortable returns a new `Set`. See the documentation for // NewSetWithSortable returns a new Set. See the documentation for
// `NewSetWithSortableFiltered` for more details. // NewSetWithSortableFiltered for more details.
// //
// This call includes a `*Sortable` option as a memory optimization. // This call includes a Sortable option as a memory optimization.
func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
// Check for empty set. // Check for empty set.
if len(kvs) == 0 { if len(kvs) == 0 {
@ -217,12 +208,11 @@ func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
return s return s
} }
// NewSetWithFiltered returns a new `Set`. See the documentation for // NewSetWithFiltered returns a new Set. See the documentation for
// `NewSetWithSortableFiltered` for more details. // NewSetWithSortableFiltered for more details.
// //
// This call includes a `Filter` to include/exclude label keys from // This call includes a Filter to include/exclude attribute keys from the
// the return value. Excluded keys are returned as a slice of label // return value. Excluded keys are returned as a slice of attribute values.
// values.
func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// Check for empty set. // Check for empty set.
if len(kvs) == 0 { if len(kvs) == 0 {
@ -231,7 +221,7 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
return NewSetWithSortableFiltered(kvs, new(Sortable), filter) return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
} }
// NewSetWithSortableFiltered returns a new `Set`. // NewSetWithSortableFiltered returns a new Set.
// //
// Duplicate keys are eliminated by taking the last value. This // Duplicate keys are eliminated by taking the last value. This
// re-orders the input slice so that unique last-values are contiguous // re-orders the input slice so that unique last-values are contiguous
@ -243,17 +233,16 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// - Caller sees the reordering, but doesn't lose values // - Caller sees the reordering, but doesn't lose values
// - Repeated calls preserve last-value wins. // - Repeated calls preserve last-value wins.
// //
// Note that methods are defined on `*Set`, although this returns `Set`. // Note that methods are defined on Set, although this returns Set. Callers
// Callers can avoid memory allocations by: // can avoid memory allocations by:
// //
// - allocating a `Sortable` for use as a temporary in this method // - allocating a Sortable for use as a temporary in this method
// - allocating a `Set` for storing the return value of this // - allocating a Set for storing the return value of this constructor.
// constructor.
// //
// The result maintains a cache of encoded labels, by attribute.EncoderID. // The result maintains a cache of encoded attributes, by attribute.EncoderID.
// This value should not be copied after its first use. // This value should not be copied after its first use.
// //
// The second `[]KeyValue` return value is a list of labels that were // The second []KeyValue return value is a list of attributes that were
// excluded by the Filter (if non-nil). // excluded by the Filter (if non-nil).
func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
// Check for empty set. // Check for empty set.
@ -293,13 +282,13 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S
}, nil }, nil
} }
// filterSet reorders `kvs` so that included keys are contiguous at // filterSet reorders kvs so that included keys are contiguous at the end of
// the end of the slice, while excluded keys precede the included keys. // the slice, while excluded keys precede the included keys.
func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
var excluded []KeyValue var excluded []KeyValue
// Move labels that do not match the filter so // Move attributes that do not match the filter so they're adjacent before
// they're adjacent before calling computeDistinct(). // calling computeDistinct().
distinctPosition := len(kvs) distinctPosition := len(kvs)
// Swap indistinct keys forward and distinct keys toward the // Swap indistinct keys forward and distinct keys toward the
@ -319,8 +308,8 @@ func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
}, excluded }, excluded
} }
// Filter returns a filtered copy of this `Set`. See the // Filter returns a filtered copy of this Set. See the documentation for
// documentation for `NewSetWithSortableFiltered` for more details. // NewSetWithSortableFiltered for more details.
func (l *Set) Filter(re Filter) (Set, []KeyValue) { func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if re == nil { if re == nil {
return Set{ return Set{
@ -333,9 +322,9 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
return filterSet(l.ToSlice(), re) return filterSet(l.ToSlice(), re)
} }
// computeDistinct returns a `Distinct` using either the fixed- or // computeDistinct returns a Distinct using either the fixed- or
// reflect-oriented code path, depending on the size of the input. // reflect-oriented code path, depending on the size of the input. The input
// The input slice is assumed to already be sorted and de-duplicated. // slice is assumed to already be sorted and de-duplicated.
func computeDistinct(kvs []KeyValue) Distinct { func computeDistinct(kvs []KeyValue) Distinct {
iface := computeDistinctFixed(kvs) iface := computeDistinctFixed(kvs)
if iface == nil { if iface == nil {
@ -346,8 +335,8 @@ func computeDistinct(kvs []KeyValue) Distinct {
} }
} }
// computeDistinctFixed computes a `Distinct` for small slices. It // computeDistinctFixed computes a Distinct for small slices. It returns nil
// returns nil if the input is too large for this code path. // if the input is too large for this code path.
func computeDistinctFixed(kvs []KeyValue) interface{} { func computeDistinctFixed(kvs []KeyValue) interface{} {
switch len(kvs) { switch len(kvs) {
case 1: case 1:
@ -395,8 +384,8 @@ func computeDistinctFixed(kvs []KeyValue) interface{} {
} }
} }
// computeDistinctReflect computes a `Distinct` using reflection, // computeDistinctReflect computes a Distinct using reflection, works for any
// works for any size input. // size input.
func computeDistinctReflect(kvs []KeyValue) interface{} { func computeDistinctReflect(kvs []KeyValue) interface{} {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs { for i, keyValue := range kvs {
@ -405,22 +394,31 @@ func computeDistinctReflect(kvs []KeyValue) interface{} {
return at.Interface() return at.Interface()
} }
// MarshalJSON returns the JSON encoding of the `*Set`. // MarshalJSON returns the JSON encoding of the Set.
func (l *Set) MarshalJSON() ([]byte, error) { func (l *Set) MarshalJSON() ([]byte, error) {
return json.Marshal(l.equivalent.iface) return json.Marshal(l.equivalent.iface)
} }
// Len implements `sort.Interface`. // MarshalLog is the marshaling function used by the logging system to represent this Set.
func (l Set) MarshalLog() interface{} {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
kvs[string(kv.Key)] = kv.Value.Emit()
}
return kvs
}
// Len implements sort.Interface.
func (l *Sortable) Len() int { func (l *Sortable) Len() int {
return len(*l) return len(*l)
} }
// Swap implements `sort.Interface`. // Swap implements sort.Interface.
func (l *Sortable) Swap(i, j int) { func (l *Sortable) Swap(i, j int) {
(*l)[i], (*l)[j] = (*l)[j], (*l)[i] (*l)[i], (*l)[j] = (*l)[j], (*l)[i]
} }
// Less implements `sort.Interface`. // Less implements sort.Interface.
func (l *Sortable) Less(i, j int) bool { func (l *Sortable) Less(i, j int) bool {
return (*l)[i].Key < (*l)[j].Key return (*l)[i].Key < (*l)[j].Key
} }
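
A hedged sketch of the Set API touched above: filtering, using Distinct as a map key, and the JSON form exposed for diagnostics. The attribute names are invented for the example.

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kvs := []attribute.KeyValue{
		attribute.String("user", "alice"),
		attribute.String("host", "db-1"),
	}

	// The filter returns true for attributes to keep; excluded ones come
	// back in the second return value.
	set, dropped := attribute.NewSetWithFiltered(kvs, func(kv attribute.KeyValue) bool {
		return kv.Key != "user"
	})
	fmt.Println(set.Encoded(attribute.DefaultEncoder()), len(dropped)) // host=db-1 1

	// Distinct is comparable, so Equivalent() can be used as a map key.
	counts := map[attribute.Distinct]int{}
	counts[set.Equivalent()]++

	// MarshalJSON (and the new MarshalLog) expose the set for diagnostics.
	b, _ := json.Marshal(&set)
	fmt.Println(string(b))
}
```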

View File

@ -187,7 +187,7 @@ func (v Value) AsFloat64() float64 {
} }
// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
// INT64SLICE. // FLOAT64SLICE.
func (v Value) AsFloat64Slice() []float64 { func (v Value) AsFloat64Slice() []float64 {
if s, ok := v.slice.(*[]float64); ok { if s, ok := v.slice.(*[]float64); ok {
return *s return *s
@ -202,7 +202,7 @@ func (v Value) AsString() string {
} }
// AsStringSlice returns the []string value. Make sure that the Value's type is // AsStringSlice returns the []string value. Make sure that the Value's type is
// INT64SLICE. // STRINGSLICE.
func (v Value) AsStringSlice() []string { func (v Value) AsStringSlice() []string {
if s, ok := v.slice.(*[]string); ok { if s, ok := v.slice.(*[]string); ok {
return *s return *s
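
A brief example of the slice accessors whose doc comments are corrected above; the attribute names are illustrative.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.Float64Slice("latencies_ms", []float64{0.1, 0.2}).Value
	fmt.Println(v.Type(), v.AsFloat64Slice()) // FLOAT64SLICE [0.1 0.2]

	s := attribute.StringSlice("hosts", []string{"a", "b"}).Value
	fmt.Println(s.Type(), s.AsStringSlice()) // STRINGSLICE [a b]
}
```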

View File

@ -61,45 +61,57 @@ type Property struct {
// hasValue indicates if a zero-value value means the property does not // hasValue indicates if a zero-value value means the property does not
// have a value or if it was the zero-value. // have a value or if it was the zero-value.
hasValue bool hasValue bool
// hasData indicates whether the created property contains data or not.
// Properties that do not contain data are invalid with no other check
// required.
hasData bool
} }
func NewKeyProperty(key string) (Property, error) { func NewKeyProperty(key string) (Property, error) {
p := Property{}
if !keyRe.MatchString(key) { if !keyRe.MatchString(key) {
return p, fmt.Errorf("%w: %q", errInvalidKey, key) return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
} }
p.key = key
p := Property{key: key, hasData: true}
return p, nil return p, nil
} }
func NewKeyValueProperty(key, value string) (Property, error) { func NewKeyValueProperty(key, value string) (Property, error) {
p := Property{}
if !keyRe.MatchString(key) { if !keyRe.MatchString(key) {
return p, fmt.Errorf("%w: %q", errInvalidKey, key) return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
} }
if !valueRe.MatchString(value) { if !valueRe.MatchString(value) {
return p, fmt.Errorf("%w: %q", errInvalidValue, value) return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
p := Property{
key: key,
value: value,
hasValue: true,
hasData: true,
} }
p.key = key
p.value = value
p.hasValue = true
return p, nil return p, nil
} }
func newInvalidProperty() Property {
return Property{}
}
// parseProperty attempts to decode a Property from the passed string. It // parseProperty attempts to decode a Property from the passed string. It
// returns an error if the input is invalid according to the W3C Baggage // returns an error if the input is invalid according to the W3C Baggage
// specification. // specification.
func parseProperty(property string) (Property, error) { func parseProperty(property string) (Property, error) {
p := Property{}
if property == "" { if property == "" {
return p, nil return newInvalidProperty(), nil
} }
match := propertyRe.FindStringSubmatch(property) match := propertyRe.FindStringSubmatch(property)
if len(match) != 4 { if len(match) != 4 {
return p, fmt.Errorf("%w: %q", errInvalidProperty, property) return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
} }
p := Property{hasData: true}
if match[1] != "" { if match[1] != "" {
p.key = match[1] p.key = match[1]
} else { } else {
@ -107,6 +119,7 @@ func parseProperty(property string) (Property, error) {
p.value = match[3] p.value = match[3]
p.hasValue = true p.hasValue = true
} }
return p, nil return p, nil
} }
@ -117,6 +130,10 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err) return fmt.Errorf("invalid property: %w", err)
} }
if !p.hasData {
return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p))
}
if !keyRe.MatchString(p.key) { if !keyRe.MatchString(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
} }
@ -220,26 +237,40 @@ func (p properties) String() string {
type Member struct { type Member struct {
key, value string key, value string
properties properties properties properties
// hasData indicates whether the created member contains data or not.
// Members that do not contain data are invalid with no other check
// required.
hasData bool
} }
// NewMember returns a new Member from the passed arguments. An error is // NewMember returns a new Member from the passed arguments. An error is
// returned if the created Member would be invalid according to the W3C // returned if the created Member would be invalid according to the W3C
// Baggage specification. // Baggage specification.
func NewMember(key, value string, props ...Property) (Member, error) { func NewMember(key, value string, props ...Property) (Member, error) {
m := Member{key: key, value: value, properties: properties(props).Copy()} m := Member{
key: key,
value: value,
properties: properties(props).Copy(),
hasData: true,
}
if err := m.validate(); err != nil { if err := m.validate(); err != nil {
return Member{}, err return newInvalidMember(), err
} }
return m, nil return m, nil
} }
func newInvalidMember() Member {
return Member{}
}
// parseMember attempts to decode a Member from the passed string. It returns // parseMember attempts to decode a Member from the passed string. It returns
// an error if the input is invalid according to the W3C Baggage // an error if the input is invalid according to the W3C Baggage
// specification. // specification.
func parseMember(member string) (Member, error) { func parseMember(member string) (Member, error) {
if n := len(member); n > maxBytesPerMembers { if n := len(member); n > maxBytesPerMembers {
return Member{}, fmt.Errorf("%w: %d", errMemberBytes, n) return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
} }
var ( var (
@ -254,7 +285,7 @@ func parseMember(member string) (Member, error) {
for _, pStr := range strings.Split(parts[1], propertyDelimiter) { for _, pStr := range strings.Split(parts[1], propertyDelimiter) {
p, err := parseProperty(pStr) p, err := parseProperty(pStr)
if err != nil { if err != nil {
return Member{}, err return newInvalidMember(), err
} }
props = append(props, p) props = append(props, p)
} }
@ -265,16 +296,21 @@ func parseMember(member string) (Member, error) {
// Take into account a value can contain equal signs (=). // Take into account a value can contain equal signs (=).
kv := strings.SplitN(parts[0], keyValueDelimiter, 2) kv := strings.SplitN(parts[0], keyValueDelimiter, 2)
if len(kv) != 2 { if len(kv) != 2 {
return Member{}, fmt.Errorf("%w: %q", errInvalidMember, member) return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
} }
// "Leading and trailing whitespaces are allowed but MUST be trimmed // "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure." // when converting the header into a data structure."
key, value = strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]) key = strings.TrimSpace(kv[0])
var err error
value, err = url.QueryUnescape(strings.TrimSpace(kv[1]))
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
if !keyRe.MatchString(key) { if !keyRe.MatchString(key) {
return Member{}, fmt.Errorf("%w: %q", errInvalidKey, key) return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
} }
if !valueRe.MatchString(value) { if !valueRe.MatchString(value) {
return Member{}, fmt.Errorf("%w: %q", errInvalidValue, value) return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
} }
default: default:
// This should never happen unless a developer has changed the string // This should never happen unless a developer has changed the string
@ -283,12 +319,16 @@ func parseMember(member string) (Member, error) {
panic("failed to parse baggage member") panic("failed to parse baggage member")
} }
return Member{key: key, value: value, properties: props}, nil return Member{key: key, value: value, properties: props, hasData: true}, nil
} }
// validate ensures m conforms to the W3C Baggage specification, returning an // validate ensures m conforms to the W3C Baggage specification, returning an
// error otherwise. // error otherwise.
func (m Member) validate() error { func (m Member) validate() error {
if !m.hasData {
return fmt.Errorf("%w: %q", errInvalidMember, m)
}
if !keyRe.MatchString(m.key) { if !keyRe.MatchString(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key) return fmt.Errorf("%w: %q", errInvalidKey, m.key)
} }
@ -324,9 +364,10 @@ type Baggage struct { //nolint:golint
list baggage.List list baggage.List
} }
// New returns a new valid Baggage. It returns an error if the passed members // New returns a new valid Baggage. It returns an error if it results in a
// are invalid according to the W3C Baggage specification or if it results in // Baggage exceeding limits set in that specification.
// a Baggage exceeding limits set in that specification. //
// It expects all the provided members to have already been validated.
func New(members ...Member) (Baggage, error) { func New(members ...Member) (Baggage, error) {
if len(members) == 0 { if len(members) == 0 {
return Baggage{}, nil return Baggage{}, nil
@ -334,9 +375,10 @@ func New(members ...Member) (Baggage, error) {
b := make(baggage.List) b := make(baggage.List)
for _, m := range members { for _, m := range members {
if err := m.validate(); err != nil { if !m.hasData {
return Baggage{}, err return Baggage{}, errInvalidMember
} }
// OpenTelemetry resolves duplicates by last-one-wins. // OpenTelemetry resolves duplicates by last-one-wins.
b[m.key] = baggage.Item{ b[m.key] = baggage.Item{
Value: m.value, Value: m.value,
@ -401,6 +443,8 @@ func Parse(bStr string) (Baggage, error) {
// //
// If there is no list-member matching the passed key the returned Member will // If there is no list-member matching the passed key the returned Member will
// be a zero-value Member. // be a zero-value Member.
// The returned member is not validated, as we assume the validation happened
// when it was added to the Baggage.
func (b Baggage) Member(key string) Member { func (b Baggage) Member(key string) Member {
v, ok := b.list[key] v, ok := b.list[key]
if !ok { if !ok {
@ -408,7 +452,7 @@ func (b Baggage) Member(key string) Member {
// where a zero-valued Member is included in the Baggage because a // where a zero-valued Member is included in the Baggage because a
// zero-valued Member is invalid according to the W3C Baggage // zero-valued Member is invalid according to the W3C Baggage
// specification (it has an empty key). // specification (it has an empty key).
return Member{} return newInvalidMember()
} }
return Member{ return Member{
@ -420,6 +464,9 @@ func (b Baggage) Member(key string) Member {
// Members returns all the baggage list-members. // Members returns all the baggage list-members.
// The order of the returned list-members does not have significance. // The order of the returned list-members does not have significance.
//
// The returned members are not validated, as we assume the validation happened
// when they were added to the Baggage.
func (b Baggage) Members() []Member { func (b Baggage) Members() []Member {
if len(b.list) == 0 { if len(b.list) == 0 {
return nil return nil
@ -443,8 +490,8 @@ func (b Baggage) Members() []Member {
// If member is invalid according to the W3C Baggage specification, an error // If member is invalid according to the W3C Baggage specification, an error
// is returned with the original Baggage. // is returned with the original Baggage.
func (b Baggage) SetMember(member Member) (Baggage, error) { func (b Baggage) SetMember(member Member) (Baggage, error) {
if err := member.validate(); err != nil { if !member.hasData {
return b, fmt.Errorf("%w: %s", errInvalidMember, err) return b, errInvalidMember
} }
n := len(b.list) n := len(b.list)

View File

@ -0,0 +1,34 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal contains common functionality for all OTLP exporters.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
import (
"fmt"
"path"
"strings"
)
// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead.
func CleanPath(urlPath string, defaultPath string) string {
tmp := path.Clean(strings.TrimSpace(urlPath))
if tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {
tmp = fmt.Sprintf("/%s", tmp)
}
return tmp
}
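
CleanPath lives in an internal package, so it cannot be imported from user code; the sketch below simply mirrors its logic to illustrate the expected inputs and outputs.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// cleanPath mirrors the internal CleanPath helper above, for illustration only.
func cleanPath(urlPath, defaultPath string) string {
	tmp := path.Clean(strings.TrimSpace(urlPath))
	if tmp == "." {
		return defaultPath
	}
	if !path.IsAbs(tmp) {
		tmp = "/" + tmp
	}
	return tmp
}

func main() {
	fmt.Println(cleanPath("", "/v1/traces"))                // /v1/traces (empty input falls back)
	fmt.Println(cleanPath("  /v1//traces/ ", "/v1/traces")) // /v1/traces (spaces and duplicates removed)
	fmt.Println(cleanPath("custom/path", "/v1/traces"))     // /custom/path (made absolute)
}
```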

View File

@ -0,0 +1,148 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// ConfigFn is the generic function used to set a config.
type ConfigFn func(*EnvOptionsReader)
// EnvOptionsReader reads the required environment variables.
type EnvOptionsReader struct {
GetEnv func(string) string
ReadFile func(string) ([]byte, error)
Namespace string
}
// Apply runs every ConfigFn.
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
for _, o := range opts {
o(e)
}
}
// GetEnvValue gets an OTLP environment variable value of the specified key
// using the GetEnv function.
// This function prepends the OTLP specified namespace to all key lookups.
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
return v, v != ""
}
// WithString retrieves the specified config and passes it to ConfigFn as a string.
func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
fn(v)
}
}
}
// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
if d, err := strconv.Atoi(v); err == nil {
fn(time.Duration(d) * time.Millisecond)
}
}
}
}
// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
fn(stringToHeader(v))
}
}
}
// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
if u, err := url.Parse(v); err == nil {
fn(u)
}
}
}
}
// WithTLSConfig retrieves the specified config and passes it to ConfigFn as a crypto/tls.Config.
func WithTLSConfig(n string, fn func(*tls.Config)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
if b, err := e.ReadFile(v); err == nil {
if c, err := createTLSConfig(b); err == nil {
fn(c)
}
}
}
}
}
func keyWithNamespace(ns, key string) string {
if ns == "" {
return key
}
return fmt.Sprintf("%s_%s", ns, key)
}
func stringToHeader(value string) map[string]string {
headersPairs := strings.Split(value, ",")
headers := make(map[string]string)
for _, header := range headersPairs {
nameValue := strings.SplitN(header, "=", 2)
if len(nameValue) < 2 {
continue
}
name, err := url.QueryUnescape(nameValue[0])
if err != nil {
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(nameValue[1])
if err != nil {
continue
}
trimmedValue := strings.TrimSpace(value)
headers[trimmedName] = trimmedValue
}
return headers
}
func createTLSConfig(certBytes []byte) (*tls.Config, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
return &tls.Config{
RootCAs: cp,
}, nil
}
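
envconfig is internal to the OTLP exporters, so external code cannot compile against it; the sketch below only illustrates how the reader is wired inside the exporter packages, using a stubbed environment.

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
)

func main() {
	stub := map[string]string{
		"OTEL_EXPORTER_OTLP_TIMEOUT": "5000", // milliseconds
		"OTEL_EXPORTER_OTLP_HEADERS": "team=obs,token=s3cr%3Dt",
	}
	reader := envconfig.EnvOptionsReader{
		GetEnv:    func(k string) string { return stub[k] },
		ReadFile:  func(string) ([]byte, error) { return nil, nil },
		Namespace: "OTEL_EXPORTER_OTLP",
	}

	reader.Apply(
		// "TIMEOUT" is looked up as OTEL_EXPORTER_OTLP_TIMEOUT and parsed as milliseconds.
		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { fmt.Println(d) }), // 5s
		// Header values are split on ',' and '=' and then query-unescaped.
		envconfig.WithHeaders("HEADERS", func(h map[string]string) { fmt.Println(h["token"]) }), // s3cr=t
	)
}
```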

View File

@ -39,11 +39,13 @@ these environment variables is interpreted, see [the OpenTelemetry
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md).

| Environment variable | Option | Default value |
| ------------------------------------------------------------------------ |------------------------------ | -------------------------------------------------------- |
| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` | `WithEndpoint` `WithInsecure` | `https://localhost:4317` or `https://localhost:4318`[^1] |
| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig` | |
| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS` | `WithHeaders` | |
| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression` | |
| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` | `WithTimeout` | `10s` |

[^1]: The gRPC client defaults to `https://localhost:4317` and the HTTP client to `https://localhost:4318`.

Configuration using options has precedence over the environment variables.
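
A hedged end-to-end illustration of that precedence rule: the endpoint set via the environment is overridden by the explicit option, while the timeout still comes from the environment. Hostnames are placeholders.

```go
package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	// Environment configuration (placeholder values).
	os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "https://collector.example.com:4317")
	os.Setenv("OTEL_EXPORTER_OTLP_TIMEOUT", "5000") // milliseconds

	ctx := context.Background()
	exp, err := otlptracegrpc.New(ctx,
		// Explicit options win over the environment, so this endpoint is used.
		otlptracegrpc.WithEndpoint("collector.internal:4317"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```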

View File

@ -100,3 +100,14 @@ func NewUnstarted(client Client) *Exporter {
client: client, client: client,
} }
} }
// MarshalLog is the marshaling function used by the logging system to represent this exporter.
func (e *Exporter) MarshalLog() interface{} {
return struct {
Type string
Client Client
}{
Type: "otlptrace",
Client: e.client,
}
}

View File

@ -16,65 +16,59 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/
import ( import (
"crypto/tls" "crypto/tls"
"fmt"
"io/ioutil" "io/ioutil"
"net/url" "net/url"
"os" "os"
"path" "path"
"strconv"
"strings" "strings"
"time" "time"
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
) )
var DefaultEnvOptionsReader = EnvOptionsReader{ // DefaultEnvOptionsReader is the default environments reader.
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
GetEnv: os.Getenv, GetEnv: os.Getenv,
ReadFile: ioutil.ReadFile, ReadFile: ioutil.ReadFile,
Namespace: "OTEL_EXPORTER_OTLP",
} }
func ApplyGRPCEnvConfigs(cfg *Config) { // ApplyGRPCEnvConfigs applies the env configurations for gRPC.
DefaultEnvOptionsReader.ApplyGRPCEnvConfigs(cfg) func ApplyGRPCEnvConfigs(cfg Config) Config {
} opts := getOptionsFromEnv()
func ApplyHTTPEnvConfigs(cfg *Config) {
DefaultEnvOptionsReader.ApplyHTTPEnvConfigs(cfg)
}
type EnvOptionsReader struct {
GetEnv func(string) string
ReadFile func(filename string) ([]byte, error)
}
func (e *EnvOptionsReader) ApplyHTTPEnvConfigs(cfg *Config) {
opts := e.GetOptionsFromEnv()
for _, opt := range opts { for _, opt := range opts {
opt.ApplyHTTPOption(cfg) cfg = opt.ApplyGRPCOption(cfg)
} }
return cfg
} }
func (e *EnvOptionsReader) ApplyGRPCEnvConfigs(cfg *Config) { // ApplyHTTPEnvConfigs applies the env configurations for HTTP.
opts := e.GetOptionsFromEnv() func ApplyHTTPEnvConfigs(cfg Config) Config {
opts := getOptionsFromEnv()
for _, opt := range opts { for _, opt := range opts {
opt.ApplyGRPCOption(cfg) cfg = opt.ApplyHTTPOption(cfg)
} }
return cfg
} }
func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption { func getOptionsFromEnv() []GenericOption {
var opts []GenericOption opts := []GenericOption{}
// Endpoint DefaultEnvOptionsReader.Apply(
if v, ok := e.getEnvValue("TRACES_ENDPOINT"); ok { envconfig.WithURL("ENDPOINT", func(u *url.URL) {
u, err := url.Parse(v) opts = append(opts, withEndpointScheme(u))
// Ignore invalid values. opts = append(opts, newSplitOption(func(cfg Config) Config {
if err == nil { cfg.Traces.Endpoint = u.Host
// This is used to set the scheme for OTLP/HTTP. // For OTLP/HTTP endpoint URLs without a per-signal
if insecureSchema(u.Scheme) { // configuration, the passed endpoint is used as a base URL
opts = append(opts, WithInsecure()) // and the signals are sent to these paths relative to that.
} else { cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
opts = append(opts, WithSecure()) return cfg
} }, withEndpointForGRPC(u)))
opts = append(opts, newSplitOption(func(cfg *Config) { }),
envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
opts = append(opts, withEndpointScheme(u))
opts = append(opts, newSplitOption(func(cfg Config) Config {
cfg.Traces.Endpoint = u.Host cfg.Traces.Endpoint = u.Host
// For endpoint URLs for OTLP/HTTP per-signal variables, the // For endpoint URLs for OTLP/HTTP per-signal variables, the
// URL MUST be used as-is without any modification. The only // URL MUST be used as-is without any modification. The only
@ -85,137 +79,51 @@ func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption {
path = "/" path = "/"
} }
cfg.Traces.URLPath = path cfg.Traces.URLPath = path
}, func(cfg *Config) { return cfg
// For OTLP/gRPC endpoints, this is the target to which the }, withEndpointForGRPC(u)))
// exporter is going to send telemetry. }),
cfg.Traces.Endpoint = path.Join(u.Host, u.Path) envconfig.WithTLSConfig("CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
})) envconfig.WithTLSConfig("TRACES_CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
} envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
} else if v, ok = e.getEnvValue("ENDPOINT"); ok { envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
u, err := url.Parse(v) WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
// Ignore invalid values. WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
if err == nil { envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
// This is used to set the scheme for OTLP/HTTP. envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
if insecureSchema(u.Scheme) { )
opts = append(opts, WithInsecure())
} else {
opts = append(opts, WithSecure())
}
opts = append(opts, newSplitOption(func(cfg *Config) {
cfg.Traces.Endpoint = u.Host
// For OTLP/HTTP endpoint URLs without a per-signal
// configuration, the passed endpoint is used as a base URL
// and the signals are sent to these paths relative to that.
cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
}, func(cfg *Config) {
// For OTLP/gRPC endpoints, this is the target to which the
// exporter is going to send telemetry.
cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
}))
}
}
// Certificate File
if path, ok := e.getEnvValue("CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp exporter certificate '%s': %w", path, err))
}
}
if path, ok := e.getEnvValue("TRACES_CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp traces exporter certificate '%s': %w", path, err))
}
}
// Headers
if h, ok := e.getEnvValue("HEADERS"); ok {
opts = append(opts, WithHeaders(stringToHeader(h)))
}
if h, ok := e.getEnvValue("TRACES_HEADERS"); ok {
opts = append(opts, WithHeaders(stringToHeader(h)))
}
// Compression
if c, ok := e.getEnvValue("COMPRESSION"); ok {
opts = append(opts, WithCompression(stringToCompression(c)))
}
if c, ok := e.getEnvValue("TRACES_COMPRESSION"); ok {
opts = append(opts, WithCompression(stringToCompression(c)))
}
// Timeout
if t, ok := e.getEnvValue("TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond))
}
}
if t, ok := e.getEnvValue("TRACES_TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond))
}
}
return opts return opts
} }
func insecureSchema(schema string) bool { func withEndpointScheme(u *url.URL) GenericOption {
switch strings.ToLower(schema) { switch strings.ToLower(u.Scheme) {
case "http", "unix": case "http", "unix":
return true return WithInsecure()
default: default:
return false return WithSecure()
} }
} }
// getEnvValue gets an OTLP environment variable value of the specified key using the GetEnv function. func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
// This function already prepends the OTLP prefix to all key lookup. return func(cfg Config) Config {
func (e *EnvOptionsReader) getEnvValue(key string) (string, bool) { // For OTLP/gRPC endpoints, this is the target to which the
v := strings.TrimSpace(e.GetEnv(fmt.Sprintf("OTEL_EXPORTER_OTLP_%s", key))) // exporter is going to send telemetry.
return v, v != "" cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
return cfg
}
} }
func (e *EnvOptionsReader) readTLSConfig(path string) (*tls.Config, error) { // WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
b, err := e.ReadFile(path) func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
if err != nil { return func(e *envconfig.EnvOptionsReader) {
return nil, err if v, ok := e.GetEnvValue(n); ok {
} cp := NoCompression
return CreateTLSConfig(b) switch v {
}
func stringToCompression(value string) Compression {
switch value {
case "gzip": case "gzip":
return GzipCompression cp = GzipCompression
} }
return NoCompression fn(cp)
} }
func stringToHeader(value string) map[string]string {
headersPairs := strings.Split(value, ",")
headers := make(map[string]string)
for _, header := range headersPairs {
nameValue := strings.SplitN(header, "=", 2)
if len(nameValue) < 2 {
continue
} }
name, err := url.QueryUnescape(nameValue[0])
if err != nil {
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(nameValue[1])
if err != nil {
continue
}
trimmedValue := strings.TrimSpace(value)
headers[trimmedName] = trimmedValue
}
return headers
} }
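
The endpoint handling above lives in an internal package; this standalone sketch just mirrors the scheme and path logic to show what a single OTEL_EXPORTER_OTLP_ENDPOINT value turns into for the HTTP and gRPC clients, assuming the default traces path "/v1/traces".

```go
package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

func main() {
	u, err := url.Parse("http://collector.local:4318/otlp")
	if err != nil {
		panic(err)
	}

	// "http" and "unix" schemes select the insecure option.
	scheme := strings.ToLower(u.Scheme)
	insecure := scheme == "http" || scheme == "unix"

	// OTLP/HTTP: the endpoint is the host, and the default traces path is
	// appended to the base URL path.
	httpEndpoint := u.Host
	httpURLPath := path.Join(u.Path, "/v1/traces")

	// OTLP/gRPC: the dial target is host plus path.
	grpcTarget := path.Join(u.Host, u.Path)

	fmt.Println(insecure, httpEndpoint, httpURLPath, grpcTarget)
	// true collector.local:4318 /otlp/v1/traces collector.local:4318/otlp
}
```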

View File

@ -22,8 +22,10 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/backoff" "google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/encoding/gzip"
"go.opentelemetry.io/otel/exporters/otlp/internal"
"go.opentelemetry.io/otel/exporters/otlp/internal/retry" "go.opentelemetry.io/otel/exporters/otlp/internal/retry"
) )
@ -64,27 +66,41 @@ type (
} }
) )
func NewDefaultConfig() Config { // NewHTTPConfig returns a new Config with all settings applied from opts and
c := Config{ // any unset setting using the default HTTP config values.
func NewHTTPConfig(opts ...HTTPOption) Config {
cfg := Config{
Traces: SignalConfig{ Traces: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorPort), Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
URLPath: DefaultTracesPath, URLPath: DefaultTracesPath,
Compression: NoCompression, Compression: NoCompression,
Timeout: DefaultTimeout, Timeout: DefaultTimeout,
}, },
RetryConfig: retry.DefaultConfig, RetryConfig: retry.DefaultConfig,
} }
cfg = ApplyHTTPEnvConfigs(cfg)
return c for _, opt := range opts {
cfg = opt.ApplyHTTPOption(cfg)
}
cfg.Traces.URLPath = internal.CleanPath(cfg.Traces.URLPath, DefaultTracesPath)
return cfg
} }
// NewGRPCConfig returns a new Config with all settings applied from opts and // NewGRPCConfig returns a new Config with all settings applied from opts and
// any unset setting using the default gRPC config values. // any unset setting using the default gRPC config values.
func NewGRPCConfig(opts ...GRPCOption) Config { func NewGRPCConfig(opts ...GRPCOption) Config {
cfg := NewDefaultConfig() cfg := Config{
ApplyGRPCEnvConfigs(&cfg) Traces: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
URLPath: DefaultTracesPath,
Compression: NoCompression,
Timeout: DefaultTimeout,
},
RetryConfig: retry.DefaultConfig,
}
cfg = ApplyGRPCEnvConfigs(cfg)
for _, opt := range opts { for _, opt := range opts {
opt.ApplyGRPCOption(&cfg) cfg = opt.ApplyGRPCOption(cfg)
} }
if cfg.ServiceConfig != "" { if cfg.ServiceConfig != "" {
@ -94,7 +110,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
if cfg.Traces.GRPCCredentials != nil { if cfg.Traces.GRPCCredentials != nil {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
} else if cfg.Traces.Insecure { } else if cfg.Traces.Insecure {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithInsecure()) cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
} else { } else {
// Default to using the host's root CA. // Default to using the host's root CA.
creds := credentials.NewTLS(nil) creds := credentials.NewTLS(nil)
@ -121,8 +137,8 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
type ( type (
// GenericOption applies an option to the HTTP or gRPC driver. // GenericOption applies an option to the HTTP or gRPC driver.
GenericOption interface { GenericOption interface {
-        ApplyHTTPOption(*Config)
-        ApplyGRPCOption(*Config)
+        ApplyHTTPOption(Config) Config
+        ApplyGRPCOption(Config) Config
         // A private method to prevent users implementing the
         // interface and so future additions to it will not
@@ -132,7 +148,7 @@ type (
     // HTTPOption applies an option to the HTTP driver.
     HTTPOption interface {
-        ApplyHTTPOption(*Config)
+        ApplyHTTPOption(Config) Config
         // A private method to prevent users implementing the
         // interface and so future additions to it will not
@@ -142,7 +158,7 @@ type (
     // GRPCOption applies an option to the gRPC driver.
     GRPCOption interface {
-        ApplyGRPCOption(*Config)
+        ApplyGRPCOption(Config) Config
         // A private method to prevent users implementing the
         // interface and so future additions to it will not
@@ -154,128 +170,138 @@ type (
 // genericOption is an option that applies the same logic
 // for both gRPC and HTTP.
 type genericOption struct {
-    fn func(*Config)
+    fn func(Config) Config
 }
-func (g *genericOption) ApplyGRPCOption(cfg *Config) {
-    g.fn(cfg)
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+    return g.fn(cfg)
 }
-func (g *genericOption) ApplyHTTPOption(cfg *Config) {
-    g.fn(cfg)
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+    return g.fn(cfg)
 }
 func (genericOption) private() {}
-func newGenericOption(fn func(cfg *Config)) GenericOption {
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
     return &genericOption{fn: fn}
 }
 // splitOption is an option that applies different logics
 // for gRPC and HTTP.
 type splitOption struct {
-    httpFn func(*Config)
-    grpcFn func(*Config)
+    httpFn func(Config) Config
+    grpcFn func(Config) Config
 }
-func (g *splitOption) ApplyGRPCOption(cfg *Config) {
-    g.grpcFn(cfg)
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+    return g.grpcFn(cfg)
 }
-func (g *splitOption) ApplyHTTPOption(cfg *Config) {
-    g.httpFn(cfg)
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+    return g.httpFn(cfg)
 }
 func (splitOption) private() {}
-func newSplitOption(httpFn func(cfg *Config), grpcFn func(cfg *Config)) GenericOption {
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
     return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
 }
 // httpOption is an option that is only applied to the HTTP driver.
 type httpOption struct {
-    fn func(*Config)
+    fn func(Config) Config
 }
-func (h *httpOption) ApplyHTTPOption(cfg *Config) {
-    h.fn(cfg)
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+    return h.fn(cfg)
 }
 func (httpOption) private() {}
-func NewHTTPOption(fn func(cfg *Config)) HTTPOption {
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
     return &httpOption{fn: fn}
 }
 // grpcOption is an option that is only applied to the gRPC driver.
 type grpcOption struct {
-    fn func(*Config)
+    fn func(Config) Config
 }
-func (h *grpcOption) ApplyGRPCOption(cfg *Config) {
-    h.fn(cfg)
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+    return h.fn(cfg)
 }
 func (grpcOption) private() {}
-func NewGRPCOption(fn func(cfg *Config)) GRPCOption {
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
     return &grpcOption{fn: fn}
 }
 // Generic Options
 func WithEndpoint(endpoint string) GenericOption {
-    return newGenericOption(func(cfg *Config) {
+    return newGenericOption(func(cfg Config) Config {
         cfg.Traces.Endpoint = endpoint
+        return cfg
     })
 }
 func WithCompression(compression Compression) GenericOption {
-    return newGenericOption(func(cfg *Config) {
+    return newGenericOption(func(cfg Config) Config {
         cfg.Traces.Compression = compression
+        return cfg
     })
 }
 func WithURLPath(urlPath string) GenericOption {
-    return newGenericOption(func(cfg *Config) {
+    return newGenericOption(func(cfg Config) Config {
         cfg.Traces.URLPath = urlPath
+        return cfg
     })
 }
 func WithRetry(rc retry.Config) GenericOption {
-    return newGenericOption(func(cfg *Config) {
+    return newGenericOption(func(cfg Config) Config {
         cfg.RetryConfig = rc
+        return cfg
     })
 }
 func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
-    return newSplitOption(func(cfg *Config) {
+    return newSplitOption(func(cfg Config) Config {
         cfg.Traces.TLSCfg = tlsCfg.Clone()
-    }, func(cfg *Config) {
+        return cfg
+    }, func(cfg Config) Config {
         cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
+        return cfg
     })
 }
 func WithInsecure() GenericOption {
-    return newGenericOption(func(cfg *Config) {
+    return newGenericOption(func(cfg Config) Config {
         cfg.Traces.Insecure = true
+        return cfg
     })
 }
 func WithSecure() GenericOption {
-    return newGenericOption(func(cfg *Config) {
+    return newGenericOption(func(cfg Config) Config {
         cfg.Traces.Insecure = false
+        return cfg
     })
 }
 func WithHeaders(headers map[string]string) GenericOption {
-    return newGenericOption(func(cfg *Config) {
+    return newGenericOption(func(cfg Config) Config {
         cfg.Traces.Headers = headers
+        return cfg
     })
 }
 func WithTimeout(duration time.Duration) GenericOption {
-    return newGenericOption(func(cfg *Config) {
+    return newGenericOption(func(cfg Config) Config {
         cfg.Traces.Timeout = duration
+        return cfg
     })
 }
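The functional change in this file is that exporter options no longer mutate a shared *Config; each option now takes a Config by value and returns the updated copy. Below is a minimal, self-contained sketch of that value-returning option pattern; the Config, WithEndpoint, WithTimeout, and NewConfig names are illustrative stand-ins, not the real otlpconfig API.

    // Sketch of the value-returning functional option pattern used above.
    // All types here are illustrative stand-ins, not the otlpconfig package.
    package main

    import (
        "fmt"
        "time"
    )

    // Config stands in for the exporter configuration passed by value.
    type Config struct {
        Endpoint string
        Timeout  time.Duration
    }

    // GenericOption applies to a Config and returns the updated copy, so
    // options compose without sharing a mutable pointer.
    type GenericOption func(Config) Config

    func WithEndpoint(endpoint string) GenericOption {
        return func(cfg Config) Config {
            cfg.Endpoint = endpoint
            return cfg
        }
    }

    func WithTimeout(d time.Duration) GenericOption {
        return func(cfg Config) Config {
            cfg.Timeout = d
            return cfg
        }
    }

    func NewConfig(opts ...GenericOption) Config {
        cfg := Config{Endpoint: "localhost:4317", Timeout: 10 * time.Second}
        for _, opt := range opts {
            cfg = opt(cfg) // each option returns the new value; nothing mutates in place
        }
        return cfg
    }

    func main() {
        cfg := NewConfig(WithEndpoint("collector:4317"), WithTimeout(5*time.Second))
        fmt.Printf("%+v\n", cfg)
    }

Passing the configuration by value keeps every option free of shared mutable state, which is the property the upstream signature change is after.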


@@ -15,9 +15,10 @@
 package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
 const (
-    // DefaultCollectorPort is the port the Exporter will attempt connect to
-    // if no collector port is provided.
-    DefaultCollectorPort uint16 = 4317
+    // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+    DefaultCollectorGRPCPort uint16 = 4317
+    // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+    DefaultCollectorHTTPPort uint16 = 4318
     // DefaultCollectorHost is the host address the Exporter will attempt
     // connect to if no collector address is provided.
     DefaultCollectorHost string = "localhost"
@@ -36,7 +37,7 @@ const (
     GzipCompression
 )
-// Marshaler describes the kind of message format sent to the collector
+// Marshaler describes the kind of message format sent to the collector.
 type Marshaler int
 const (
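The constant change above is more than a rename: the single DefaultCollectorPort (4317) becomes separate gRPC (4317) and HTTP (4318) defaults, matching the conventional OTLP ports. As a purely hypothetical helper, not part of the upstream package, picking the right default per transport could look like this:

    // Hypothetical sketch: combine the split per-protocol defaults into an
    // endpoint string. Constant names mirror, but are not, the upstream ones.
    package main

    import (
        "fmt"
        "net"
        "strconv"
    )

    const (
        defaultCollectorHost            = "localhost"
        defaultCollectorGRPCPort uint16 = 4317 // OTLP over gRPC
        defaultCollectorHTTPPort uint16 = 4318 // OTLP over HTTP
    )

    // defaultEndpoint returns host:port for the chosen transport.
    func defaultEndpoint(httpTransport bool) string {
        port := defaultCollectorGRPCPort
        if httpTransport {
            port = defaultCollectorHTTPPort
        }
        return net.JoinHostPort(defaultCollectorHost, strconv.Itoa(int(port)))
    }

    func main() {
        fmt.Println(defaultEndpoint(false)) // localhost:4317
        fmt.Println(defaultEndpoint(true))  // localhost:4318
    }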


@@ -19,11 +19,11 @@ import (
     commonpb "go.opentelemetry.io/proto/otlp/common/v1"
 )
-func InstrumentationLibrary(il instrumentation.Library) *commonpb.InstrumentationLibrary {
+func InstrumentationScope(il instrumentation.Library) *commonpb.InstrumentationScope {
     if il == (instrumentation.Library{}) {
         return nil
     }
-    return &commonpb.InstrumentationLibrary{
+    return &commonpb.InstrumentationScope{
         Name:    il.Name,
         Version: il.Version,
     }
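The rename from InstrumentationLibrary to InstrumentationScope follows the OTLP protocol's own rename; the transform's behaviour is unchanged, including the guard that maps a zero-value library to nil. A self-contained sketch of that shape, using stand-in types rather than the real SDK and protobuf types:

    // Sketch of the scope transform using placeholder types.
    package main

    import "fmt"

    // library stands in for the SDK's instrumentation.Library value type.
    type library struct {
        Name    string
        Version string
    }

    // scope stands in for the OTLP InstrumentationScope message.
    type scope struct {
        Name    string
        Version string
    }

    // toScope mirrors the transform: a zero-value library yields nil so an
    // empty scope is not emitted on the wire.
    func toScope(il library) *scope {
        if il == (library{}) {
            return nil
        }
        return &scope{Name: il.Name, Version: il.Version}
    }

    func main() {
        fmt.Println(toScope(library{})) // <nil>
        fmt.Printf("%+v\n", toScope(library{Name: "io.containerd", Version: "1.6"}))
    }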


@@ -23,10 +23,6 @@ import (
     tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
 )
-const (
-    maxEventsPerSpan = 128
-)
 // Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
 // ResourceSpans.
 func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
@@ -36,11 +32,11 @@ func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
     rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)
-    type ilsKey struct {
+    type key struct {
         r  attribute.Distinct
         il instrumentation.Library
     }
-    ilsm := make(map[ilsKey]*tracepb.InstrumentationLibrarySpans)
+    ssm := make(map[key]*tracepb.ScopeSpans)
     var resources int
     for _, sd := range sdl {
@@ -49,21 +45,21 @@ func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
         }
         rKey := sd.Resource().Equivalent()
-        iKey := ilsKey{
+        k := key{
             r:  rKey,
             il: sd.InstrumentationLibrary(),
         }
-        ils, iOk := ilsm[iKey]
+        scopeSpan, iOk := ssm[k]
         if !iOk {
             // Either the resource or instrumentation library were unknown.
-            ils = &tracepb.InstrumentationLibrarySpans{
-                InstrumentationLibrary: InstrumentationLibrary(sd.InstrumentationLibrary()),
-                Spans:                  []*tracepb.Span{},
-                SchemaUrl:              sd.InstrumentationLibrary().SchemaURL,
+            scopeSpan = &tracepb.ScopeSpans{
+                Scope:     InstrumentationScope(sd.InstrumentationLibrary()),
+                Spans:     []*tracepb.Span{},
+                SchemaUrl: sd.InstrumentationLibrary().SchemaURL,
             }
         }
-        ils.Spans = append(ils.Spans, span(sd))
-        ilsm[iKey] = ils
+        scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
+        ssm[k] = scopeSpan
         rs, rOk := rsm[rKey]
         if !rOk {
@@ -71,7 +67,7 @@ func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
             // The resource was unknown.
             rs = &tracepb.ResourceSpans{
                 Resource: Resource(sd.Resource()),
-                InstrumentationLibrarySpans: []*tracepb.InstrumentationLibrarySpans{ils},
+                ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
                 SchemaUrl: sd.Resource().SchemaURL(),
             }
             rsm[rKey] = rs
@@ -82,9 +78,9 @@ func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
         // library lookup was unknown because if so we need to add it to the
         // ResourceSpans. Otherwise, the instrumentation library has already
         // been seen and the append we did above will be included it in the
-        // InstrumentationLibrarySpans reference.
+        // ScopeSpans reference.
         if !iOk {
-            rs.InstrumentationLibrarySpans = append(rs.InstrumentationLibrarySpans, ils)
+            rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
         }
     }
@@ -165,6 +161,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
             TraceId:    tid[:],
             SpanId:     sid[:],
             Attributes: KeyValues(otLink.Attributes),
+            DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
         })
     }
     return sl
@@ -176,29 +173,16 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
         return nil
     }
-    evCount := len(es)
-    if evCount > maxEventsPerSpan {
-        evCount = maxEventsPerSpan
-    }
-    events := make([]*tracepb.Span_Event, 0, evCount)
-    nEvents := 0
+    events := make([]*tracepb.Span_Event, len(es))
     // Transform message events
-    for _, e := range es {
-        if nEvents >= maxEventsPerSpan {
-            break
+    for i := 0; i < len(es); i++ {
+        events[i] = &tracepb.Span_Event{
+            Name:                   es[i].Name,
+            TimeUnixNano:           uint64(es[i].Time.UnixNano()),
+            Attributes:             KeyValues(es[i].Attributes),
+            DroppedAttributesCount: uint32(es[i].DroppedAttributeCount),
         }
-        nEvents++
-        events = append(events,
-            &tracepb.Span_Event{
-                Name:         e.Name,
-                TimeUnixNano: uint64(e.Time.UnixNano()),
-                Attributes:   KeyValues(e.Attributes),
-                // TODO (rghetia) : Add Drop Counts when supported.
-            },
-        )
     }
     return events
 }
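Two things change in the span transform above: the exporter-side maxEventsPerSpan cap of 128 is removed (event truncation is left to the SDK's span limits, and the dropped-attribute counts are now forwarded on events and links), and spans are grouped into ScopeSpans rather than InstrumentationLibrarySpans. The grouping logic itself is unchanged; below is a simplified, stand-in sketch of the two-level bucketing by resource and scope, not the real OTLP protobuf types.

    // Sketch of grouping spans by (resource, scope), mirroring the structure
    // of the Spans transform with placeholder types.
    package main

    import "fmt"

    type span struct {
        Resource string // stands in for the span's resource identity
        Scope    string // stands in for the instrumentation scope
        Name     string
    }

    type scopeSpans struct {
        Scope string
        Spans []string
    }

    type resourceSpans struct {
        Resource   string
        ScopeSpans []*scopeSpans
    }

    func group(spans []span) []*resourceSpans {
        rsm := map[string]*resourceSpans{}
        type key struct{ r, scope string }
        ssm := map[key]*scopeSpans{}

        var out []*resourceSpans
        for _, sd := range spans {
            k := key{r: sd.Resource, scope: sd.Scope}
            ss, ok := ssm[k]
            if !ok {
                // New (resource, scope) pair: create its ScopeSpans bucket.
                ss = &scopeSpans{Scope: sd.Scope}
                ssm[k] = ss
            }
            ss.Spans = append(ss.Spans, sd.Name)

            rs, rok := rsm[sd.Resource]
            if !rok {
                // New resource: create its ResourceSpans entry.
                rs = &resourceSpans{Resource: sd.Resource}
                rsm[sd.Resource] = rs
                out = append(out, rs)
            }
            if !ok {
                // Attach each ScopeSpans bucket to its resource exactly once.
                rs.ScopeSpans = append(rs.ScopeSpans, ss)
            }
        }
        return out
    }

    func main() {
        got := group([]span{
            {Resource: "host-a", Scope: "containerd", Name: "pull"},
            {Resource: "host-a", Scope: "containerd", Name: "unpack"},
            {Resource: "host-a", Scope: "cri", Name: "run"},
        })
        for _, rs := range got {
            for _, ss := range rs.ScopeSpans {
                fmt.Printf("%s / %s: %v\n", rs.Resource, ss.Scope, ss.Spans)
            }
        }
    }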

Some files were not shown because too many files have changed in this diff.