Merge pull request #9581 from bryantbiggs/chore/update-otelgrpc

deps: Update otelgrpc
Fu Wei 2023-12-29 09:58:13 +00:00 committed by GitHub
commit 1f76ca4081
97 changed files with 1798 additions and 698 deletions

go.mod (20 changed lines)

@ -56,18 +56,18 @@ require (
github.com/urfave/cli v1.22.14
github.com/vishvananda/netlink v1.2.1-beta.2
go.etcd.io/bbolt v1.3.8
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0
go.opentelemetry.io/otel v1.19.0
go.opentelemetry.io/otel v1.21.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0
go.opentelemetry.io/otel/sdk v1.19.0
go.opentelemetry.io/otel/trace v1.19.0
go.opentelemetry.io/otel/sdk v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
golang.org/x/mod v0.14.0
golang.org/x/sync v0.5.0
golang.org/x/sys v0.15.0
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0
google.golang.org/grpc v1.60.1
google.golang.org/protobuf v1.32.0
k8s.io/apimachinery v0.28.4
@ -89,7 +89,7 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@ -112,12 +112,12 @@ require (
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/metric v1.19.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/oauth2 v0.13.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.14.0 // indirect
google.golang.org/appengine v1.6.8 // indirect

go.sum (42 changed lines)

@ -106,8 +106,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@ -331,24 +331,24 @@ go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 h1:RsQi0qJ2imFfCvZabqzM9cNXBG8k6gXMv1A0cXRmH6A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0/go.mod h1:vsh3ySueQCiKPxFLvjWC4Z135gIa34TQ/NSqkDTZYUM=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
@ -387,8 +387,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
@ -435,15 +435,15 @@ golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -472,11 +472,11 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=


@ -144,13 +144,12 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) {
}
serverOpts := []grpc.ServerOption{
grpc.StatsHandler(otelgrpc.NewServerHandler()),
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
otelgrpc.StreamServerInterceptor(),
grpc_prometheus.StreamServerInterceptor,
streamNamespaceInterceptor,
)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
otelgrpc.UnaryServerInterceptor(),
grpc_prometheus.UnaryServerInterceptor,
unaryNamespaceInterceptor,
)),
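In effect, the server options after this change boil down to the following (a hedged reconstruction, not the verbatim file; the otelgrpc interceptors are dropped and tracing comes from the stats handler alone):

serverOpts := []grpc.ServerOption{
	grpc.StatsHandler(otelgrpc.NewServerHandler()),
	grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
		grpc_prometheus.StreamServerInterceptor,
		streamNamespaceInterceptor,
	)),
	grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
		grpc_prometheus.UnaryServerInterceptor,
		unaryNamespaceInterceptor,
	)),
}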


@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
| Adding a name to a logger | `WithName` | no API |
| Modify verbosity of log entries in a call chain | `V` | no API |
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
The high-level slog API is explicitly meant to be one of many different APIs
that can be layered on top of a shared `slog.Handler`. logr is one such
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
package.
alternative API, with [interoperability](#slog-interoperability) provided by
some conversion functions.
### Inspiration
@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
## slog interoperability
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
slog API. `slogr` itself leaves that to the caller.
slog API.
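A minimal sketch of both directions, assuming Go 1.21+ and the logr version vendored by this PR (v1.4.1):

package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// slog.Handler -> logr.Logger
	handler := slog.NewTextHandler(os.Stdout, nil)
	logrLogger := logr.FromSlogHandler(handler)
	logrLogger.Info("via the logr API", "key", "value")

	// logr.Logger -> slog.Handler, wrapped back into the high-level slog API
	slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
	slogLogger.Info("via the slog API", "key", "value")
}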
## Using a `logr.Sink` as backend for slog
### Using a `logr.LogSink` as backend for slog
Ideally, a logr sink implementation should support both logr and slog by
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
implementing both the normal logr interface(s) and `SlogSink`. Because
of a conflict in the parameters of the common `Enabled` method, it is [not
possible to implement both slog.Handler and logr.Sink in the same
type](https://github.com/golang/go/issues/59110).
If both are supported, log calls can go from the high-level APIs to the backend
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
convert back and forth without adding additional wrappers, with one exception:
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
log calls.
Such an implementation should also support values that implement specific
@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
These drawbacks are severe enough that applications using a mixture of slog and
logr should switch to a different backend.
## Using a `slog.Handler` as backend for logr
### Using a `slog.Handler` as backend for logr
Using a plain `slog.Handler` without support for logr works better than the
other direction:
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
by negating them.
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
- Stack unwinding is done by the `SlogSink` and the resulting program
counter is passed to the `slog.Handler`.
- Names added via `Logger.WithName` are gathered and recorded in an additional
attribute with `logger` as key and the names separated by slash as value.
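A short sketch of these rules in practice (fragment; same imports as above, with the handler configured to emit down to slog.LevelDebug):

opts := &slog.HandlerOptions{Level: slog.LevelDebug}
logger := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, opts))

logger.Info("ready")                   // emitted at slog.LevelInfo (0)
logger.V(4).Info("debug detail")       // V(4) is negated to slog.LevelDebug (-4)
logger.WithName("db").Info("query ok") // recorded as the attribute logger="db"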
@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
with logr implementations without slog support is not important, then
`slog.Valuer` is sufficient.
## Context support for slog
### Context support for slog
Storing a logger in a `context.Context` is not supported by
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
to fill this gap:
slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
used to fill this gap. They store and retrieve a `slog.Logger` pointer
under the same context key that is also used by `NewContext` and
`FromContext` for a `logr.Logger` value.
func HandlerFromContext(ctx context.Context) slog.Handler {
logger, err := logr.FromContext(ctx)
if err == nil {
return slogr.NewSlogHandler(logger)
}
return slog.Default().Handler()
}
When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
automatically convert the `slog.Logger` to a
`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
return logr.NewContext(ctx, slogr.NewLogr(handler))
}
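A short sketch of the new helpers (fragment; context, log/slog, and logr imports assumed):

ctx := logr.NewContextWithSlogLogger(context.Background(), slog.Default())

// Retrieval converts automatically in either direction.
if logger, err := logr.FromContext(ctx); err == nil {
	logger.Info("retrieved as a logr.Logger")
}
if slogger := logr.FromContextAsSlogLogger(ctx); slogger != nil {
	slogger.Info("retrieved as a *slog.Logger")
}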
With this approach, binaries which use either slog or logr are as efficient as
possible with no unnecessary allocations. This is also why the API stores a
`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
on retrieval would need to allocate one.
The downside is that storing and retrieving a `slog.Handler` needs more
allocations compared to using a `logr.Logger`. Therefore the recommendation is
to use the `logr.Logger` API in code which uses contextual logging.
The downside is that switching back and forth needs more allocations. Because
logr is the API that is already in use by different packages, in particular
Kubernetes, the recommendation is to use the `logr.Logger` API in code which
uses contextual logging.
An alternative to adding values to a logger and storing that logger in the
context is to store the values in the context and to configure a logging
backend to extract those values when emitting log entries. This only works when
log calls are passed the context, which is not supported by the logr API.
With the slog API, it is possible, but not
required. https://github.com/veqryn/slog-context is a package for slog which
provides additional support code for this approach. It also contains wrappers
for the context functions in logr, so developers who prefer to not use the logr
APIs directly can use those instead and the resulting code will still be
interoperable with logr.
## FAQ

vendor/github.com/go-logr/logr/context.go (new generated/vendored file, 33 lines)

@ -0,0 +1,33 @@
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
// the value is always a Logger value. With Go >= 1.21, the value can be a
// Logger value or a slog.Logger pointer.
type contextKey struct{}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}

vendor/github.com/go-logr/logr/context_noslog.go (new generated/vendored file, 49 lines)

@ -0,0 +1,49 @@
//go:build !go1.21
// +build !go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}

vendor/github.com/go-logr/logr/context_slog.go (new generated/vendored file, 83 lines)

@ -0,0 +1,83 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"fmt"
"log/slog"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
v := ctx.Value(contextKey{})
if v == nil {
return Logger{}, notFoundError{}
}
switch v := v.(type) {
case Logger:
return v, nil
case *slog.Logger:
return FromSlogHandler(v.Handler()), nil
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
v := ctx.Value(contextKey{})
if v == nil {
return nil
}
switch v := v.(type) {
case Logger:
return slog.New(ToSlogHandler(v))
case *slog.Logger:
return v
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if logger, err := FromContext(ctx); err == nil {
return logger
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
// provided slog.Logger.
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}


@ -100,6 +100,11 @@ type Options struct {
// details, see docs for Go's time.Layout.
TimestampFormat string
// LogInfoLevel tells funcr what key to use to log the info level.
// If not specified, the info level will be logged as "level".
// If this is set to "", the info level will not be logged at all.
LogInfoLevel *string
// Verbosity tells funcr which V logs to produce. Higher values enable
// more logs. Info logs at or below this level will be written, while logs
// above this level will be discarded.
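A hedged usage sketch of this option (fragment; fmt and funcr imports assumed, output shown is approximate):

levelKey := "severity"
logger := funcr.New(
	func(prefix, args string) { fmt.Println(prefix, args) },
	funcr.Options{LogInfoLevel: &levelKey},
)
logger.Info("ready") // prints roughly: "severity"=0 "msg"="ready"

// An empty key suppresses the level entirely.
quiet := ""
_ = funcr.New(func(prefix, args string) { fmt.Println(prefix, args) },
	funcr.Options{LogInfoLevel: &quiet})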
@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
if opts.MaxLogDepth == 0 {
opts.MaxLogDepth = defaultMaxLogDepth
}
if opts.LogInfoLevel == nil {
opts.LogInfoLevel = new(string)
*opts.LogInfoLevel = "level"
}
f := Formatter{
outputFormat: outfmt,
prefix: "",
@ -231,8 +240,11 @@ type Formatter struct {
prefix string
values []any
valuesStr string
parentValuesStr string
depth int
opts *Options
group string // for slog groups
groupDepth int
}
// outputFormat indicates which outputFormat to use.
@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
buf.WriteByte('{')
buf.WriteByte('{') // for the whole line
}
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
if len(f.valuesStr) > 0 {
if f.parentValuesStr != "" {
if continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
} else {
buf.WriteByte(' ')
}
buf.WriteByte(f.comma())
}
buf.WriteString(f.parentValuesStr)
continuing = true
buf.WriteString(f.valuesStr)
}
groupDepth := f.groupDepth
if f.group != "" {
if f.valuesStr != "" || len(args) != 0 {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
} else {
// The group was empty
groupDepth--
}
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
continuing = true
}
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, continuing, true) // escape user-provided keys
if f.outputFormat == outputJSON {
buf.WriteByte('}')
for i := 0; i < groupDepth; i++ {
buf.WriteByte('}') // for the groups
}
if f.outputFormat == outputJSON {
buf.WriteByte('}') // for the whole line
}
return buf.String()
}
@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
copied := false
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
if !copied {
newList := make([]any, len(kvList))
copy(newList, kvList)
kvList = newList
copied = true
}
k = f.nonStringKey(kvList[i])
kvList[i] = k
}
@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
if i > 0 || continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
buf.WriteByte(f.comma())
} else {
// In theory the format could be something we don't understand. In
// practice, we control it, so it won't be.
@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
}
}
if escapeKeys {
buf.WriteString(prettyString(k))
} else {
// this is faster
buf.WriteByte('"')
buf.WriteString(k)
buf.WriteByte('"')
}
if f.outputFormat == outputJSON {
buf.WriteByte(':')
} else {
buf.WriteByte('=')
}
buf.WriteString(f.quoted(k, escapeKeys))
buf.WriteByte(f.colon())
buf.WriteString(f.pretty(v))
}
return kvList
}
func (f Formatter) quoted(str string, escape bool) string {
if escape {
return prettyString(str)
}
// this is faster
return `"` + str + `"`
}
func (f Formatter) comma() byte {
if f.outputFormat == outputJSON {
return ','
}
return ' '
}
func (f Formatter) colon() byte {
if f.outputFormat == outputJSON {
return ':'
}
return '='
}
func (f Formatter) pretty(value any) string {
return f.prettyWithFlags(value, 0, 0)
}
@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
}
for i := 0; i < len(v); i += 2 {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
k, _ := v[i].(string) // sanitize() above means no need to check success
// arbitrary keys might need escaping
buf.WriteString(prettyString(k))
buf.WriteByte(':')
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
}
if flags&flagRawStruct == 0 {
@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
continue
}
if printComma {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
printComma = true // if we got here, we are rendering a field
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
name = fld.Name
}
// field names can't contain characters which need escaping
buf.WriteByte('"')
buf.WriteString(name)
buf.WriteByte('"')
buf.WriteByte(':')
buf.WriteString(f.quoted(name, false))
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
}
if flags&flagRawStruct == 0 {
@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
e := v.Index(i)
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
i := 0
for it.Next() {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
// If a map key supports TextMarshaler, use it.
keystr := ""
@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
}
}
buf.WriteString(keystr)
buf.WriteByte(':')
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
i++
}
@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any {
return kvList
}
// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
func (f *Formatter) startGroup(group string) {
// Unnamed groups are just inlined.
if group == "" {
return
}
// Any saved values can no longer be changed.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
continuing := false
if f.parentValuesStr != "" {
buf.WriteString(f.parentValuesStr)
continuing = true
}
if f.group != "" && f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
}
// NOTE: We don't close the scope here - that's done later, when a log line
// is actually rendered (because we have N scopes to close).
f.parentValuesStr = buf.String()
// Start collecting new values.
f.group = group
f.groupDepth++
f.valuesStr = ""
f.values = nil
}
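A hedged end-to-end sketch of what this enables: funcr acting as a SlogSink and rendering slog groups as nested objects (fragment; output approximate):

jsonLogger := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
slogger := slog.New(logr.ToSlogHandler(jsonLogger))
slogger.WithGroup("req").Info("done", "id", 42)
// prints roughly: {"logger":"","level":0,"msg":"done","req":{"id":42}}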
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args
if policy := f.opts.LogCaller; policy == All || policy == Info {
args = append(args, "caller", f.caller())
}
args = append(args, "level", level, "msg", msg)
if key := *f.opts.LogInfoLevel; key != "" {
args = append(args, key, level)
}
args = append(args, "msg", msg)
return prefix, f.render(args, kvList)
}

vendor/github.com/go-logr/logr/funcr/slogsink.go (new generated/vendored file, 105 lines)

@ -0,0 +1,105 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package funcr
import (
"context"
"log/slog"
"github.com/go-logr/logr"
)
var _ logr.SlogSink = &fnlogger{}
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, kvList)
return true
})
if record.Level >= slog.LevelError {
l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
}
return nil
}
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, kvList)
}
l.AddValues(kvList)
return &l
}
func (l fnlogger) WithGroup(name string) logr.SlogSink {
l.startGroup(name)
return &l
}
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, grpKVs)
}
if attr.Key == "" {
// slog says we have to inline these
kvList = append(kvList, grpKVs...)
} else {
kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
}
} else if attr.Key != "" {
kvList = append(kvList, attr.Key, attrVal.Any())
}
return kvList
}
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l fnlogger) levelFromSlog(level slog.Level) int {
result := -level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}


@ -207,10 +207,6 @@ limitations under the License.
// those.
package logr
import (
"context"
)
// New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users. Passing a nil sink will create
// a Logger which discards all log lines.
@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
return l.sink == nil
}
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know.
type RuntimeInfo struct {

vendor/github.com/go-logr/logr/sloghandler.go (new generated/vendored file, 192 lines)

@ -0,0 +1,192 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
type slogHandler struct {
// May be nil, in which case all logs get discarded.
sink LogSink
// Non-nil if sink is non-nil and implements SlogSink.
slogSink SlogSink
// groupPrefix collects values from WithGroup calls. It gets added as
// prefix to value keys when handling a log record.
groupPrefix string
// levelBias can be set when constructing the handler to influence the
// slog.Level of log records. A positive levelBias reduces the
// slog.Level value. slog has no API to influence this value after the
// handler got created, so it can only be set indirectly through
// Logger.V.
levelBias slog.Level
}
var _ slog.Handler = &slogHandler{}
// groupSeparator is used to concatenate WithGroup names and attribute keys.
const groupSeparator = "."
// GetLevel is used for black box unit testing.
func (l *slogHandler) GetLevel() slog.Level {
return l.levelBias
}
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
}
func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
if l.slogSink != nil {
// Only adjust verbosity level of log entries < slog.LevelError.
if record.Level < slog.LevelError {
record.Level -= l.levelBias
}
return l.slogSink.Handle(ctx, record)
}
// No need to check for nil sink here because Handle will only be called
// when Enabled returned true.
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
return true
})
if record.Level >= slog.LevelError {
l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.sinkWithCallDepth().Info(level, record.Message, kvList...)
}
return nil
}
// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
// are called by Handle, code in slog gets skipped.
//
// This offset currently (Go 1.21.0) works for calls through
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
// chain won't change. Wrapping the handler will also break unwinding. It's
// still better than not adjusting at all....
//
// This cannot be done when constructing the handler because FromSlogHandler needs
// access to the original sink without this adjustment. A second copy would
// work, but then WithAttrs would have to be called for both of them.
func (l *slogHandler) sinkWithCallDepth() LogSink {
if sink, ok := l.sink.(CallDepthLogSink); ok {
return sink.WithCallDepth(2)
}
return l.sink
}
func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
if l.sink == nil || len(attrs) == 0 {
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithAttrs(attrs)
clone.sink = clone.slogSink
} else {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
}
clone.sink = l.sink.WithValues(kvList...)
}
return &clone
}
func (l *slogHandler) WithGroup(name string) slog.Handler {
if l.sink == nil {
return l
}
if name == "" {
// slog says to inline empty groups
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithGroup(name)
clone.sink = clone.slogSink
} else {
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
}
return &clone
}
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
prefix := groupPrefix
if attr.Key != "" {
prefix = addPrefix(groupPrefix, attr.Key)
}
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, prefix, grpKVs)
}
kvList = append(kvList, grpKVs...)
} else if attr.Key != "" {
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
}
return kvList
}
func addPrefix(prefix, name string) string {
if prefix == "" {
return name
}
if name == "" {
return prefix
}
return prefix + groupSeparator + name
}
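In practice this means a sink without SlogSink support sees dotted keys instead of real groups; a hedged sketch using go-logr/stdr v1.2.2 (which predates SlogSink) as the plain LogSink:

stdLogger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
slogger := slog.New(logr.ToSlogHandler(stdLogger))
slogger.WithGroup("req").Info("done", "id", 42)
// The underlying LogSink receives the key "req.id" rather than a nested group.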
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l *slogHandler) levelFromSlog(level slog.Level) int {
result := -level
result += l.levelBias // in case the original Logger had a V level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}

vendor/github.com/go-logr/logr/slogr.go (new generated/vendored file, 100 lines)

@ -0,0 +1,100 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
// FromSlogHandler returns a Logger which writes to the slog.Handler.
//
// The logr verbosity level is mapped to slog levels such that V(0) becomes
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
func FromSlogHandler(handler slog.Handler) Logger {
if handler, ok := handler.(*slogHandler); ok {
if handler.sink == nil {
return Discard()
}
return New(handler.sink).V(int(handler.levelBias))
}
return New(&slogSink{handler: handler})
}
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
//
// The returned logger writes all records with level >= slog.LevelError as
// error log entries with LogSink.Error, regardless of the verbosity level of
// the Logger:
//
// logger := <some Logger with 0 as verbosity level>
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
//
// The level of all other records gets reduced by the verbosity
// level of the Logger and the result is negated. If it happens
// to be negative, then it gets replaced by zero because a LogSink
// is not expected to handle negative levels:
//
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
func ToSlogHandler(logger Logger) slog.Handler {
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
return sink.handler
}
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
if slogSink, ok := handler.sink.(SlogSink); ok {
handler.slogSink = slogSink
}
return handler
}
// SlogSink is an optional interface that a LogSink can implement to support
// logging through the slog.Logger or slog.Handler APIs better. It then should
// also support special slog values like slog.Group. When used as a
// slog.Handler, the advantages are:
//
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
// as intended by slog
// - proper grouping of key/value pairs via WithGroup
// - verbosity levels > slog.LevelInfo can be recorded
// - less overhead
//
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
// well. Developers can pick whatever API suits them better and/or mix
// packages which use either API in the same binary with a common logging
// implementation.
//
// This interface is necessary because the type implementing the LogSink
// interface cannot also implement the slog.Handler interface due to the
// different prototype of the common Enabled method.
//
// An implementation could support both interfaces in two different types, but then
// additional interfaces would be needed to convert between those types in FromSlogHandler
// and ToSlogHandler.
type SlogSink interface {
LogSink
Handle(ctx context.Context, record slog.Record) error
WithAttrs(attrs []slog.Attr) SlogSink
WithGroup(name string) SlogSink
}

vendor/github.com/go-logr/logr/slogsink.go (new generated/vendored file, 120 lines)

@ -0,0 +1,120 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
"runtime"
"time"
)
var (
_ LogSink = &slogSink{}
_ CallDepthLogSink = &slogSink{}
_ Underlier = &slogSink{}
)
// Underlier is implemented by the LogSink created by FromSlogHandler.
type Underlier interface {
// GetUnderlying returns the Handler used by the LogSink.
GetUnderlying() slog.Handler
}
const (
// nameKey is used to log the `WithName` values as an additional attribute.
nameKey = "logger"
// errKey is used to log the error parameter of Error as an additional attribute.
errKey = "err"
)
type slogSink struct {
callDepth int
name string
handler slog.Handler
}
func (l *slogSink) Init(info RuntimeInfo) {
l.callDepth = info.CallDepth
}
func (l *slogSink) GetUnderlying() slog.Handler {
return l.handler
}
func (l *slogSink) WithCallDepth(depth int) LogSink {
newLogger := *l
newLogger.callDepth += depth
return &newLogger
}
func (l *slogSink) Enabled(level int) bool {
return l.handler.Enabled(context.Background(), slog.Level(-level))
}
func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
l.log(nil, msg, slog.Level(-level), kvList...)
}
func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
l.log(err, msg, slog.LevelError, kvList...)
}
func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
var pcs [1]uintptr
// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
runtime.Callers(3+l.callDepth, pcs[:])
record := slog.NewRecord(time.Now(), level, msg, pcs[0])
if l.name != "" {
record.AddAttrs(slog.String(nameKey, l.name))
}
if err != nil {
record.AddAttrs(slog.Any(errKey, err))
}
record.Add(kvList...)
_ = l.handler.Handle(context.Background(), record)
}
func (l slogSink) WithName(name string) LogSink {
if l.name != "" {
l.name += "/"
}
l.name += name
return &l
}
func (l slogSink) WithValues(kvList ...interface{}) LogSink {
l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
return &l
}
func kvListToAttrs(kvList ...interface{}) []slog.Attr {
// We don't need the record itself, only its Add method.
record := slog.NewRecord(time.Time{}, 0, "", 0)
record.Add(kvList...)
attrs := make([]slog.Attr, 0, record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
attrs = append(attrs, attr)
return true
})
return attrs
}


@ -24,8 +24,8 @@ import (
)
const (
// instrumentationName is the name of this instrumentation package.
instrumentationName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
// ScopeName is the instrumentation scope name.
ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
// GRPCStatusCodeKey is convention for numeric status code of a gRPC request.
GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
)
@ -46,8 +46,14 @@ type config struct {
ReceivedEvent bool
SentEvent bool
tracer trace.Tracer
meter metric.Meter
rpcServerDuration metric.Int64Histogram
rpcDuration metric.Float64Histogram
rpcRequestSize metric.Int64Histogram
rpcResponseSize metric.Int64Histogram
rpcRequestsPerRPC metric.Int64Histogram
rpcResponsesPerRPC metric.Int64Histogram
}
// Option applies an option value for a config.
@ -56,7 +62,7 @@ type Option interface {
}
// newConfig returns a config configured with all the passed Options.
func newConfig(opts []Option) *config {
func newConfig(opts []Option, role string) *config {
c := &config{
Propagators: otel.GetTextMapPropagator(),
TracerProvider: otel.GetTracerProvider(),
@ -66,19 +72,53 @@ func newConfig(opts []Option) *config {
o.apply(c)
}
c.tracer = c.TracerProvider.Tracer(
ScopeName,
trace.WithInstrumentationVersion(SemVersion()),
)
c.meter = c.MeterProvider.Meter(
instrumentationName,
ScopeName,
metric.WithInstrumentationVersion(Version()),
metric.WithSchemaURL(semconv.SchemaURL),
)
var err error
c.rpcServerDuration, err = c.meter.Int64Histogram("rpc.server.duration",
c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration",
metric.WithDescription("Measures the duration of inbound RPC."),
metric.WithUnit("ms"))
if err != nil {
otel.Handle(err)
}
c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size",
metric.WithDescription("Measures size of RPC request messages (uncompressed)."),
metric.WithUnit("By"))
if err != nil {
otel.Handle(err)
}
c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size",
metric.WithDescription("Measures size of RPC response messages (uncompressed)."),
metric.WithUnit("By"))
if err != nil {
otel.Handle(err)
}
c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc",
metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
metric.WithUnit("{count}"))
if err != nil {
otel.Handle(err)
}
c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc",
metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
metric.WithUnit("{count}"))
if err != nil {
otel.Handle(err)
}
return c
}
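A hedged sketch of wiring these per-role instruments from user code (tracerProvider and meterProvider are placeholder variables for an SDK TracerProvider/MeterProvider):

handler := otelgrpc.NewServerHandler(
	otelgrpc.WithTracerProvider(tracerProvider),
	otelgrpc.WithMeterProvider(meterProvider),
)
srv := grpc.NewServer(grpc.StatsHandler(handler))
// The handler records rpc.server.duration, rpc.server.request.size,
// rpc.server.response.size, and the *_per_rpc histograms created above.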
@ -105,6 +145,8 @@ func (o tracerProviderOption) apply(c *config) {
}
// WithInterceptorFilter returns an Option to use the request filter.
//
// Deprecated: Use stats handlers instead.
func WithInterceptorFilter(f Filter) Option {
return interceptorFilterOption{f: f}
}


@ -13,33 +13,10 @@
// limitations under the License.
/*
Package otelgrpc is the instrumentation library for [google.golang.org/grpc]
Package otelgrpc is the instrumentation library for [google.golang.org/grpc].
For now you can instrument a program that uses [google.golang.org/grpc] in two ways:
Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client.
- by [grpc.UnaryClientInterceptor], [grpc.UnaryServerInterceptor], [grpc.StreamClientInterceptor], [grpc.StreamServerInterceptor]
- by [stats.Handler]
Notice: Do not use both interceptors and [stats.Handler] at the same time! If so, you will get duplicated spans and the parent/child relationships between spans will also be broken.
We still strongly recommend using [stats.Handler], mainly for two reasons:
Functional advantages: [stats.Handler] gives users more information to build more flexible and granular metrics, for example
- multiple ways to represent "data length": [stats.InPayload] exposes "Length", "CompressedLength", and "WireLength" to denote the size of the uncompressed and compressed payload data, with or without framing data. In interceptors we can only get the uncompressed data, and even that has been removed due to performance problems.
- more accurate timestamps: [stats.InPayload]'s "RecvTime" and [stats.OutPayload]'s "SentTime" record more accurately when the server received and sent the message; the timestamp recorded by interceptors depends on where the interceptor sits in the overall interceptor chain.
- some other use cases: for example, catching failures when decoding messages.
Performance advantages: If too many interceptors are registered in a service, the interceptor chain can become too long, which increases the latency and processing time of the entire RPC call.
[stats.Handler]: https://pkg.go.dev/google.golang.org/grpc/stats#Handler
[grpc.UnaryClientInterceptor]: https://pkg.go.dev/google.golang.org/grpc#UnaryClientInterceptor
[grpc.UnaryServerInterceptor]: https://pkg.go.dev/google.golang.org/grpc#UnaryServerInterceptor
[grpc.StreamClientInterceptor]: https://pkg.go.dev/google.golang.org/grpc#StreamClientInterceptor
[grpc.StreamServerInterceptor]: https://pkg.go.dev/google.golang.org/grpc#StreamServerInterceptor
[stats.OutPayload]: https://pkg.go.dev/google.golang.org/grpc/stats#OutPayload
[stats.InPayload]: https://pkg.go.dev/google.golang.org/grpc/stats#InPayload
Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server.
*/
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
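A minimal hedged sketch of both wirings (fragment; the address is a placeholder and insecure credentials come from google.golang.org/grpc/credentials/insecure):

// Client side: otelgrpc.NewClientHandler with grpc.WithStatsHandler.
conn, err := grpc.Dial("localhost:50051",
	grpc.WithTransportCredentials(insecure.NewCredentials()),
	grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
)
if err != nil {
	panic(err) // placeholder error handling
}
defer conn.Close()

// Server side: otelgrpc.NewServerHandler with grpc.StatsHandler.
srv := grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))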


@ -60,10 +60,12 @@ var (
// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
// for use in a grpc.Dial call.
//
// Deprecated: Use [NewClientHandler] instead.
func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
cfg := newConfig(opts)
cfg := newConfig(opts, "client")
tracer := cfg.TracerProvider.Tracer(
instrumentationName,
ScopeName,
trace.WithInstrumentationVersion(Version()),
)
@ -83,11 +85,12 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
return invoker(ctx, method, req, reply, cc, callOpts...)
}
name, attr := spanInfo(method, cc.Target())
name, attr, _ := telemetryAttributes(method, cc.Target())
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindClient),
trace.WithAttributes(attr...)},
trace.WithAttributes(attr...),
},
cfg.SpanStartOptions...,
)
@ -122,27 +125,13 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
}
}
type streamEventType int
type streamEvent struct {
Type streamEventType
Err error
}
const (
receiveEndEvent streamEventType = iota
errorEvent
)
// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and
// SendMsg method call.
type clientStream struct {
grpc.ClientStream
desc *grpc.StreamDesc
events chan streamEvent
eventsDone chan struct{}
finished chan error
span trace.Span
receivedEvent bool
sentEvent bool
@ -157,11 +146,11 @@ func (w *clientStream) RecvMsg(m interface{}) error {
err := w.ClientStream.RecvMsg(m)
if err == nil && !w.desc.ServerStreams {
w.sendStreamEvent(receiveEndEvent, nil)
w.endSpan(nil)
} else if err == io.EOF {
w.sendStreamEvent(receiveEndEvent, nil)
w.endSpan(nil)
} else if err != nil {
w.sendStreamEvent(errorEvent, err)
w.endSpan(err)
} else {
w.receivedMessageID++
@ -183,7 +172,7 @@ func (w *clientStream) SendMsg(m interface{}) error {
}
if err != nil {
w.sendStreamEvent(errorEvent, err)
w.endSpan(err)
}
return err
@ -191,9 +180,8 @@ func (w *clientStream) SendMsg(m interface{}) error {
func (w *clientStream) Header() (metadata.MD, error) {
md, err := w.ClientStream.Header()
if err != nil {
w.sendStreamEvent(errorEvent, err)
w.endSpan(err)
}
return md, err
@ -201,64 +189,43 @@ func (w *clientStream) Header() (metadata.MD, error) {
func (w *clientStream) CloseSend() error {
err := w.ClientStream.CloseSend()
if err != nil {
w.sendStreamEvent(errorEvent, err)
w.endSpan(err)
}
return err
}
func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, cfg *config) *clientStream {
events := make(chan streamEvent)
eventsDone := make(chan struct{})
finished := make(chan error)
go func() {
defer close(eventsDone)
for {
select {
case event := <-events:
switch event.Type {
case receiveEndEvent:
finished <- nil
return
case errorEvent:
finished <- event.Err
return
}
case <-ctx.Done():
finished <- ctx.Err()
return
}
}
}()
func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream {
return &clientStream{
ClientStream: s,
span: span,
desc: desc,
events: events,
eventsDone: eventsDone,
finished: finished,
receivedEvent: cfg.ReceivedEvent,
sentEvent: cfg.SentEvent,
}
}
func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) {
select {
case <-w.eventsDone:
case w.events <- streamEvent{Type: eventType, Err: err}:
func (w *clientStream) endSpan(err error) {
if err != nil {
s, _ := status.FromError(err)
w.span.SetStatus(codes.Error, s.Message())
w.span.SetAttributes(statusCodeAttr(s.Code()))
} else {
w.span.SetAttributes(statusCodeAttr(grpc_codes.OK))
}
w.span.End()
}
// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
// for use in a grpc.Dial call.
//
// Deprecated: Use [NewClientHandler] instead.
func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
cfg := newConfig(opts)
cfg := newConfig(opts, "client")
tracer := cfg.TracerProvider.Tracer(
instrumentationName,
ScopeName,
trace.WithInstrumentationVersion(Version()),
)
@ -278,11 +245,12 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
return streamer(ctx, desc, cc, method, callOpts...)
}
name, attr := spanInfo(method, cc.Target())
name, attr, _ := telemetryAttributes(method, cc.Target())
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindClient),
trace.WithAttributes(attr...)},
trace.WithAttributes(attr...),
},
cfg.SpanStartOptions...,
)
@ -302,32 +270,19 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
span.End()
return s, err
}
stream := wrapClientStream(ctx, s, desc, cfg)
go func() {
err := <-stream.finished
if err != nil {
s, _ := status.FromError(err)
span.SetStatus(codes.Error, s.Message())
span.SetAttributes(statusCodeAttr(s.Code()))
} else {
span.SetAttributes(statusCodeAttr(grpc_codes.OK))
}
span.End()
}()
stream := wrapClientStream(ctx, s, desc, span, cfg)
return stream, nil
}
}
// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable
// for use in a grpc.NewServer call.
//
// Deprecated: Use [NewServerHandler] instead.
func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
cfg := newConfig(opts)
cfg := newConfig(opts, "server")
tracer := cfg.TracerProvider.Tracer(
instrumentationName,
ScopeName,
trace.WithInstrumentationVersion(Version()),
)
@ -346,11 +301,12 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
}
ctx = extract(ctx, cfg.Propagators)
name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx))
name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindServer),
trace.WithAttributes(attr...)},
trace.WithAttributes(attr...),
},
cfg.SpanStartOptions...,
)
@ -365,30 +321,30 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
messageReceived.Event(ctx, 1, req)
}
var statusCode grpc_codes.Code
defer func(t time.Time) {
elapsedTime := time.Since(t) / time.Millisecond
attr = append(attr, semconv.RPCGRPCStatusCodeKey.Int64(int64(statusCode)))
o := metric.WithAttributes(attr...)
cfg.rpcServerDuration.Record(ctx, int64(elapsedTime), o)
}(time.Now())
before := time.Now()
resp, err := handler(ctx, req)
if err != nil {
s, _ := status.FromError(err)
if err != nil {
statusCode, msg := serverStatus(s)
span.SetStatus(statusCode, msg)
span.SetAttributes(statusCodeAttr(s.Code()))
if cfg.SentEvent {
messageSent.Event(ctx, 1, s.Proto())
}
} else {
statusCode = grpc_codes.OK
span.SetAttributes(statusCodeAttr(grpc_codes.OK))
if cfg.SentEvent {
messageSent.Event(ctx, 1, resp)
}
}
grpcStatusCodeAttr := statusCodeAttr(s.Code())
span.SetAttributes(grpcStatusCodeAttr)
// Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(time.Since(before)) / float64(time.Millisecond)
metricAttrs = append(metricAttrs, grpcStatusCodeAttr)
cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...))
return resp, err
}
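The duration comment above matters for sub-millisecond RPCs: integer milliseconds truncate them to zero, while the floating point division keeps the fraction. A small standalone sketch of the difference:

package main

import (
	"fmt"
	"time"
)

func main() {
	d := 750 * time.Microsecond
	fmt.Println(d.Milliseconds())                       // 0: integer division truncates
	fmt.Println(float64(d) / float64(time.Millisecond)) // 0.75: precision preserved
}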
@ -446,10 +402,12 @@ func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *s
// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
// for use in a grpc.NewServer call.
//
// Deprecated: Use [NewServerHandler] instead.
func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
cfg := newConfig(opts)
cfg := newConfig(opts, "server")
tracer := cfg.TracerProvider.Tracer(
instrumentationName,
ScopeName,
trace.WithInstrumentationVersion(Version()),
)
@ -469,11 +427,12 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
}
ctx = extract(ctx, cfg.Propagators)
name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx))
name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
startOpts := append([]trace.SpanStartOption{
trace.WithSpanKind(trace.SpanKindServer),
trace.WithAttributes(attr...)},
trace.WithAttributes(attr...),
},
cfg.SpanStartOptions...,
)
@ -498,17 +457,18 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
}
}
// spanInfo returns a span name and all appropriate attributes from the gRPC
// method and peer address.
func spanInfo(fullMethod, peerAddress string) (string, []attribute.KeyValue) {
name, mAttrs := internal.ParseFullMethod(fullMethod)
// telemetryAttributes returns a span name and span and metric attributes from
// the gRPC method and peer address.
func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) {
name, methodAttrs := internal.ParseFullMethod(fullMethod)
peerAttrs := peerAttr(peerAddress)
attrs := make([]attribute.KeyValue, 0, 1+len(mAttrs)+len(peerAttrs))
attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs))
attrs = append(attrs, RPCSystemGRPC)
attrs = append(attrs, mAttrs...)
attrs = append(attrs, methodAttrs...)
metricAttrs := attrs[:1+len(methodAttrs)]
attrs = append(attrs, peerAttrs...)
return name, attrs
return name, attrs, metricAttrs
}
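telemetryAttributes hands back two views of one backing array: metricAttrs is a prefix of attrs taken before the peer attributes are appended, and because attrs was allocated with enough capacity, the later append neither reallocates nor disturbs that prefix. A minimal sketch of the slicing behavior (attribute values are illustrative strings only):

package main

import "fmt"

func main() {
	attrs := make([]string, 0, 4)
	attrs = append(attrs, "rpc.system", "rpc.method") // shared by span and metric attributes
	metricAttrs := attrs[:2]                          // prefix view over the same backing array
	attrs = append(attrs, "peer.ip", "peer.port")     // within capacity: no reallocation, prefix untouched
	fmt.Println(metricAttrs)                          // [rpc.system rpc.method]
	fmt.Println(attrs)                                // [rpc.system rpc.method peer.ip peer.port]
}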
// peerAttr returns attributes about the peer address.

View File

@ -56,7 +56,7 @@ func (s *metadataSupplier) Keys() []string {
// requests.
// Deprecated: Unnecessary public func.
func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
c := newConfig(opts)
c := newConfig(opts, "")
c.Propagators.Inject(ctx, &metadataSupplier{
metadata: md,
})
@ -78,7 +78,7 @@ func inject(ctx context.Context, propagators propagation.TextMapPropagator) cont
// This function is meant to be used on incoming requests.
// Deprecated: Unnecessary public func.
func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
c := newConfig(opts)
c := newConfig(opts, "")
ctx = c.Propagators.Extract(ctx, &metadataSupplier{
metadata: md,
})

View File

@ -17,13 +17,16 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
import (
"context"
"sync/atomic"
"time"
grpc_codes "google.golang.org/grpc/codes"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
)
@ -33,24 +36,32 @@ type gRPCContextKey struct{}
type gRPCContext struct {
messagesReceived int64
messagesSent int64
metricAttrs []attribute.KeyValue
}
type serverHandler struct {
*config
}
// NewServerHandler creates a stats.Handler for gRPC server.
func NewServerHandler(opts ...Option) stats.Handler {
h := &serverHandler{
config: newConfig(opts),
config: newConfig(opts, "server"),
}
h.tracer = h.config.TracerProvider.Tracer(
instrumentationName,
trace.WithInstrumentationVersion(SemVersion()),
)
return h
}
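A minimal sketch of wiring this server-side stats handler into grpc-go, which is what the deprecated Unary/Stream server interceptors point to; the helper name is illustrative:

package example

import (
	"google.golang.org/grpc"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

// newInstrumentedServer registers the otelgrpc stats handler so traces and
// metrics are produced for every handled RPC.
func newInstrumentedServer() *grpc.Server {
	return grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))
}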
type serverHandler struct {
*config
tracer trace.Tracer
// TagConn can attach some information to the given context.
func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
span := trace.SpanFromContext(ctx)
attrs := peerAttr(peerFromCtx(ctx))
span.SetAttributes(attrs...)
return ctx
}
// HandleConn processes the Conn stats.
func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
}
// TagRPC can attach some information to the given context.
@ -66,46 +77,30 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
trace.WithAttributes(attrs...),
)
gctx := gRPCContext{}
gctx := gRPCContext{
metricAttrs: attrs,
}
return context.WithValue(ctx, gRPCContextKey{}, &gctx)
}
// HandleRPC processes the RPC stats.
func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
handleRPC(ctx, rs)
h.handleRPC(ctx, rs)
}
// TagConn can attach some information to the given context.
func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
span := trace.SpanFromContext(ctx)
attrs := peerAttr(peerFromCtx(ctx))
span.SetAttributes(attrs...)
return ctx
}
// HandleConn processes the Conn stats.
func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
type clientHandler struct {
*config
}
// NewClientHandler creates a stats.Handler for gRPC client.
func NewClientHandler(opts ...Option) stats.Handler {
h := &clientHandler{
config: newConfig(opts),
config: newConfig(opts, "client"),
}
h.tracer = h.config.TracerProvider.Tracer(
instrumentationName,
trace.WithInstrumentationVersion(SemVersion()),
)
return h
}
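The client side mirrors this: the handler is passed as a dial option instead of the deprecated client interceptors. A sketch, with target and credentials as placeholders:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

// dialInstrumented attaches the otelgrpc client stats handler to a connection.
func dialInstrumented(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
	)
}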
type clientHandler struct {
*config
tracer trace.Tracer
}
// TagRPC can attach some information to the given context.
func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
name, attrs := internal.ParseFullMethod(info.FullMethodName)
@ -117,14 +112,16 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
trace.WithAttributes(attrs...),
)
gctx := gRPCContext{}
gctx := gRPCContext{
metricAttrs: attrs,
}
return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators)
}
// HandleRPC processes the RPC stats.
func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
handleRPC(ctx, rs)
h.handleRPC(ctx, rs)
}
// TagConn can attach some information to the given context.
@ -140,17 +137,23 @@ func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) {
// no-op
}
func handleRPC(ctx context.Context, rs stats.RPCStats) {
func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) {
span := trace.SpanFromContext(ctx)
gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
var messageId int64
metricAttrs := make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1)
metricAttrs = append(metricAttrs, gctx.metricAttrs...)
wctx := withoutCancel(ctx)
switch rs := rs.(type) {
case *stats.Begin:
case *stats.InPayload:
if gctx != nil {
messageId = atomic.AddInt64(&gctx.messagesReceived, 1)
c.rpcRequestSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
}
if c.ReceivedEvent {
span.AddEvent("message",
trace.WithAttributes(
semconv.MessageTypeReceived,
@ -159,11 +162,14 @@ func handleRPC(ctx context.Context, rs stats.RPCStats) {
semconv.MessageUncompressedSizeKey.Int(rs.Length),
),
)
}
case *stats.OutPayload:
if gctx != nil {
messageId = atomic.AddInt64(&gctx.messagesSent, 1)
c.rpcResponseSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
}
if c.SentEvent {
span.AddEvent("message",
trace.WithAttributes(
semconv.MessageTypeSent,
@ -172,16 +178,61 @@ func handleRPC(ctx context.Context, rs stats.RPCStats) {
semconv.MessageUncompressedSizeKey.Int(rs.Length),
),
)
}
case *stats.OutTrailer:
case *stats.End:
var rpcStatusAttr attribute.KeyValue
if rs.Error != nil {
s, _ := status.FromError(rs.Error)
span.SetStatus(codes.Error, s.Message())
span.SetAttributes(statusCodeAttr(s.Code()))
rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code()))
} else {
span.SetAttributes(statusCodeAttr(grpc_codes.OK))
rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK))
}
span.SetAttributes(rpcStatusAttr)
span.End()
metricAttrs = append(metricAttrs, rpcStatusAttr)
// Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond)
c.rpcDuration.Record(wctx, elapsedTime, metric.WithAttributes(metricAttrs...))
c.rpcRequestsPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...))
c.rpcResponsesPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...))
default:
return
}
}
func withoutCancel(parent context.Context) context.Context {
if parent == nil {
panic("cannot create context from nil parent")
}
return withoutCancelCtx{parent}
}
type withoutCancelCtx struct {
c context.Context
}
func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (withoutCancelCtx) Done() <-chan struct{} {
return nil
}
func (withoutCancelCtx) Err() error {
return nil
}
func (w withoutCancelCtx) Value(key any) any {
return w.c.Value(key)
}
func (w withoutCancelCtx) String() string {
return "withoutCancel"
}
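The detached context lets end-of-RPC metrics be recorded even when the caller has already cancelled or the deadline has passed. The standard library gained the same primitive in Go 1.21 as context.WithoutCancel; a sketch of the idea using that function:

package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// The detached context keeps values but ignores cancellation, which is
	// what allows measurements to be recorded after the caller gives up.
	detached := context.WithoutCancel(ctx)
	fmt.Println(ctx.Err())      // context canceled
	fmt.Println(detached.Err()) // <nil>
}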

View File

@ -16,7 +16,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
// Version is the current release version of the gRPC instrumentation.
func Version() string {
return "0.45.0"
return "0.46.1"
// This string is updated by the pre_release.sh script during release
}

View File

@ -14,12 +14,9 @@ go.work.sum
gen/
/example/dice/dice
/example/fib/fib
/example/fib/traces.txt
/example/jaeger/jaeger
/example/namedtracer/namedtracer
/example/otel-collector/otel-collector
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/zipkin/zipkin
/example/otel-collector/otel-collector

View File

@ -12,8 +12,9 @@ linters:
- depguard
- errcheck
- godot
- gofmt
- gofumpt
- goimports
- gosec
- gosimple
- govet
- ineffassign
@ -53,6 +54,20 @@ issues:
text: "calls to (.+) only in main[(][)] or init[(][)] functions"
linters:
- revive
# It's okay to not run gosec in a test.
- path: _test\.go
linters:
- gosec
# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
# as we commonly use it in tests and examples.
- text: "G404:"
linters:
- gosec
# Ignoring gosec G402: TLS MinVersion too low
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
- text: "G402: TLS MinVersion too low."
linters:
- gosec
include:
# revive exported should have comment or be unexported.
- EXC0012

View File

@ -8,6 +8,85 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
## [1.21.0/0.44.0] 2023-11-16
### Removed
- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
### Fixed
- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
## [1.20.0/0.43.0] 2023-11-10
This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
### Added
- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
### Deprecated
- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
- Deprecate `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
### Changed
- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
Implementors need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
This extends the `Tracer` interface and is a breaking change for any existing implementation.
Implementors need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
This extends the `Span` interface and is a breaking change for any existing implementation.
Implementors need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
### Fixed
- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as whitespace. (#4699)
- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as whitespace. (#4699)
- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
## [1.19.0/0.42.0/0.0.7] 2023-09-28
This release contains the first stable release of the OpenTelemetry Go [metric SDK].
@ -2656,7 +2735,9 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD
[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
@ -2731,7 +2812,7 @@ It contains api and sdk for trace and meter.
[Go 1.20]: https://go.dev/doc/go1.20
[Go 1.19]: https://go.dev/doc/go1.19
[Go 1.18]: https://go.dev/doc/go1.18
[Go 1.19]: https://go.dev/doc/go1.19
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace

View File

@ -90,6 +90,10 @@ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
request ID to the entry you added to `CHANGELOG.md`.
Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
Rewriting Git history makes it difficult to keep track of iterations during code review.
All pull requests are squashed to a single commit upon merge to `main`.
### How to Receive Comments
* If the PR is not ready for review, please put `[WIP]` in the title,

View File

@ -77,6 +77,9 @@ $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
GORELEASE = $(TOOLS)/gorelease
$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
@ -189,6 +192,18 @@ test-coverage: | $(GOCOVMERGE)
done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
# Adding a directory will include all benchmarks in that directory if a filter is not specified.
BENCHMARK_TARGETS := sdk/trace
.PHONY: benchmark
benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
BENCHMARK_FILTER = .
# You can override the filter for a particular directory by adding a rule here.
benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
benchmark/%:
@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
&& cd $* \
$(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
.PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint
@ -216,7 +231,7 @@ go-mod-tidy/%: | crosslink
lint-modules: go-mod-tidy
.PHONY: lint
lint: misspell lint-modules golangci-lint
lint: misspell lint-modules golangci-lint govulncheck
.PHONY: vanity-import-check
vanity-import-check: | $(PORTO)
@ -226,6 +241,14 @@ vanity-import-check: | $(PORTO)
misspell: | $(MISSPELL)
@$(MISSPELL) -w $(ALL_DOCS)
.PHONY: govulncheck
govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
govulncheck/%: DIR=$*
govulncheck/%: | $(GOVULNCHECK)
@echo "govulncheck ./... in $(DIR)" \
&& cd $(DIR) \
&& $(GOVULNCHECK) ./...
.PHONY: codespell
codespell: | $(CODESPELL)
@$(DOCKERPY) $(CODESPELL)
@ -289,3 +312,7 @@ COMMIT ?= "HEAD"
add-tags: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
.PHONY: lint-markdown
lint-markdown:
docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md

View File

@ -11,16 +11,13 @@ It provides a set of APIs to directly measure performance and behavior of your s
## Project Status
| Signal | Status | Project |
|---------|------------|-----------------------|
| Traces | Stable | N/A |
| Metrics | Mixed [1] | [Go: Metric SDK (GA)] |
| Logs | Frozen [2] | N/A |
| Signal | Status |
|---------|------------|
| Traces | Stable |
| Metrics | Stable |
| Logs | Design [1] |
[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34
- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta.
- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK.
- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
No Logs Pull Requests are currently being accepted.
Progress and status specific to this repository is tracked in our

View File

@ -254,7 +254,7 @@ func NewMember(key, value string, props ...Property) (Member, error) {
if err := m.validate(); err != nil {
return newInvalidMember(), err
}
decodedValue, err := url.QueryUnescape(value)
decodedValue, err := url.PathUnescape(value)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
@ -301,7 +301,7 @@ func parseMember(member string) (Member, error) {
// when converting the header into a data structure."
key = strings.TrimSpace(k)
var err error
value, err = url.QueryUnescape(strings.TrimSpace(v))
value, err = url.PathUnescape(strings.TrimSpace(v))
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
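The switch from url.QueryUnescape to url.PathUnescape is what fixes the `+` handling noted in the changelog: the query variant decodes `+` to a space, the path variant leaves it alone. A quick illustration with a made-up value:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q, _ := url.QueryUnescape("a+b%20c")
	p, _ := url.PathUnescape("a+b%20c")
	fmt.Println(q) // "a b c"  ('+' becomes a space)
	fmt.Println(p) // "a+b c"  ('+' is preserved)
}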

View File

@ -34,11 +34,13 @@ type afCounter struct {
name string
opts []metric.Float64ObservableCounterOption
delegate atomic.Value //metric.Float64ObservableCounter
delegate atomic.Value // metric.Float64ObservableCounter
}
var _ unwrapper = (*afCounter)(nil)
var _ metric.Float64ObservableCounter = (*afCounter)(nil)
var (
_ unwrapper = (*afCounter)(nil)
_ metric.Float64ObservableCounter = (*afCounter)(nil)
)
func (i *afCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
@ -63,11 +65,13 @@ type afUpDownCounter struct {
name string
opts []metric.Float64ObservableUpDownCounterOption
delegate atomic.Value //metric.Float64ObservableUpDownCounter
delegate atomic.Value // metric.Float64ObservableUpDownCounter
}
var _ unwrapper = (*afUpDownCounter)(nil)
var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
var (
_ unwrapper = (*afUpDownCounter)(nil)
_ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
)
func (i *afUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
@ -92,11 +96,13 @@ type afGauge struct {
name string
opts []metric.Float64ObservableGaugeOption
delegate atomic.Value //metric.Float64ObservableGauge
delegate atomic.Value // metric.Float64ObservableGauge
}
var _ unwrapper = (*afGauge)(nil)
var _ metric.Float64ObservableGauge = (*afGauge)(nil)
var (
_ unwrapper = (*afGauge)(nil)
_ metric.Float64ObservableGauge = (*afGauge)(nil)
)
func (i *afGauge) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
@ -121,11 +127,13 @@ type aiCounter struct {
name string
opts []metric.Int64ObservableCounterOption
delegate atomic.Value //metric.Int64ObservableCounter
delegate atomic.Value // metric.Int64ObservableCounter
}
var _ unwrapper = (*aiCounter)(nil)
var _ metric.Int64ObservableCounter = (*aiCounter)(nil)
var (
_ unwrapper = (*aiCounter)(nil)
_ metric.Int64ObservableCounter = (*aiCounter)(nil)
)
func (i *aiCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
@ -150,11 +158,13 @@ type aiUpDownCounter struct {
name string
opts []metric.Int64ObservableUpDownCounterOption
delegate atomic.Value //metric.Int64ObservableUpDownCounter
delegate atomic.Value // metric.Int64ObservableUpDownCounter
}
var _ unwrapper = (*aiUpDownCounter)(nil)
var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
var (
_ unwrapper = (*aiUpDownCounter)(nil)
_ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
)
func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
@ -179,11 +189,13 @@ type aiGauge struct {
name string
opts []metric.Int64ObservableGaugeOption
delegate atomic.Value //metric.Int64ObservableGauge
delegate atomic.Value // metric.Int64ObservableGauge
}
var _ unwrapper = (*aiGauge)(nil)
var _ metric.Int64ObservableGauge = (*aiGauge)(nil)
var (
_ unwrapper = (*aiGauge)(nil)
_ metric.Int64ObservableGauge = (*aiGauge)(nil)
)
func (i *aiGauge) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
@ -208,7 +220,7 @@ type sfCounter struct {
name string
opts []metric.Float64CounterOption
delegate atomic.Value //metric.Float64Counter
delegate atomic.Value // metric.Float64Counter
}
var _ metric.Float64Counter = (*sfCounter)(nil)
@ -234,7 +246,7 @@ type sfUpDownCounter struct {
name string
opts []metric.Float64UpDownCounterOption
delegate atomic.Value //metric.Float64UpDownCounter
delegate atomic.Value // metric.Float64UpDownCounter
}
var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
@ -260,7 +272,7 @@ type sfHistogram struct {
name string
opts []metric.Float64HistogramOption
delegate atomic.Value //metric.Float64Histogram
delegate atomic.Value // metric.Float64Histogram
}
var _ metric.Float64Histogram = (*sfHistogram)(nil)
@ -286,7 +298,7 @@ type siCounter struct {
name string
opts []metric.Int64CounterOption
delegate atomic.Value //metric.Int64Counter
delegate atomic.Value // metric.Int64Counter
}
var _ metric.Int64Counter = (*siCounter)(nil)
@ -312,7 +324,7 @@ type siUpDownCounter struct {
name string
opts []metric.Int64UpDownCounterOption
delegate atomic.Value //metric.Int64UpDownCounter
delegate atomic.Value // metric.Int64UpDownCounter
}
var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
@ -338,7 +350,7 @@ type siHistogram struct {
name string
opts []metric.Int64HistogramOption
delegate atomic.Value //metric.Int64Histogram
delegate atomic.Value // metric.Int64Histogram
}
var _ metric.Int64Histogram = (*siHistogram)(nil)

View File

@ -39,6 +39,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
// tracerProvider is a placeholder for a configured SDK TracerProvider.
@ -46,6 +47,8 @@ import (
// All TracerProvider functionality is forwarded to a delegate once
// configured.
type tracerProvider struct {
embedded.TracerProvider
mtx sync.Mutex
tracers map[il]*tracer
delegate trace.TracerProvider
@ -119,6 +122,8 @@ type il struct {
// All Tracer functionality is forwarded to a delegate once configured.
// Otherwise, all functionality is forwarded to a NoopTracer.
type tracer struct {
embedded.Tracer
name string
opts []trace.TracerOption
provider *tracerProvider
@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart
// SpanContext. It performs no operations other than to return the wrapped
// SpanContext.
type nonRecordingSpan struct {
embedded.Span
sc trace.SpanContext
tracer *tracer
}

View File

@ -149,7 +149,7 @@ of [go.opentelemetry.io/otel/metric].
Finally, an author can embed another implementation in theirs. The embedded
implementation will be used for methods not defined by the author. For example,
an author who want to default to silently dropping the call can use
an author who wants to default to silently dropping the call can use
[go.opentelemetry.io/otel/metric/noop]:
import "go.opentelemetry.io/otel/metric/noop"

View File

@ -39,6 +39,12 @@ type InstrumentOption interface {
Float64ObservableGaugeOption
}
// HistogramOption applies options to histogram instruments.
type HistogramOption interface {
Int64HistogramOption
Float64HistogramOption
}
type descOpt string
func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
@ -171,6 +177,23 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob
// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
func WithUnit(u string) InstrumentOption { return unitOpt(u) }
// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
//
// This option is considered "advisory", and may be ignored by API implementations.
func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
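A hedged sketch of how a caller might pass these advisory boundaries when creating a histogram; the scope name, instrument name, and bucket values are placeholders:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// recordLatency records a duration into a histogram configured with advisory
// bucket boundaries. An SDK may honor or ignore the boundaries.
func recordLatency(ctx context.Context, seconds float64) error {
	meter := otel.Meter("example/latency")
	hist, err := meter.Float64Histogram(
		"request.duration",
		metric.WithUnit("s"),
		metric.WithExplicitBucketBoundaries(0.01, 0.1, 0.5, 1, 5),
	)
	if err != nil {
		return err
	}
	hist.Record(ctx, seconds)
	return nil
}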
type bucketOpt []float64
func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
c.explicitBucketBoundaries = o
return c
}
func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
c.explicitBucketBoundaries = o
return c
}
// AddOption applies options to an addition measurement. See
// [MeasurementOption] for other options that can be used as an AddOption.
type AddOption interface {

View File

@ -149,6 +149,7 @@ type Float64Histogram interface {
type Float64HistogramConfig struct {
description string
unit string
explicitBucketBoundaries []float64
}
// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
@ -171,6 +172,11 @@ func (c Float64HistogramConfig) Unit() string {
return c.unit
}
// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
return c.explicitBucketBoundaries
}
// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
// [InstrumentOption] for other options that can be used as a
// Float64HistogramOption.

View File

@ -149,6 +149,7 @@ type Int64Histogram interface {
type Int64HistogramConfig struct {
description string
unit string
explicitBucketBoundaries []float64
}
// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
@ -171,6 +172,11 @@ func (c Int64HistogramConfig) Unit() string {
return c.unit
}
// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
return c.explicitBucketBoundaries
}
// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
// [InstrumentOption] for other options that can be used as an
// Int64HistogramOption.

View File

@ -40,8 +40,10 @@ const (
// their proprietary information.
type TraceContext struct{}
var _ TextMapPropagator = TraceContext{}
var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
var (
_ TextMapPropagator = TraceContext{}
traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
)
// Inject set tracecontext from the Context into the carrier.
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {

View File

@ -1 +1 @@
codespell==2.2.5
codespell==2.2.6

View File

@ -21,12 +21,10 @@ import (
"strings"
)
var (
// ErrPartialResource is returned by a detector when complete source
// information for a Resource is unavailable or the source information
// contains invalid values that are omitted from the returned Resource.
ErrPartialResource = errors.New("partial resource")
)
// ErrPartialResource is returned by a detector when complete source
// information for a Resource is unavailable or the source information
// contains invalid values that are omitted from the returned Resource.
var ErrPartialResource = errors.New("partial resource")
// Detector detects OpenTelemetry resource information.
type Detector interface {

View File

@ -28,16 +28,14 @@ import (
const (
// resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES"
resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials
// svcNameKey is the environment variable name that Service Name information will be read from.
svcNameKey = "OTEL_SERVICE_NAME"
)
var (
// errMissingValue is returned when a resource value is missing.
errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
)
// errMissingValue is returned when a resource value is missing.
var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
// fromEnv is a Detector that implements the Detector and collects
// resources from environment. This Detector is included as a
@ -91,7 +89,7 @@ func constructOTResources(s string) (*Resource, error) {
continue
}
key := strings.TrimSpace(k)
val, err := url.QueryUnescape(strings.TrimSpace(v))
val, err := url.PathUnescape(strings.TrimSpace(v))
if err != nil {
// Retain original value if decoding fails, otherwise it will be
// an empty string.

View File

@ -36,8 +36,10 @@ func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
osDescription = osDescriptionProvider
}
type osTypeDetector struct{}
type osDescriptionDetector struct{}
type (
osTypeDetector struct{}
osDescriptionDetector struct{}
)
// Detect returns a *Resource that describes the operating system type the
// service is running on.
@ -56,7 +58,6 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
// service is running on.
func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
description, err := osDescription()
if err != nil {
return nil, err
}

View File

@ -25,14 +25,16 @@ import (
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)
type pidProvider func() int
type executablePathProvider func() (string, error)
type commandArgsProvider func() []string
type ownerProvider func() (*user.User, error)
type runtimeNameProvider func() string
type runtimeVersionProvider func() string
type runtimeOSProvider func() string
type runtimeArchProvider func() string
type (
pidProvider func() int
executablePathProvider func() (string, error)
commandArgsProvider func() []string
ownerProvider func() (*user.User, error)
runtimeNameProvider func() string
runtimeVersionProvider func() string
runtimeOSProvider func() string
runtimeArchProvider func() string
)
var (
defaultPidProvider pidProvider = os.Getpid
@ -108,14 +110,16 @@ func setUserProviders(ownerProvider ownerProvider) {
owner = ownerProvider
}
type processPIDDetector struct{}
type processExecutableNameDetector struct{}
type processExecutablePathDetector struct{}
type processCommandArgsDetector struct{}
type processOwnerDetector struct{}
type processRuntimeNameDetector struct{}
type processRuntimeVersionDetector struct{}
type processRuntimeDescriptionDetector struct{}
type (
processPIDDetector struct{}
processExecutableNameDetector struct{}
processExecutablePathDetector struct{}
processCommandArgsDetector struct{}
processOwnerDetector struct{}
processRuntimeNameDetector struct{}
processRuntimeVersionDetector struct{}
processRuntimeDescriptionDetector struct{}
)
// Detect returns a *Resource that describes the process identifier (PID) of the
// executing process.

View File

@ -25,6 +25,8 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/noop"
)
const (
@ -73,6 +75,8 @@ func (cfg tracerProviderConfig) MarshalLog() interface{} {
// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
// instrumentation so it can trace operational flow through a system.
type TracerProvider struct {
embedded.TracerProvider
mu sync.Mutex
namedTracer map[instrumentation.Scope]*tracer
spanProcessors atomic.Pointer[spanProcessorStates]
@ -139,7 +143,7 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
// This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown().
if p.isShutdown.Load() {
return trace.NewNoopTracerProvider().Tracer(name, opts...)
return noop.NewTracerProvider().Tracer(name, opts...)
}
c := trace.NewTracerConfig(opts...)
if name == "" {
@ -157,7 +161,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
// Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran
// after the first check above but before we acquired the mutex.
if p.isShutdown.Load() {
return trace.NewNoopTracerProvider().Tracer(name, opts...), true
return noop.NewTracerProvider().Tracer(name, opts...), true
}
t, ok := p.namedTracer[is]
if !ok {

View File

@ -158,9 +158,9 @@ func NeverSample() Sampler {
return alwaysOffSampler{}
}
// ParentBased returns a composite sampler which behaves differently,
// ParentBased returns a sampler decorator which behaves differently,
// based on the parent of the span. If the span has no parent,
// the root(Sampler) is used to make the sampling decision. If the span has
// the decorated sampler is used to make the sampling decision. If the span has
// a parent, depending on whether the parent is remote and whether it
// is sampled, one of the following samplers will apply:
// - remoteParentSampled(Sampler) (default: AlwaysOn)

View File

@ -32,6 +32,7 @@ import (
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
// ReadOnlySpan allows reading information from the data structure underlying a
@ -108,6 +109,8 @@ type ReadWriteSpan interface {
// recordingSpan is an implementation of the OpenTelemetry Span API
// representing the individual component of a trace that is sampled.
type recordingSpan struct {
embedded.Span
// mu protects the contents of this span.
mu sync.Mutex
@ -158,8 +161,10 @@ type recordingSpan struct {
tracer *tracer
}
var _ ReadWriteSpan = (*recordingSpan)(nil)
var _ runtimeTracer = (*recordingSpan)(nil)
var (
_ ReadWriteSpan = (*recordingSpan)(nil)
_ runtimeTracer = (*recordingSpan)(nil)
)
// SpanContext returns the SpanContext of this span.
func (s *recordingSpan) SpanContext() trace.SpanContext {
@ -772,6 +777,8 @@ func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
// that wraps a SpanContext. It performs no operations other than to return
// the wrapped SpanContext or TracerProvider that created it.
type nonRecordingSpan struct {
embedded.Span
// tracer is the SDK tracer that created this span.
tracer *tracer
sc trace.SpanContext

View File

@ -20,9 +20,12 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
type tracer struct {
embedded.Tracer
provider *TracerProvider
instrumentationScope instrumentation.Scope
}

View File

@ -162,6 +162,7 @@ func (s spanSnapshot) Resource() *resource.Resource { return s.resource }
func (s spanSnapshot) InstrumentationScope() instrumentation.Scope {
return s.instrumentationScope
}
func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library {
return s.instrumentationScope
}

View File

@ -16,5 +16,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
return "1.19.0"
return "1.21.0"
}

View File

@ -268,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig {
c.stackTrace = bool(o)
return c
}
func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig {
c.stackTrace = bool(o)
return c

View File

@ -62,5 +62,69 @@ a default.
defer span.End()
// ...
}
# API Implementations
This package does not conform to the standard Go versioning policy; all of its
interfaces may have methods added to them without a package major version bump.
This non-standard API evolution could surprise an uninformed implementation
author. They could unknowingly build their implementation in a way that would
result in a runtime panic for their users that update to the new API.
The API is designed to help inform an instrumentation author about this
non-standard API evolution. It requires them to choose a default behavior for
unimplemented interface methods. There are three behavior choices they can
make:
- Compilation failure
- Panic
- Default to another implementation
All interfaces in this API embed a corresponding interface from
[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default
behavior of their implementations to be a compilation failure, signaling to
their users they need to update to the latest version of that implementation,
they need to embed the corresponding interface from
[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
example,
import "go.opentelemetry.io/otel/trace/embedded"
type TracerProvider struct {
embedded.TracerProvider
// ...
}
If an author wants the default behavior of their implementations to panic, they
can embed the API interface directly.
import "go.opentelemetry.io/otel/trace"
type TracerProvider struct {
trace.TracerProvider
// ...
}
This option is not recommended. It will lead to publishing packages that
contain runtime panics when users update to newer versions of
[go.opentelemetry.io/otel/trace], which may be done with a transitive
dependency.
Finally, an author can embed another implementation in theirs. The embedded
implementation will be used for methods not defined by the author. For example,
an author who wants to default to silently dropping the call can use
[go.opentelemetry.io/otel/trace/noop]:
import "go.opentelemetry.io/otel/trace/noop"
type TracerProvider struct {
noop.TracerProvider
// ...
}
It is strongly recommended that authors only embed
[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
That implementation is the only one OpenTelemetry authors can guarantee will
fully implement all the API interfaces when a user updates their API.
*/
package trace // import "go.opentelemetry.io/otel/trace"

View File

@ -0,0 +1,56 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package embedded provides interfaces embedded within the [OpenTelemetry
// trace API].
//
// Implementers of the [OpenTelemetry trace API] can embed the relevant type
// from this package into their implementation directly. Doing so will result
// in a compilation error for users when the [OpenTelemetry trace API] is
// extended (which is something that can happen without a major version bump of
// the API package).
//
// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
package embedded // import "go.opentelemetry.io/otel/trace/embedded"
// TracerProvider is embedded in
// [go.opentelemetry.io/otel/trace.TracerProvider].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to
// experience a compilation error, signaling they need to update to your latest
// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
type TracerProvider interface{ tracerProvider() }
// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a
// compilation error, signaling they need to update to your latest
// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface
// is extended (which is something that can happen without a major version bump
// of the API package).
type Tracer interface{ tracer() }
// Span is embedded in [go.opentelemetry.io/otel/trace.Span].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a
// compilation error, signaling they need to update to your latest
// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is
// extended (which is something that can happen without a major version bump of
// the API package).
type Span interface{ span() }
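For example, an implementation that embeds one of these types trades runtime panics for build breaks when the API grows. A sketch with illustrative names, delegating unimplemented behavior to the no-op tracer:

package example

import (
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
	"go.opentelemetry.io/otel/trace/noop"
)

// TracerProvider embeds embedded.TracerProvider, so a method added to the
// trace.TracerProvider interface becomes a compile error here rather than a
// runtime panic for users.
type TracerProvider struct {
	embedded.TracerProvider
}

// Tracer is the only method implemented today; the no-op delegate is a stand-in.
func (TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	return noop.NewTracerProvider().Tracer(name, opts...)
}

// Compile-time check that the type still satisfies the current API.
var _ trace.TracerProvider = TracerProvider{}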

View File

@ -19,16 +19,20 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace/embedded"
)
// NewNoopTracerProvider returns an implementation of TracerProvider that
// performs no operations. The Tracer and Spans created from the returned
// TracerProvider also perform no operations.
//
// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider]
// instead.
func NewNoopTracerProvider() TracerProvider {
return noopTracerProvider{}
}
type noopTracerProvider struct{}
type noopTracerProvider struct{ embedded.TracerProvider }
var _ TracerProvider = noopTracerProvider{}
@ -38,7 +42,7 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
}
// noopTracer is an implementation of Tracer that performs no operations.
type noopTracer struct{}
type noopTracer struct{ embedded.Tracer }
var _ Tracer = noopTracer{}
@ -54,7 +58,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption
}
// noopSpan is an implementation of Span that performs no operations.
type noopSpan struct{}
type noopSpan struct{ embedded.Span }
var _ Span = noopSpan{}

118
vendor/go.opentelemetry.io/otel/trace/noop/noop.go generated vendored Normal file
View File

@ -0,0 +1,118 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package noop provides an implementation of the OpenTelemetry trace API that
// produces no telemetry and minimizes used computation resources.
//
// Using this package to implement the OpenTelemetry trace API will effectively
// disable OpenTelemetry.
//
// This implementation can be embedded in other implementations of the
// OpenTelemetry trace API. Doing so will mean the implementation defaults to
// no operation for methods it does not implement.
package noop // import "go.opentelemetry.io/otel/trace/noop"
import (
"context"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
var (
// Compile-time check this implements the OpenTelemetry API.
_ trace.TracerProvider = TracerProvider{}
_ trace.Tracer = Tracer{}
_ trace.Span = Span{}
)
// TracerProvider is an OpenTelemetry No-Op TracerProvider.
type TracerProvider struct{ embedded.TracerProvider }
// NewTracerProvider returns a TracerProvider that does not record any telemetry.
func NewTracerProvider() TracerProvider {
return TracerProvider{}
}
// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
return Tracer{}
}
// Tracer is an OpenTelemetry No-Op Tracer.
type Tracer struct{ embedded.Tracer }
// Start creates a span. The created span will be set in a child context of ctx
// and returned with the span.
//
// If ctx contains a span context, the returned span will also contain that
// span context. If the span context in ctx is for a non-recording span, that
// span instance will be returned directly.
func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
span := trace.SpanFromContext(ctx)
// If the parent context contains a non-zero span context, that span
// context needs to be returned as a non-recording span
// (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
var zeroSC trace.SpanContext
if sc := span.SpanContext(); !sc.Equal(zeroSC) {
if !span.IsRecording() {
// If the span is not recording return it directly.
return ctx, span
}
// Otherwise, return the span context wrapped in a non-recording span.
span = Span{sc: sc}
} else {
// No parent, return a No-Op span with an empty span context.
span = Span{}
}
return trace.ContextWithSpan(ctx, span), span
}
// Span is an OpenTelemetry No-Op Span.
type Span struct {
embedded.Span
sc trace.SpanContext
}
// SpanContext returns an empty span context.
func (s Span) SpanContext() trace.SpanContext { return s.sc }
// IsRecording always returns false.
func (Span) IsRecording() bool { return false }
// SetStatus does nothing.
func (Span) SetStatus(codes.Code, string) {}
// SetAttributes does nothing.
func (Span) SetAttributes(...attribute.KeyValue) {}
// End does nothing.
func (Span) End(...trace.SpanEndOption) {}
// RecordError does nothing.
func (Span) RecordError(error, ...trace.EventOption) {}
// AddEvent does nothing.
func (Span) AddEvent(string, ...trace.EventOption) {}
// SetName does nothing.
func (Span) SetName(string) {}
// TracerProvider returns a No-Op TracerProvider.
func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} }
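A minimal sketch of disabling tracing globally with this package, assuming the global otel API is in use; the helper name is illustrative:

package example

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace/noop"
)

// disableTracing installs the no-op provider; every Tracer and Span handed
// out afterwards does nothing.
func disableTracing() {
	otel.SetTracerProvider(noop.NewTracerProvider())
}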

View File

@ -22,6 +22,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace/embedded"
)
const (
@ -48,8 +49,10 @@ func (e errorConst) Error() string {
// nolint:revive // revive complains about stutter of `trace.TraceID`.
type TraceID [16]byte
var nilTraceID TraceID
var _ json.Marshaler = nilTraceID
var (
nilTraceID TraceID
_ json.Marshaler = nilTraceID
)
// IsValid checks whether the trace TraceID is valid. A valid trace ID does
// not consist of zeros only.
@ -71,8 +74,10 @@ func (t TraceID) String() string {
// SpanID is a unique identity of a span in a trace.
type SpanID [8]byte
var nilSpanID SpanID
var _ json.Marshaler = nilSpanID
var (
nilSpanID SpanID
_ json.Marshaler = nilSpanID
)
// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
// of zeros only.
@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
// create a Span and it is then up to the operation the Span represents to
// properly end the Span when the operation itself ends.
//
// Warning: methods may be added to this interface in minor releases.
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Span interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Span
// End completes the Span. The Span is considered complete and ready to be
// delivered through the rest of the telemetry pipeline after this method
// is called. Therefore, updates to the Span are not allowed after this
@ -486,8 +498,15 @@ func (sk SpanKind) String() string {
// Tracer is the creator of Spans.
//
// Warning: methods may be added to this interface in minor releases.
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Tracer interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Tracer
// Start creates a span and a context.Context containing the newly-created span.
//
// If the context.Context provided in `ctx` contains a Span then the newly-created
@ -518,8 +537,15 @@ type Tracer interface {
// at runtime from its users or it can simply use the globally registered one
// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
//
// Warning: methods may be added to this interface in minor releases.
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type TracerProvider interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.TracerProvider
// Tracer returns a unique Tracer scoped to be used by instrumentation code
// to trace computational workflows. The scope and identity of that
// instrumentation code is uniquely defined by the name and options passed.
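A sketch of the embedded-type pattern these warnings describe, using a hypothetical loggingProvider type: embedding noop.TracerProvider supplies the required embedded.TracerProvider plus safe defaults for any methods added in future minor releases, so an implementation only overrides what it needs.

package telemetry // hypothetical package name for illustration

import (
	"log"

	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
)

// loggingProvider overrides only Tracer; everything else (including methods
// added in later minor releases) falls through to the embedded no-op provider.
type loggingProvider struct {
	noop.TracerProvider
}

func (p loggingProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	log.Printf("tracer requested: %q", name)
	return p.TracerProvider.Tracer(name, opts...)
}

// Compile-time check that the hypothetical type satisfies the API.
var _ trace.TracerProvider = loggingProvider{}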

View File

@ -28,9 +28,9 @@ const (
// based on the W3C Trace Context specification, see
// https://www.w3.org/TR/trace-context-1/#tracestate-header
noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*`
withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*`
valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
errInvalidKey errorConst = "invalid tracestate key"
errInvalidValue errorConst = "invalid tracestate value"
@ -40,9 +40,10 @@ const (
)
var (
keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`)
valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`)
memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`)
withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`)
valueRe = regexp.MustCompile(`^` + valueFormat + `$`)
memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
)
type member struct {
@ -51,10 +52,19 @@ type member struct {
}
func newMember(key, value string) (member, error) {
if !keyRe.MatchString(key) {
if len(key) > 256 {
return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
if !noTenantKeyRe.MatchString(key) {
if !withTenantKeyRe.MatchString(key) {
return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
}
atIndex := strings.LastIndex(key, "@")
if atIndex > 241 || len(key)-1-atIndex > 14 {
return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
}
}
if len(value) > 256 || !valueRe.MatchString(value) {
return member{}, fmt.Errorf("%w: %s", errInvalidValue, value)
}
return member{Key: key, Value: value}, nil
@ -62,14 +72,14 @@ func newMember(key, value string) (member, error) {
func parseMember(m string) (member, error) {
matches := memberRe.FindStringSubmatch(m)
if len(matches) != 5 {
if len(matches) != 3 {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
return member{
Key: matches[1],
Value: matches[4],
}, nil
result, e := newMember(matches[1], matches[2])
if e != nil {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
return result, nil
}
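The relaxed regular expressions above rely on the explicit length checks in newMember: a whole key may be at most 256 characters, and for tenant keys the part before '@' may be at most 241 characters with a vendor suffix of at most 14. A hedged sketch through the public trace.TraceState API, which routes through the same validation (the header values below are placeholders):

package main

import (
	"fmt"
	"strings"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// A valid two-member tracestate header, including a tenant-style key.
	ts, err := trace.ParseTraceState("vendora=opaque-1,tenant@vendorb=opaque-2")
	fmt.Println(ts.Len(), err) // 2 <nil>

	// A key longer than 256 characters is rejected by the length check,
	// even though the relaxed regexp on its own would match it.
	longKey := strings.Repeat("k", 257)
	_, err = ts.Insert(longKey, "value")
	fmt.Println(err) // invalid tracestate key: kkk...
}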
// String encodes member into a string compliant with the W3C Trace Context

View File

@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
return "1.19.0"
return "1.21.0"
}

View File

@ -14,13 +14,12 @@
module-sets:
stable-v1:
version: v1.19.0
version: v1.21.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
- go.opentelemetry.io/otel/bridge/opentracing/test
- go.opentelemetry.io/otel/example/dice
- go.opentelemetry.io/otel/example/fib
- go.opentelemetry.io/otel/example/namedtracer
- go.opentelemetry.io/otel/example/otel-collector
- go.opentelemetry.io/otel/example/passthrough
@ -35,14 +34,12 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
version: v0.42.0
version: v0.44.0
modules:
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
- go.opentelemetry.io/otel/example/view
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/prometheus

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.7
// +build go1.7
package context

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.9
// +build go1.9
package context

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.7
// +build !go1.7
package context

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.9
// +build !go1.9
package context

View File

@ -20,41 +20,44 @@ import (
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
dataChunkSizeClasses = []int{
1 << 10,
2 << 10,
4 << 10,
8 << 10,
16 << 10,
}
dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return make([]byte, 1<<10) }},
{New: func() interface{} { return make([]byte, 2<<10) }},
{New: func() interface{} { return make([]byte, 4<<10) }},
{New: func() interface{} { return make([]byte, 8<<10) }},
{New: func() interface{} { return make([]byte, 16<<10) }},
}
)
var dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return new([1 << 10]byte) }},
{New: func() interface{} { return new([2 << 10]byte) }},
{New: func() interface{} { return new([4 << 10]byte) }},
{New: func() interface{} { return new([8 << 10]byte) }},
{New: func() interface{} { return new([16 << 10]byte) }},
}
func getDataBufferChunk(size int64) []byte {
i := 0
for ; i < len(dataChunkSizeClasses)-1; i++ {
if size <= int64(dataChunkSizeClasses[i]) {
break
switch {
case size <= 1<<10:
return dataChunkPools[0].Get().(*[1 << 10]byte)[:]
case size <= 2<<10:
return dataChunkPools[1].Get().(*[2 << 10]byte)[:]
case size <= 4<<10:
return dataChunkPools[2].Get().(*[4 << 10]byte)[:]
case size <= 8<<10:
return dataChunkPools[3].Get().(*[8 << 10]byte)[:]
default:
return dataChunkPools[4].Get().(*[16 << 10]byte)[:]
}
}
return dataChunkPools[i].Get().([]byte)
}
func putDataBufferChunk(p []byte) {
for i, n := range dataChunkSizeClasses {
if len(p) == n {
dataChunkPools[i].Put(p)
return
}
}
switch len(p) {
case 1 << 10:
dataChunkPools[0].Put((*[1 << 10]byte)(p))
case 2 << 10:
dataChunkPools[1].Put((*[2 << 10]byte)(p))
case 4 << 10:
dataChunkPools[2].Put((*[4 << 10]byte)(p))
case 8 << 10:
dataChunkPools[3].Put((*[8 << 10]byte)(p))
case 16 << 10:
dataChunkPools[4].Put((*[16 << 10]byte)(p))
default:
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
}
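Pooling *[N]byte pointers instead of []byte slices avoids the allocation that storing a slice header in an interface{} incurs on every Pool.Put (the pattern staticcheck flags as SA6002). A standalone sketch of the same idea for a single size class, assuming Go 1.17+ for the slice-to-array-pointer conversion; the names are illustrative:

package bufpool // hypothetical package for illustration

import "sync"

const chunkSize = 4 << 10 // 4 KiB size class

// A pointer stored in an interface{} does not allocate, whereas a []byte
// slice header does, so the pool holds *[chunkSize]byte values.
var chunkPool = sync.Pool{
	New: func() interface{} { return new([chunkSize]byte) },
}

func getChunk() []byte {
	return chunkPool.Get().(*[chunkSize]byte)[:]
}

func putChunk(p []byte) {
	if len(p) != chunkSize {
		return // not one of ours; let the GC take it
	}
	// Convert the slice back to an array pointer (Go 1.17+); this would panic
	// if len(p) < chunkSize, which the check above rules out.
	chunkPool.Put((*[chunkSize]byte)(p))
}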
// dataBuffer is an io.ReadWriter backed by a list of data chunks.

View File

@ -1,30 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.11
// +build go1.11
package http2
import (
"net/http/httptrace"
"net/textproto"
)
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
}
return nil
}

View File

@ -1,27 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.15
// +build go1.15
package http2
import (
"context"
"crypto/tls"
)
// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
// connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
dialer := &tls.Dialer{
Config: cfg,
}
cn, err := dialer.DialContext(ctx, network, addr)
if err != nil {
return nil, err
}
tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
return tlsCn, nil
}

View File

@ -1,17 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.18
// +build go1.18
package http2
import (
"crypto/tls"
"net"
)
func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
return tc.NetConn()
}

View File

@ -1,21 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.11
// +build !go1.11
package http2
import (
"net/http/httptrace"
"net/textproto"
)
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
return nil
}

View File

@ -1,31 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.15
// +build !go1.15
package http2
import (
"context"
"crypto/tls"
)
// dialTLSWithContext opens a TLS connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
cn, err := tls.Dial(network, addr, cfg)
if err != nil {
return nil, err
}
if err := cn.Handshake(); err != nil {
return nil, err
}
if cfg.InsecureSkipVerify {
return cn, nil
}
if err := cn.VerifyHostname(cfg.ServerName); err != nil {
return nil, err
}
return cn, nil
}

View File

@ -1,17 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.18
// +build !go1.18
package http2
import (
"crypto/tls"
"net"
)
func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
return nil
}

View File

@ -2549,7 +2549,6 @@ type responseWriterState struct {
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished
dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64
@ -2669,7 +2668,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
date: date,
})
if err != nil {
rws.dirty = true
return 0, err
}
if endStream {
@ -2690,7 +2688,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
rws.dirty = true
return 0, err
}
}
@ -2702,9 +2699,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
trailers: rws.trailers,
endStream: true,
})
if err != nil {
rws.dirty = true
}
return len(p), err
}
return len(p), nil
@ -2920,14 +2914,12 @@ func (rws *responseWriterState) writeHeader(code int) {
h.Del("Transfer-Encoding")
}
if rws.conn.writeHeaders(rws.stream, &writeResHeaders{
rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
httpResCode: code,
h: h,
endStream: rws.handlerDone && !rws.hasTrailers(),
}) != nil {
rws.dirty = true
}
})
return
}
@ -2992,19 +2984,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
func (w *responseWriter) handlerDone() {
rws := w.rws
dirty := rws.dirty
rws.handlerDone = true
w.Flush()
w.rws = nil
if !dirty {
// Only recycle the pool if all prior Write calls to
// the serverConn goroutine completed successfully. If
// they returned earlier due to resets from the peer
// there might still be write goroutines outstanding
// from the serverConn referencing the rws memory. See
// issue 20704.
responseWriterStatePool.Put(rws)
}
}
// Push errors.
@ -3187,6 +3170,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
}
sc.curHandlers++
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
return promisedID, nil
}

View File

@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() {
if !ok {
return
}
if nc := tlsUnderlyingConn(tc); nc != nil {
if nc := tc.NetConn(); nc != nil {
nc.Close()
}
}
@ -3201,3 +3201,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
trace.GotFirstResponseByte()
}
}
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
}
return nil
}
// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
// connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
dialer := &tls.Dialer{
Config: cfg,
}
cn, err := dialer.DialContext(ctx, network, addr)
if err != nil {
return nil, err
}
tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
return tlsCn, nil
}
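A hedged usage sketch of the tls.Dialer pattern this helper wraps, with example.com:443 as a placeholder endpoint; the context deadline bounds both the TCP dial and the TLS handshake:

package main

import (
	"context"
	"crypto/tls"
	"log"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// tls.Dialer.DialContext performs the TCP dial and the TLS handshake,
	// both governed by ctx.
	d := &tls.Dialer{Config: &tls.Config{ServerName: "example.com"}}
	conn, err := d.DialContext(ctx, "tcp", "example.com:443")
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	log.Printf("connected to %s", conn.RemoteAddr())
}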

View File

@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.18
// +build go1.18
package idna

View File

@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.10
// +build go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to

View File

@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.10
// +build !go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to

View File

@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.18
// +build !go1.18
package idna

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.10 && !go1.13
// +build go1.10,!go1.13
package idna

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.13 && !go1.14
// +build go1.13,!go1.14
package idna

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.14 && !go1.16
// +build go1.14,!go1.16
package idna

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.16 && !go1.21
// +build go1.16,!go1.21
package idna

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.21
// +build go1.21
package idna

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build !go1.10
// +build !go1.10
package idna

View File

@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.16
// +build !go1.16
package idna

View File

@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.16
// +build go1.16
package idna

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package term

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
// +build darwin dragonfly freebsd netbsd openbsd
package term

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || linux || solaris || zos
// +build aix linux solaris zos
package term

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !zos && !windows && !solaris && !plan9
// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9
package term

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.10
// +build go1.10
package bidirule

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.10
// +build !go1.10
package bidirule

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.10 && !go1.13
// +build go1.10,!go1.13
package bidi

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.13 && !go1.14
// +build go1.13,!go1.14
package bidi

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.14 && !go1.16
// +build go1.14,!go1.16
package bidi

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.16 && !go1.21
// +build go1.16,!go1.21
package bidi

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.21
// +build go1.21
package bidi

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build !go1.10
// +build !go1.10
package bidi

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.10 && !go1.13
// +build go1.10,!go1.13
package norm

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.13 && !go1.14
// +build go1.13,!go1.14
package norm

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.14 && !go1.16
// +build go1.14,!go1.16
package norm

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.16 && !go1.21
// +build go1.16,!go1.21
package norm

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.21
// +build go1.21
package norm

View File

@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build !go1.10
// +build !go1.10
package norm

30
vendor/modules.txt vendored
View File

@ -204,7 +204,7 @@ github.com/felixge/httpsnoop
# github.com/fsnotify/fsnotify v1.7.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
# github.com/go-logr/logr v1.3.0
# github.com/go-logr/logr v1.4.1
## explicit; go 1.18
github.com/go-logr/logr
github.com/go-logr/logr/funcr
@ -403,15 +403,15 @@ go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0
## explicit; go 1.19
# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
## explicit; go 1.20
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0
## explicit; go 1.19
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
# go.opentelemetry.io/otel v1.19.0
# go.opentelemetry.io/otel v1.21.0
## explicit; go 1.20
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@ -442,11 +442,11 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
# go.opentelemetry.io/otel/metric v1.19.0
# go.opentelemetry.io/otel/metric v1.21.0
## explicit; go 1.20
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
# go.opentelemetry.io/otel/sdk v1.19.0
# go.opentelemetry.io/otel/sdk v1.21.0
## explicit; go 1.20
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/sdk/instrumentation
@ -455,9 +455,11 @@ go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
go.opentelemetry.io/otel/sdk/trace/tracetest
# go.opentelemetry.io/otel/trace v1.19.0
# go.opentelemetry.io/otel/trace v1.21.0
## explicit; go 1.20
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/noop
# go.opentelemetry.io/proto/otlp v1.0.0
## explicit; go 1.17
go.opentelemetry.io/proto/otlp/collector/trace/v1
@ -467,8 +469,8 @@ go.opentelemetry.io/proto/otlp/trace/v1
# golang.org/x/mod v0.14.0
## explicit; go 1.18
golang.org/x/mod/semver
# golang.org/x/net v0.17.0
## explicit; go 1.17
# golang.org/x/net v0.19.0
## explicit; go 1.18
golang.org/x/net/context
golang.org/x/net/http/httpguts
golang.org/x/net/http2
@ -498,11 +500,11 @@ golang.org/x/sys/windows/registry
golang.org/x/sys/windows/svc
golang.org/x/sys/windows/svc/debug
golang.org/x/sys/windows/svc/mgr
# golang.org/x/term v0.13.0
## explicit; go 1.17
# golang.org/x/term v0.15.0
## explicit; go 1.18
golang.org/x/term
# golang.org/x/text v0.13.0
## explicit; go 1.17
# golang.org/x/text v0.14.0
## explicit; go 1.18
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
@ -541,7 +543,7 @@ google.golang.org/appengine/urlfetch
# google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97
## explicit; go 1.19
google.golang.org/genproto/googleapis/api/httpbody
# google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97
# google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails