Bump grpc from 1.7.5 to 1.13.0
Godeps/Godeps.json (generated, 121 lines changed)
@@ -3752,98 +3752,143 @@
Every pre-existing google.golang.org/grpc dependency entry in this hunk is updated from
"Comment": "v1.7.5", "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
to
"Comment": "v1.13.0", "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8":

    google.golang.org/grpc
    google.golang.org/grpc/balancer
    google.golang.org/grpc/codes
    google.golang.org/grpc/connectivity
    google.golang.org/grpc/credentials
    google.golang.org/grpc/grpclb/grpc_lb_v1/messages
    google.golang.org/grpc/grpclog
    google.golang.org/grpc/health
    google.golang.org/grpc/health/grpc_health_v1
    google.golang.org/grpc/internal
    google.golang.org/grpc/keepalive
    google.golang.org/grpc/metadata
    google.golang.org/grpc/naming
    google.golang.org/grpc/peer
    google.golang.org/grpc/resolver
    google.golang.org/grpc/stats
    google.golang.org/grpc/status
    google.golang.org/grpc/tap
    google.golang.org/grpc/transport

New entries are added with "Comment": "v1.13.0" and the same new Rev for:

    google.golang.org/grpc/balancer/base
    google.golang.org/grpc/balancer/roundrobin
    google.golang.org/grpc/encoding
    google.golang.org/grpc/encoding/proto
    google.golang.org/grpc/internal/backoff
    google.golang.org/grpc/internal/channelz
    google.golang.org/grpc/internal/grpcrand
    google.golang.org/grpc/resolver/dns
    google.golang.org/grpc/resolver/passthrough

The surrounding entries (ending with "ImportPath": "gopkg.in/gcfg.v1") are unchanged context.
Godeps/LICENSES (generated, 114331 lines changed)
File diff suppressed because it is too large
@@ -820,79 +820,111 @@
Every pre-existing google.golang.org/grpc entry in this hunk changes
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" to "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
(google.golang.org/grpc and its subpackages balancer, codes, connectivity, credentials,
grpclog, health, health/grpc_health_v1, internal, keepalive, metadata, naming, peer,
resolver, stats, status, tap, and transport).

New entries with the new Rev are added for balancer/base, balancer/roundrobin, encoding,
encoding/proto, internal/backoff, internal/channelz, internal/grpcrand, resolver/dns, and
resolver/passthrough; the entry for google.golang.org/grpc/grpclb/grpc_lb_v1/messages is removed.
The trailing unchanged entry ("ImportPath": "gopkg.in/inf.v0") is context.
staging/src/k8s.io/apiserver/Godeps/Godeps.json (generated, 72 lines changed)
@@ -812,79 +812,111 @@
Same set of changes as above: every pre-existing google.golang.org/grpc entry (grpc, balancer,
codes, connectivity, credentials, grpclog, health, health/grpc_health_v1, internal, keepalive,
metadata, naming, peer, resolver, stats, status, tap, transport) moves from
Rev 5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e to 168a6198bcb0ef175f7dacec0b8691fc141dc9b8;
new entries are added for balancer/base, balancer/roundrobin, encoding, encoding/proto,
internal/backoff, internal/channelz, internal/grpcrand, resolver/dns, and resolver/passthrough;
the grpclb/grpc_lb_v1/messages entry is removed. The trailing "gopkg.in/inf.v0" entry is context.
@@ -416,75 +416,107 @@
Same pattern in this Godeps.json hunk: the pre-existing google.golang.org/grpc entries (grpc,
balancer, codes, connectivity, credentials, grpclog, health/grpc_health_v1, internal, keepalive,
metadata, naming, peer, resolver, stats, status, tap, transport) move from
Rev 5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e to 168a6198bcb0ef175f7dacec0b8691fc141dc9b8;
entries are added for balancer/base, balancer/roundrobin, encoding, encoding/proto,
internal/backoff, internal/channelz, internal/grpcrand, resolver/dns, and resolver/passthrough;
the grpclb/grpc_lb_v1/messages entry is removed. The trailing "gopkg.in/inf.v0" entry is context.
@@ -388,75 +388,107 @@
Identical pattern again: the same pre-existing google.golang.org/grpc entries (grpc, balancer,
codes, connectivity, credentials, grpclog, health/grpc_health_v1, internal, keepalive, metadata,
naming, peer, resolver, stats, status, tap, transport) are re-pinned to
Rev 168a6198bcb0ef175f7dacec0b8691fc141dc9b8, the same nine new subpackage entries are added,
the grpclb/grpc_lb_v1/messages entry is removed, and the trailing "gopkg.in/inf.v0" entry is context.
@@ -304,7 +304,7 @@ func TestObjectSizeResponses(t *testing.T) {
 	const DeploymentTwoMegabyteSize = 1000000

 	expectedMsgFor1MB := `etcdserver: request is too large`
-	expectedMsgFor2MB := `rpc error: code = ResourceExhausted desc = grpc: trying to send message larger than max`
+	expectedMsgFor2MB := `rpc error: code = ResourceExhausted desc = trying to send message larger than max`
 	expectedMsgForLargeAnnotation := `metadata.annotations: Too long: must have at most 262144 characters`

 	deployment1 := constructBody("a", DeploymentMegabyteSize, "labels", t) // >1 MB file
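The new expected message reflects that grpc v1.13.0 no longer prefixes this error text with "grpc: ". Where a test only cares about the error class rather than the exact wording, a small sketch like the following (a hypothetical helper, not part of this commit) matches on the gRPC status code instead, which is less brittle across grpc upgrades:

    package example

    import (
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // isResourceExhausted reports whether err carries the gRPC
    // ResourceExhausted code, regardless of the message text.
    func isResourceExhausted(err error) bool {
        s, ok := status.FromError(err)
        return ok && s.Code() == codes.ResourceExhausted
    }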
vendor/google.golang.org/grpc/.please-update (generated, vendored, 0 lines changed)

vendor/google.golang.org/grpc/.travis.yml (generated, vendored, 14 lines changed)
@@ -1,20 +1,24 @@
 language: go

 go:
+  - 1.6.x
   - 1.7.x
   - 1.8.x
   - 1.9.x
+  - 1.10.x

 matrix:
   include:
-  - go: 1.9.x
-    env: ARCH=386
+  - go: 1.10.x
+    env: RUN386=1

 go_import_path: google.golang.org/grpc

 before_install:
-  - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi
+  - if [[ -n "$RUN386" ]]; then export GOARCH=386; fi
+  - if [[ "$TRAVIS_GO_VERSION" = 1.10* && "$GOARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi

 script:
-  - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh || exit 1; fi
-  - make test testrace
+  - if [[ "$TRAVIS_GO_VERSION" = 1.10* && "$GOARCH" != "386" ]]; then ./vet.sh || exit 1; fi
+  - make test || exit 1
+  - if [[ "$GOARCH" != "386" ]]; then make testrace; fi
vendor/google.golang.org/grpc/BUILD (generated, vendored, 17 lines changed)
@@ -11,7 +11,9 @@ go_library(
         "clientconn.go",
         "codec.go",
         "doc.go",
-        "grpclb.go",
+        "envconfig.go",
+        "go16.go",
+        "go17.go",
         "interceptor.go",
         "picker_wrapper.go",
         "pickfirst.go",
@@ -19,29 +21,37 @@ go_library(
In the rest of the srcs list, "stickiness_linkedmap.go" is added alongside the unchanged
"resolver_conn_wrapper.go", "rpc_util.go", "server.go", "service_config.go", "stream.go",
"trace.go", and "version.go", and an importmap attribute
("k8s.io/kubernetes/vendor/google.golang.org/grpc") is added next to importpath.
In deps, "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages:go_default_library" is
replaced by the new encoding, encoding/proto, balancer/roundrobin, internal/backoff,
internal/channelz, resolver/dns, and resolver/passthrough libraries; the existing protobuf,
x/net (context, http2, trace), balancer, codes, connectivity, credentials, grpclog, internal,
keepalive, metadata, naming, peer, resolver, stats, status, and tap deps are unchanged context.
@@ -64,6 +74,7 @@ filegroup(
         "//vendor/google.golang.org/grpc/codes:all-srcs",
         "//vendor/google.golang.org/grpc/connectivity:all-srcs",
         "//vendor/google.golang.org/grpc/credentials:all-srcs",
+        "//vendor/google.golang.org/grpc/encoding:all-srcs",
         "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages:all-srcs",
         "//vendor/google.golang.org/grpc/grpclog:all-srcs",
         "//vendor/google.golang.org/grpc/health:all-srcs",
vendor/google.golang.org/grpc/CONTRIBUTING.md (generated, vendored, 6 lines changed)
@@ -7,7 +7,7 @@ If you are new to github, please start by reading [Pull Request howto](https://h
 ## Legal requirements

 In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://cla.developers.google.com/clas).
+[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).

 ## Guidelines for Pull Requests
 How to get your contributions merged smoothly and quickly.

@@ -27,6 +27,10 @@ How to get your contributions merged smoothly and quickly.
 - Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change).

 - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on.
+  - `make all` to test everything, OR
+  - `make vet` to catch vet errors
+  - `make test` to run the tests
+  - `make testrace` to run tests in race mode

 - Exceptions to the rules can be made if there's a compelling reason for doing so.
vendor/google.golang.org/grpc/Makefile (generated, vendored, 13 lines changed)
@@ -1,4 +1,4 @@
-all: test testrace
+all: vet test testrace

 deps:
 	go get -d -v google.golang.org/grpc/...

@@ -22,11 +22,14 @@ proto:
 	fi
 	go generate google.golang.org/grpc/...

+vet:
+	./vet.sh
+
 test: testdeps
-	go test -cpu 1,4 google.golang.org/grpc/...
+	go test -cpu 1,4 -timeout 5m google.golang.org/grpc/...

 testrace: testdeps
-	go test -race -cpu 1,4 google.golang.org/grpc/...
+	go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...

 clean:
 	go clean -i google.golang.org/grpc/...

@@ -39,7 +42,7 @@ clean:
 	updatetestdeps \
 	build \
 	proto \
+	vet \
 	test \
 	testrace \
-	clean \
-	coverage
+	clean
vendor/google.golang.org/grpc/README.md (generated, vendored, 4 lines changed)
@@ -1,6 +1,6 @@
 # gRPC-Go

-[](https://travis-ci.org/grpc/grpc-go) [](https://godoc.org/google.golang.org/grpc)
+[](https://travis-ci.org/grpc/grpc-go) [](https://godoc.org/google.golang.org/grpc) [](https://goreportcard.com/report/github.com/grpc/grpc-go)

 The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide.

@@ -16,7 +16,7 @@ $ go get -u google.golang.org/grpc
 Prerequisites
 -------------

-This requires Go 1.7 or later.
+This requires Go 1.6 or later. Go 1.7 will be required soon.

 Constraints
 -----------
vendor/google.golang.org/grpc/backoff.go (generated, vendored, 68 lines changed)
@@ -16,83 +16,23 @@
The file is reduced to a thin compatibility layer: a new comment notes that the backoff
implementation now lives in the internal/backoff package and that this file is kept for the
exported types and API backward compatibility. The exported surface that remains is

    var DefaultBackoffConfig = BackoffConfig{
        MaxDelay: 120 * time.Second,
    }

    // BackoffConfig defines the parameters for the default gRPC backoff strategy.
    type BackoffConfig struct {
        // MaxDelay is the upper bound of backoff delay.
        MaxDelay time.Duration
    }

Removed from this file (now behind the internal package): the "math/rand" import, the
unexported baseDelay/factor/jitter fields of BackoffConfig, the backoffStrategy interface,
setDefaults, and the BackoffConfig.backoff(retries int) method, which multiplied baseDelay by
factor once per retry, capped the result at MaxDelay, and randomized it by the jitter fraction
so that a cluster of reconnecting clients does not operate in lockstep.
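Only MaxDelay remains exported on BackoffConfig; the other knobs are internal after this bump. A minimal sketch of how a caller can still tune the exported field when dialing (the target address is a placeholder; grpc.WithBackoffConfig and grpc.WithInsecure are existing dial options at this version):

    package example

    import (
        "time"

        "google.golang.org/grpc"
    )

    // dialWithCappedBackoff dials target with reconnect backoff capped at 30s.
    // "target" is an illustrative placeholder address.
    func dialWithCappedBackoff(target string) (*grpc.ClientConn, error) {
        return grpc.Dial(target,
            grpc.WithInsecure(),
            grpc.WithBackoffConfig(grpc.BackoffConfig{MaxDelay: 30 * time.Second}),
        )
    }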
vendor/google.golang.org/grpc/balancer.go (generated, vendored, 16 lines changed)
@@ -28,10 +28,12 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/naming"
+	"google.golang.org/grpc/status"
 )

 // Address represents a server the client connects to.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
+//
+// Deprecated: please use package balancer.
 type Address struct {
 	// Addr is the server address on which a connection will be established.
 	Addr string
@@ -41,6 +43,8 @@ type Address struct {
 }

 // BalancerConfig specifies the configurations for Balancer.
+//
+// Deprecated: please use package balancer.
 type BalancerConfig struct {
 	// DialCreds is the transport credential the Balancer implementation can
 	// use to dial to a remote load balancer server. The Balancer implementations
@@ -53,7 +57,8 @@ type BalancerConfig struct {
 }

 // BalancerGetOptions configures a Get call.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
+//
+// Deprecated: please use package balancer.
 type BalancerGetOptions struct {
 	// BlockingWait specifies whether Get should block when there is no
 	// connected address.
@@ -61,7 +66,8 @@ type BalancerGetOptions struct {
 }

 // Balancer chooses network addresses for RPCs.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
+//
+// Deprecated: please use package balancer.
 type Balancer interface {
 	// Start does the initialization work to bootstrap a Balancer. For example,
 	// this function may start the name resolution and watch the updates. It will
@@ -134,6 +140,8 @@ func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr
 // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
 // the name resolution updates and updates the addresses available correspondingly.
+//
+// Deprecated: please use package balancer/roundrobin.
 func RoundRobin(r naming.Resolver) Balancer {
 	return &roundRobin{r: r}
 }
@@ -310,7 +318,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad
 	if !opts.BlockingWait {
 		if len(rr.addrs) == 0 {
 			rr.mu.Unlock()
-			err = Errorf(codes.Unavailable, "there is no address available")
+			err = status.Errorf(codes.Unavailable, "there is no address available")
 			return
 		}
 		// Returns the next addr on rr.addrs for failfast RPCs.
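With the old grpc.Balancer API now marked deprecated, the round-robin policy can instead be selected by name when dialing. A sketch of one possible replacement for the deprecated grpc.RoundRobin(naming.Resolver) helper, noting that grpc.WithBalancerName is an experimental option in this release and the target address is a placeholder:

    package example

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/balancer/roundrobin"
    )

    // dialRoundRobin selects the registered round_robin balancer by name
    // instead of constructing a deprecated grpc.Balancer value.
    func dialRoundRobin(target string) (*grpc.ClientConn, error) {
        return grpc.Dial(target,
            grpc.WithInsecure(),
            grpc.WithBalancerName(roundrobin.Name),
        )
    }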
vendor/google.golang.org/grpc/balancer/BUILD (generated, vendored, 6 lines changed)
@@ -23,7 +23,11 @@ filegroup(

 filegroup(
     name = "all-srcs",
-    srcs = [":package-srcs"],
+    srcs = [
+        ":package-srcs",
+        "//vendor/google.golang.org/grpc/balancer/base:all-srcs",
+        "//vendor/google.golang.org/grpc/balancer/roundrobin:all-srcs",
+    ],
     tags = ["automanaged"],
     visibility = ["//visibility:public"],
 )
vendor/google.golang.org/grpc/balancer/balancer.go (generated, vendored, 86 lines changed)
@@ -23,6 +23,7 @@ package balancer
The "strings" import is added alongside "errors", "net", x/net/context, connectivity, and resolver.
@@ -33,24 +34,26 @@
The defaultBuilder variable (with its "TODO(bar) install pickfirst as default" comment) is
dropped from the registry. Register now documents that b.Name() is lowercased before being
stored and does m[strings.ToLower(b.Name())] = b instead of m[b.Name()] = b. Get becomes a
case-insensitive lookup (m[strings.ToLower(name)]) and returns nil when no builder is
registered, instead of falling back to a default pickfirst builder.
@@ -66,6 +69,11 @@ and @@ -83,6 +91,11 @@
The SubConn and ClientConn interface docs gain a note that these interfaces are implemented by
gRPC itself and that test implementations should embed the interface so gRPC can add methods.
@@ -99,6 +112,9 @@
ClientConn gains a Target() string method that returns the dial target for the ClientConn.
@@ -113,6 +129,8 @@
BuildOptions gains a ChannelzParentID int64 field (the parent entity's channelz identification number).
@@ -131,6 +149,10 @@
DoneInfo gains an Err error field recording the error the RPC finished with (which may be nil),
in addition to the existing BytesSent and BytesReceived fields.
@@ -143,7 +165,7 @@ and @@ -161,7 +183,7 @@
Doc wording fixes in Picker: "everytime" becomes "every time", and the blocking behaviour is
described as waiting "until UpdateBalancerState() is called" rather than "until a new picker is
updated".
@@ -204,3 +226,45 @@
A new exported ConnectivityStateEvaluator type is appended, with numReady / numConnecting /
numTransientFailure counters and a RecordTransition(oldState, newState) method that aggregates
SubConn states: Ready if at least one SubConn is Ready, else Connecting if at least one is
Connecting, else TransientFailure (Idle and Shutdown are not considered).
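Because Register now lowercases the builder name and Get does a case-insensitive lookup that returns nil on a miss, registration is expected to happen at init time. A short sketch of that pattern, using a hypothetical builder type that only illustrates naming and lookup:

    package example

    import "google.golang.org/grpc/balancer"

    // myBuilder is a stand-in for a real balancer.Builder implementation;
    // the embedded interface is left nil here because only Name is exercised.
    type myBuilder struct{ balancer.Builder }

    func (myBuilder) Name() string { return "MyPolicy" }

    func init() {
        // Registration must happen during initialization; the name is stored lowercased.
        balancer.Register(myBuilder{})
    }

    func lookup() balancer.Builder {
        // Lookup is case-insensitive, so "mypolicy" finds the builder above;
        // an unknown name now yields nil instead of a default builder.
        return balancer.Get("mypolicy")
    }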
vendor/google.golang.org/grpc/balancer/base/BUILD (generated, vendored, new file, 33 lines)
@@ -0,0 +1,33 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "balancer.go",
        "base.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/balancer/base",
    importpath = "google.golang.org/grpc/balancer/base",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/golang.org/x/net/context:go_default_library",
        "//vendor/google.golang.org/grpc/balancer:go_default_library",
        "//vendor/google.golang.org/grpc/connectivity:go_default_library",
        "//vendor/google.golang.org/grpc/grpclog:go_default_library",
        "//vendor/google.golang.org/grpc/resolver:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
208
vendor/google.golang.org/grpc/balancer/base/balancer.go
generated
vendored
Normal file
208
vendor/google.golang.org/grpc/balancer/base/balancer.go
generated
vendored
Normal file
@@ -0,0 +1,208 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package base
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
type baseBuilder struct {
|
||||
name string
|
||||
pickerBuilder PickerBuilder
|
||||
}
|
||||
|
||||
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
||||
return &baseBalancer{
|
||||
cc: cc,
|
||||
pickerBuilder: bb.pickerBuilder,
|
||||
|
||||
subConns: make(map[resolver.Address]balancer.SubConn),
|
||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||
csEvltr: &connectivityStateEvaluator{},
|
||||
// Initialize picker to a picker that always return
|
||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||
// may call UpdateBalancerState with this picker.
|
||||
picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
|
||||
}
|
||||
}
|
||||
|
||||
func (bb *baseBuilder) Name() string {
|
||||
return bb.name
|
||||
}
|
||||
|
||||
type baseBalancer struct {
|
||||
cc balancer.ClientConn
|
||||
pickerBuilder PickerBuilder
|
||||
|
||||
csEvltr *connectivityStateEvaluator
|
||||
state connectivity.State
|
||||
|
||||
subConns map[resolver.Address]balancer.SubConn
|
||||
scStates map[balancer.SubConn]connectivity.State
|
||||
picker balancer.Picker
|
||||
}
|
||||
|
||||
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
|
||||
if err != nil {
|
||||
grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
|
||||
return
|
||||
}
|
||||
grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
|
||||
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
|
||||
addrsSet := make(map[resolver.Address]struct{})
|
||||
for _, a := range addrs {
|
||||
addrsSet[a] = struct{}{}
|
||||
if _, ok := b.subConns[a]; !ok {
|
||||
// a is a new address (not existing in b.subConns).
|
||||
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
|
||||
continue
|
||||
}
|
||||
b.subConns[a] = sc
|
||||
b.scStates[sc] = connectivity.Idle
|
||||
sc.Connect()
|
||||
}
|
||||
}
|
||||
for a, sc := range b.subConns {
|
||||
// a was removed by resolver.
|
||||
if _, ok := addrsSet[a]; !ok {
|
||||
b.cc.RemoveSubConn(sc)
|
||||
delete(b.subConns, a)
|
||||
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
||||
// The entry will be deleted in HandleSubConnStateChange.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// regeneratePicker takes a snapshot of the balancer, and generates a picker
|
||||
// from it. The picker is
|
||||
// - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
|
||||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||
func (b *baseBalancer) regeneratePicker() {
|
||||
if b.state == connectivity.TransientFailure {
|
||||
b.picker = NewErrPicker(balancer.ErrTransientFailure)
|
||||
return
|
||||
}
|
||||
readySCs := make(map[resolver.Address]balancer.SubConn)
|
||||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[addr] = sc
|
||||
}
|
||||
}
|
||||
	b.picker = b.pickerBuilder.Build(readySCs)
}

func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
	grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
	oldS, ok := b.scStates[sc]
	if !ok {
		grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
		return
	}
	b.scStates[sc] = s
	switch s {
	case connectivity.Idle:
		sc.Connect()
	case connectivity.Shutdown:
		// When an address was removed by resolver, b called RemoveSubConn but
		// kept the sc's state in scStates. Remove state for this sc here.
		delete(b.scStates, sc)
	}

	oldAggrState := b.state
	b.state = b.csEvltr.recordTransition(oldS, s)

	// Regenerate picker when one of the following happens:
	// - this sc became ready from not-ready
	// - this sc became not-ready from ready
	// - the aggregated state of balancer became TransientFailure from non-TransientFailure
	// - the aggregated state of balancer became non-TransientFailure from TransientFailure
	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
		(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
		b.regeneratePicker()
	}

	b.cc.UpdateBalancerState(b.state, b.picker)
}

// Close is a nop because base balancer doesn't have internal state to clean up,
// and it doesn't need to call RemoveSubConn for the SubConns.
func (b *baseBalancer) Close() {
}

// NewErrPicker returns a picker that always returns err on Pick().
func NewErrPicker(err error) balancer.Picker {
	return &errPicker{err: err}
}

type errPicker struct {
	err error // Pick() always returns this err.
}

func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	return nil, nil, p.err
}

// connectivityStateEvaluator gets updated by addrConns when their
// states transition, based on which it evaluates the state of
// ClientConn.
type connectivityStateEvaluator struct {
	numReady            uint64 // Number of addrConns in ready state.
	numConnecting       uint64 // Number of addrConns in connecting state.
	numTransientFailure uint64 // Number of addrConns in transientFailure.
}

// recordTransition records state change happening in every subConn and based on
// that it evaluates what aggregated state should be.
// It can only transition between Ready, Connecting and TransientFailure. Other states,
// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
// before any subConn is created ClientConn is in idle state. In the end when ClientConn
// closes it is in Shutdown state.
//
// recordTransition should only be called synchronously from the same goroutine.
func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
	// Update counters.
	for idx, state := range []connectivity.State{oldState, newState} {
		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
		switch state {
		case connectivity.Ready:
			cse.numReady += updateVal
		case connectivity.Connecting:
			cse.numConnecting += updateVal
		case connectivity.TransientFailure:
			cse.numTransientFailure += updateVal
		}
	}

	// Evaluate.
	if cse.numReady > 0 {
		return connectivity.Ready
	}
	if cse.numConnecting > 0 {
		return connectivity.Connecting
	}
	return connectivity.TransientFailure
}
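The evaluator above relies on unsigned wrap-around (2*uint64(idx) - 1 is +1 for the new state and acts as -1 for the old one) to keep its counters, then collapses them with a simple priority rule. A standalone sketch of that rule; the function and values below are illustrative, not part of the vendored code:

package main

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
)

// aggregate mirrors the Evaluate step of recordTransition: any READY SubConn
// makes the ClientConn READY, otherwise any CONNECTING SubConn makes it
// CONNECTING, otherwise the aggregate is TRANSIENT_FAILURE.
func aggregate(numReady, numConnecting uint64) connectivity.State {
	if numReady > 0 {
		return connectivity.Ready
	}
	if numConnecting > 0 {
		return connectivity.Connecting
	}
	return connectivity.TransientFailure
}

func main() {
	fmt.Println(aggregate(0, 2)) // CONNECTING
	fmt.Println(aggregate(1, 2)) // READY
	fmt.Println(aggregate(0, 0)) // TRANSIENT_FAILURE
}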
52
vendor/google.golang.org/grpc/balancer/base/base.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package base defines a balancer base that can be used to build balancers with
// different picking algorithms.
//
// The base balancer creates a new SubConn for each resolved address. The
// provided picker will only be notified about READY SubConns.
//
// This package is the base of round_robin balancer, its purpose is to be used
// to build round_robin like balancers with complex picking algorithms.
// Balancers with more complicated logic should try to implement a balancer
// builder from scratch.
//
// All APIs in this package are experimental.
package base

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// PickerBuilder creates balancer.Picker.
type PickerBuilder interface {
	// Build takes a slice of ready SubConns, and returns a picker that will be
	// used by gRPC to pick a SubConn.
	Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
}

// NewBalancerBuilder returns a balancer builder. The balancers
// built by this builder will use the picker builder to build pickers.
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
	return &baseBuilder{
		name:          name,
		pickerBuilder: pb,
	}
}
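A minimal sketch of how this package is meant to be consumed: a hypothetical "first_ready" policy that always picks the first READY SubConn. It follows the same shape as the round_robin balancer added below; the package and type names are illustrative only.

package firstready

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

type firstPickerBuilder struct{}

// Build receives only READY SubConns, as documented above, and snapshots them
// into a picker.
func (*firstPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	var scs []balancer.SubConn
	for _, sc := range readySCs {
		scs = append(scs, sc)
	}
	return &firstPicker{subConns: scs}
}

type firstPicker struct {
	subConns []balancer.SubConn
}

func (p *firstPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if len(p.subConns) == 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	return p.subConns[0], nil, nil
}

func init() {
	// Registering under an illustrative name makes the policy selectable by
	// clients; a real policy should pick a stable, documented name.
	balancer.Register(base.NewBalancerBuilder("first_ready", &firstPickerBuilder{}))
}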
30
vendor/google.golang.org/grpc/balancer/roundrobin/BUILD
generated
vendored
Normal file
@@ -0,0 +1,30 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["roundrobin.go"],
    importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/balancer/roundrobin",
    importpath = "google.golang.org/grpc/balancer/roundrobin",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/golang.org/x/net/context:go_default_library",
        "//vendor/google.golang.org/grpc/balancer:go_default_library",
        "//vendor/google.golang.org/grpc/balancer/base:go_default_library",
        "//vendor/google.golang.org/grpc/grpclog:go_default_library",
        "//vendor/google.golang.org/grpc/resolver:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
79
vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is
// installed as one of the default balancers in gRPC, users don't need to
// explicitly install this balancer.
package roundrobin

import (
	"sync"

	"golang.org/x/net/context"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/resolver"
)

// Name is the name of round_robin balancer.
const Name = "round_robin"

// newBuilder creates a new roundrobin balancer builder.
func newBuilder() balancer.Builder {
	return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
}

func init() {
	balancer.Register(newBuilder())
}

type rrPickerBuilder struct{}

func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
	var scs []balancer.SubConn
	for _, sc := range readySCs {
		scs = append(scs, sc)
	}
	return &rrPicker{
		subConns: scs,
	}
}

type rrPicker struct {
	// subConns is the snapshot of the roundrobin balancer when this picker was
	// created. The slice is immutable. Each Get() will do a round robin
	// selection from it and return the selected SubConn.
	subConns []balancer.SubConn

	mu   sync.Mutex
	next int
}

func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if len(p.subConns) <= 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}

	p.mu.Lock()
	sc := p.subConns[p.next]
	p.next = (p.next + 1) % len(p.subConns)
	p.mu.Unlock()
	return sc, nil, nil
}
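Even though round_robin is registered by default, a client still opts into it by name when dialing. A hedged usage sketch; the target is illustrative, and grpc.WithBalancerName is assumed to be the dial option for selecting a registered balancer in this gRPC version:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer/roundrobin"
)

func main() {
	conn, err := grpc.Dial(
		"dns:///my-service.example.com:443", // illustrative target
		grpc.WithInsecure(),
		grpc.WithBalancerName(roundrobin.Name), // spread RPCs across resolved addresses
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}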
68
vendor/google.golang.org/grpc/balancer_conn_wrappers.go
generated
vendored
@@ -19,6 +19,7 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
@@ -73,7 +74,7 @@ func (b *scStateUpdateBuffer) load() {
|
||||
}
|
||||
}
|
||||
|
||||
// get returns the channel that receives a recvMsg in the buffer.
|
||||
// get returns the channel that the scStateUpdate will be sent to.
|
||||
//
|
||||
// Upon receiving, the caller should call load to send another
|
||||
// scStateChangeTuple onto the channel if there is any.
|
||||
@@ -96,6 +97,9 @@ type ccBalancerWrapper struct {
|
||||
stateChangeQueue *scStateUpdateBuffer
|
||||
resolverUpdateCh chan *resolverUpdate
|
||||
done chan struct{}
|
||||
|
||||
mu sync.Mutex
|
||||
subConns map[*acBalancerWrapper]struct{}
|
||||
}
|
||||
|
||||
func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
||||
@@ -104,21 +108,34 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
|
||||
stateChangeQueue: newSCStateUpdateBuffer(),
|
||||
resolverUpdateCh: make(chan *resolverUpdate, 1),
|
||||
done: make(chan struct{}),
|
||||
subConns: make(map[*acBalancerWrapper]struct{}),
|
||||
}
|
||||
go ccb.watcher()
|
||||
ccb.balancer = b.Build(ccb, bopts)
|
||||
return ccb
|
||||
}
|
||||
|
||||
// watcher balancer functions sequencially, so the balancer can be implemeneted
|
||||
// watcher balancer functions sequentially, so the balancer can be implemented
|
||||
// lock-free.
|
||||
func (ccb *ccBalancerWrapper) watcher() {
|
||||
for {
|
||||
select {
|
||||
case t := <-ccb.stateChangeQueue.get():
|
||||
ccb.stateChangeQueue.load()
|
||||
select {
|
||||
case <-ccb.done:
|
||||
ccb.balancer.Close()
|
||||
return
|
||||
default:
|
||||
}
|
||||
ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
|
||||
case t := <-ccb.resolverUpdateCh:
|
||||
select {
|
||||
case <-ccb.done:
|
||||
ccb.balancer.Close()
|
||||
return
|
||||
default:
|
||||
}
|
||||
ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
|
||||
case <-ccb.done:
|
||||
}
|
||||
@@ -126,6 +143,13 @@ func (ccb *ccBalancerWrapper) watcher() {
|
||||
select {
|
||||
case <-ccb.done:
|
||||
ccb.balancer.Close()
|
||||
ccb.mu.Lock()
|
||||
scs := ccb.subConns
|
||||
ccb.subConns = nil
|
||||
ccb.mu.Unlock()
|
||||
for acbw := range scs {
|
||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||
}
|
||||
return
|
||||
default:
|
||||
}
|
||||
@@ -165,33 +189,54 @@ func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
||||
grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs)
|
||||
if len(addrs) <= 0 {
|
||||
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
|
||||
}
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
|
||||
}
|
||||
ac, err := ccb.cc.newAddrConn(addrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
acbw := &acBalancerWrapper{ac: ac}
|
||||
ac.mu.Lock()
|
||||
acbw.ac.mu.Lock()
|
||||
ac.acbw = acbw
|
||||
ac.mu.Unlock()
|
||||
acbw.ac.mu.Unlock()
|
||||
ccb.subConns[acbw] = struct{}{}
|
||||
return acbw, nil
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
||||
grpclog.Infof("ccBalancerWrapper: removing subconn")
|
||||
acbw, ok := sc.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
return
|
||||
}
|
||||
delete(ccb.subConns, acbw)
|
||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
|
||||
grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p)
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
return
|
||||
}
|
||||
ccb.cc.csMgr.updateState(s)
|
||||
ccb.cc.blockingpicker.updatePicker(p)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
|
||||
ccb.cc.resolveNow(o)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) Target() string {
|
||||
return ccb.cc.target
|
||||
}
|
||||
@@ -204,9 +249,12 @@ type acBalancerWrapper struct {
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
||||
grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs)
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
if len(addrs) <= 0 {
|
||||
acbw.ac.tearDown(errConnDrain)
|
||||
return
|
||||
}
|
||||
if !acbw.ac.tryUpdateAddrs(addrs) {
|
||||
cc := acbw.ac.cc
|
||||
acbw.ac.mu.Lock()
|
||||
@@ -234,7 +282,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
||||
ac.acbw = acbw
|
||||
ac.mu.Unlock()
|
||||
if acState != connectivity.Idle {
|
||||
ac.connect(false)
|
||||
ac.connect()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -242,7 +290,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
||||
func (acbw *acBalancerWrapper) Connect() {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
acbw.ac.connect(false)
|
||||
acbw.ac.connect()
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
|
||||
|
||||
83
vendor/google.golang.org/grpc/balancer_v1_wrapper.go
generated
vendored
@@ -19,6 +19,7 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
@@ -27,6 +28,7 @@ import (
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type balancerWrapperBuilder struct {
|
||||
@@ -34,7 +36,13 @@ type balancerWrapperBuilder struct {
|
||||
}
|
||||
|
||||
func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
|
||||
bwb.b.Start(cc.Target(), BalancerConfig{
|
||||
targetAddr := cc.Target()
|
||||
targetSplitted := strings.Split(targetAddr, ":///")
|
||||
if len(targetSplitted) >= 2 {
|
||||
targetAddr = targetSplitted[1]
|
||||
}
|
||||
|
||||
bwb.b.Start(targetAddr, BalancerConfig{
|
||||
DialCreds: opts.DialCreds,
|
||||
Dialer: opts.Dialer,
|
||||
})
|
||||
@@ -43,10 +51,11 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B
|
||||
balancer: bwb.b,
|
||||
pickfirst: pickfirst,
|
||||
cc: cc,
|
||||
targetAddr: targetAddr,
|
||||
startCh: make(chan struct{}),
|
||||
conns: make(map[resolver.Address]balancer.SubConn),
|
||||
connSt: make(map[balancer.SubConn]*scState),
|
||||
csEvltr: &connectivityStateEvaluator{},
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
state: connectivity.Idle,
|
||||
}
|
||||
cc.UpdateBalancerState(connectivity.Idle, bw)
|
||||
@@ -69,10 +78,7 @@ type balancerWrapper struct {
|
||||
pickfirst bool
|
||||
|
||||
cc balancer.ClientConn
|
||||
|
||||
// To aggregate the connectivity state.
|
||||
csEvltr *connectivityStateEvaluator
|
||||
state connectivity.State
|
||||
targetAddr string // Target without the scheme.
|
||||
|
||||
mu sync.Mutex
|
||||
conns map[resolver.Address]balancer.SubConn
|
||||
@@ -82,18 +88,21 @@ type balancerWrapper struct {
|
||||
// - NewSubConn is created, cc wants to notify balancer of state changes;
|
||||
// - Build hasn't returned, cc doesn't have access to balancer.
|
||||
startCh chan struct{}
|
||||
|
||||
// To aggregate the connectivity state.
|
||||
csEvltr *balancer.ConnectivityStateEvaluator
|
||||
state connectivity.State
|
||||
}
|
||||
|
||||
// lbWatcher watches the Notify channel of the balancer and manages
|
||||
// connections accordingly.
|
||||
func (bw *balancerWrapper) lbWatcher() {
|
||||
<-bw.startCh
|
||||
grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst)
|
||||
notifyCh := bw.balancer.Notify()
|
||||
if notifyCh == nil {
|
||||
// There's no resolver in the balancer. Connect directly.
|
||||
a := resolver.Address{
|
||||
Addr: bw.cc.Target(),
|
||||
Addr: bw.targetAddr,
|
||||
Type: resolver.Backend,
|
||||
}
|
||||
sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
|
||||
@@ -103,7 +112,7 @@ func (bw *balancerWrapper) lbWatcher() {
|
||||
bw.mu.Lock()
|
||||
bw.conns[a] = sc
|
||||
bw.connSt[sc] = &scState{
|
||||
addr: Address{Addr: bw.cc.Target()},
|
||||
addr: Address{Addr: bw.targetAddr},
|
||||
s: connectivity.Idle,
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
@@ -165,10 +174,10 @@ func (bw *balancerWrapper) lbWatcher() {
|
||||
sc.Connect()
|
||||
}
|
||||
} else {
|
||||
oldSC.UpdateAddresses(newAddrs)
|
||||
bw.mu.Lock()
|
||||
bw.connSt[oldSC].addr = addrs[0]
|
||||
bw.mu.Unlock()
|
||||
oldSC.UpdateAddresses(newAddrs)
|
||||
}
|
||||
} else {
|
||||
var (
|
||||
@@ -221,7 +230,6 @@ func (bw *balancerWrapper) lbWatcher() {
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s)
|
||||
bw.mu.Lock()
|
||||
defer bw.mu.Unlock()
|
||||
scSt, ok := bw.connSt[sc]
|
||||
@@ -240,7 +248,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
|
||||
scSt.down(errConnClosing)
|
||||
}
|
||||
}
|
||||
sa := bw.csEvltr.recordTransition(oldS, s)
|
||||
sa := bw.csEvltr.RecordTransition(oldS, s)
|
||||
if bw.state != sa {
|
||||
bw.state = sa
|
||||
}
|
||||
@@ -249,7 +257,6 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
|
||||
// Remove state for this sc.
|
||||
delete(bw.connSt, sc)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
|
||||
@@ -262,7 +269,6 @@ func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
|
||||
}
|
||||
// There should be a resolver inside the balancer.
|
||||
// All updates here, if any, are ignored.
|
||||
return
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) Close() {
|
||||
@@ -274,7 +280,6 @@ func (bw *balancerWrapper) Close() {
|
||||
close(bw.startCh)
|
||||
}
|
||||
bw.balancer.Close()
|
||||
return
|
||||
}
|
||||
|
||||
// The picker is the balancerWrapper itself.
|
||||
@@ -310,58 +315,14 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
|
||||
Metadata: a.Metadata,
|
||||
}]
|
||||
if !ok && failfast {
|
||||
return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
|
||||
}
|
||||
if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
|
||||
// If the returned sc is not ready and RPC is failfast,
|
||||
// return error, and this RPC will fail.
|
||||
return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
|
||||
}
|
||||
}
|
||||
|
||||
return sc, done, nil
|
||||
}
|
||||
|
||||
// connectivityStateEvaluator gets updated by addrConns when their
|
||||
// states transition, based on which it evaluates the state of
|
||||
// ClientConn.
|
||||
type connectivityStateEvaluator struct {
|
||||
mu sync.Mutex
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
numTransientFailure uint64 // Number of addrConns in transientFailure.
|
||||
}
|
||||
|
||||
// recordTransition records state change happening in every subConn and based on
|
||||
// that it evaluates what aggregated state should be.
|
||||
// It can only transition between Ready, Connecting and TransientFailure. Other states,
|
||||
// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
|
||||
// before any subConn is created ClientConn is in idle state. In the end when ClientConn
|
||||
// closes it is in Shutdown state.
|
||||
// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state.
|
||||
func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
|
||||
cse.mu.Lock()
|
||||
defer cse.mu.Unlock()
|
||||
|
||||
// Update counters.
|
||||
for idx, state := range []connectivity.State{oldState, newState} {
|
||||
updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
|
||||
switch state {
|
||||
case connectivity.Ready:
|
||||
cse.numReady += updateVal
|
||||
case connectivity.Connecting:
|
||||
cse.numConnecting += updateVal
|
||||
case connectivity.TransientFailure:
|
||||
cse.numTransientFailure += updateVal
|
||||
}
|
||||
}
|
||||
|
||||
// Evaluate.
|
||||
if cse.numReady > 0 {
|
||||
return connectivity.Ready
|
||||
}
|
||||
if cse.numConnecting > 0 {
|
||||
return connectivity.Connecting
|
||||
}
|
||||
return connectivity.TransientFailure
|
||||
}
|
||||
|
||||
304
vendor/google.golang.org/grpc/call.go
generated
vendored
@@ -19,289 +19,75 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// recvResponse receives and parses an RPC response.
|
||||
// On error, it returns the error and indicates whether the call should be retried.
|
||||
// Invoke sends the RPC request on the wire and returns after response is
|
||||
// received. This is typically called by generated code.
|
||||
//
|
||||
// TODO(zhaoq): Check whether the received message sequence is valid.
|
||||
// TODO ctx is used for stats collection and processing. It is the context passed from the application.
|
||||
func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
|
||||
// Try to acquire header metadata from the server if there is any.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
t.CloseStream(stream, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
c.headerMD, err = stream.Header()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
p := &parser{r: stream}
|
||||
var inPayload *stats.InPayload
|
||||
if dopts.copts.StatsHandler != nil {
|
||||
inPayload = &stats.InPayload{
|
||||
Client: true,
|
||||
}
|
||||
}
|
||||
for {
|
||||
if c.maxReceiveMessageSize == nil {
|
||||
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
|
||||
}
|
||||
if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
|
||||
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
|
||||
// Fix the order if necessary.
|
||||
dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
|
||||
}
|
||||
c.trailerMD = stream.Trailer()
|
||||
return nil
|
||||
}
|
||||
// All errors returned by Invoke are compatible with the status package.
|
||||
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
|
||||
// allow interceptor to see all applicable call options, which means those
|
||||
// configured as defaults from dial option as well as per-call options
|
||||
opts = combine(cc.dopts.callOptions, opts)
|
||||
|
||||
// sendRequest writes out various information of an RPC such as Context and Message.
|
||||
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// If err is connection error, t will be closed, no need to close stream here.
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
t.CloseStream(stream, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
var (
|
||||
cbuf *bytes.Buffer
|
||||
outPayload *stats.OutPayload
|
||||
)
|
||||
if compressor != nil {
|
||||
cbuf = new(bytes.Buffer)
|
||||
}
|
||||
if dopts.copts.StatsHandler != nil {
|
||||
outPayload = &stats.OutPayload{
|
||||
Client: true,
|
||||
}
|
||||
}
|
||||
hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.maxSendMessageSize == nil {
|
||||
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
|
||||
}
|
||||
if len(data) > *c.maxSendMessageSize {
|
||||
return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize)
|
||||
}
|
||||
err = t.Write(stream, hdr, data, opts)
|
||||
if err == nil && outPayload != nil {
|
||||
outPayload.SentTime = time.Now()
|
||||
dopts.copts.StatsHandler.HandleRPC(ctx, outPayload)
|
||||
}
|
||||
// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
|
||||
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
|
||||
// recvResponse to get the final status.
|
||||
if err != nil && err != io.EOF {
|
||||
return err
|
||||
}
|
||||
// Sent successfully.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Invoke sends the RPC request on the wire and returns after response is received.
|
||||
// Invoke is called by generated code. Also users can call Invoke directly when it
|
||||
// is really needed in their use cases.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||
if cc.dopts.unaryInt != nil {
|
||||
return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
|
||||
}
|
||||
return invoke(ctx, method, args, reply, cc, opts...)
|
||||
}
|
||||
|
||||
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
|
||||
c := defaultCallInfo()
|
||||
mc := cc.GetMethodConfig(method)
|
||||
if mc.WaitForReady != nil {
|
||||
c.failFast = !*mc.WaitForReady
|
||||
func combine(o1 []CallOption, o2 []CallOption) []CallOption {
|
||||
// we don't use append because o1 could have extra capacity whose
|
||||
// elements would be overwritten, which could cause inadvertent
|
||||
// sharing (and race connditions) between concurrent calls
|
||||
if len(o1) == 0 {
|
||||
return o2
|
||||
} else if len(o2) == 0 {
|
||||
return o1
|
||||
}
|
||||
ret := make([]CallOption, len(o1)+len(o2))
|
||||
copy(ret, o1)
|
||||
copy(ret[len(o1):], o2)
|
||||
return ret
|
||||
}
|
||||
|
||||
if mc.Timeout != nil && *mc.Timeout >= 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
|
||||
defer cancel()
|
||||
}
|
||||
// Invoke sends the RPC request on the wire and returns after response is
|
||||
// received. This is typically called by generated code.
|
||||
//
|
||||
// DEPRECATED: Use ClientConn.Invoke instead.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||
return cc.Invoke(ctx, method, args, reply, opts...)
|
||||
}
|
||||
|
||||
opts = append(cc.dopts.callOptions, opts...)
|
||||
for _, o := range opts {
|
||||
if err := o.before(c); err != nil {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
for _, o := range opts {
|
||||
o.after(c)
|
||||
}
|
||||
}()
|
||||
var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
|
||||
|
||||
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
|
||||
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
|
||||
|
||||
if EnableTracing {
|
||||
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||
defer c.traceInfo.tr.Finish()
|
||||
c.traceInfo.firstLine.client = true
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
|
||||
}
|
||||
c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
|
||||
// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
|
||||
defer func() {
|
||||
if e != nil {
|
||||
c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
|
||||
c.traceInfo.tr.SetError()
|
||||
}
|
||||
}()
|
||||
}
|
||||
ctx = newContextWithRPCInfo(ctx, c.failFast)
|
||||
sh := cc.dopts.copts.StatsHandler
|
||||
if sh != nil {
|
||||
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
|
||||
begin := &stats.Begin{
|
||||
Client: true,
|
||||
BeginTime: time.Now(),
|
||||
FailFast: c.failFast,
|
||||
}
|
||||
sh.HandleRPC(ctx, begin)
|
||||
defer func() {
|
||||
end := &stats.End{
|
||||
Client: true,
|
||||
EndTime: time.Now(),
|
||||
Error: e,
|
||||
}
|
||||
sh.HandleRPC(ctx, end)
|
||||
}()
|
||||
}
|
||||
topts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||
// TODO: implement retries in clientStream and make this simply
|
||||
// newClientStream, SendMsg, RecvMsg.
|
||||
firstAttempt := true
|
||||
for {
|
||||
var (
|
||||
err error
|
||||
t transport.ClientTransport
|
||||
stream *transport.Stream
|
||||
// Record the done handler from Balancer.Get(...). It is called once the
|
||||
// RPC has completed or failed.
|
||||
done func(balancer.DoneInfo)
|
||||
)
|
||||
// TODO(zhaoq): Need a formal spec of fail-fast.
|
||||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
Method: method,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
}
|
||||
if c.creds != nil {
|
||||
callHdr.Creds = c.creds
|
||||
}
|
||||
|
||||
t, done, err = cc.getTransport(ctx, c.failFast)
|
||||
csInt, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
|
||||
if err != nil {
|
||||
// TODO(zhaoq): Probably revisit the error handling.
|
||||
if _, ok := status.FromError(err); ok {
|
||||
return err
|
||||
}
|
||||
if err == errConnClosing || err == errConnUnavailable {
|
||||
if c.failFast {
|
||||
return Errorf(codes.Unavailable, "%v", err)
|
||||
}
|
||||
cs := csInt.(*clientStream)
|
||||
if err := cs.SendMsg(req); err != nil {
|
||||
if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
|
||||
// TODO: Add a field to header for grpc-transparent-retry-attempts
|
||||
firstAttempt = false
|
||||
continue
|
||||
}
|
||||
// All the other errors are treated as Internal errors.
|
||||
return Errorf(codes.Internal, "%v", err)
|
||||
return err
|
||||
}
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
|
||||
}
|
||||
stream, err = t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
if done != nil {
|
||||
if _, ok := err.(transport.ConnectionError); ok {
|
||||
// If error is connection error, transport was sending data on wire,
|
||||
// and we are not sure if anything has been sent on wire.
|
||||
// If error is not connection error, we are sure nothing has been sent.
|
||||
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
|
||||
}
|
||||
done(balancer.DoneInfo{Err: err})
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
|
||||
if err := cs.RecvMsg(reply); err != nil {
|
||||
if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
|
||||
// TODO: Add a field to header for grpc-transparent-retry-attempts
|
||||
firstAttempt = false
|
||||
continue
|
||||
}
|
||||
return toRPCErr(err)
|
||||
return err
|
||||
}
|
||||
if peer, ok := peer.FromContext(stream.Context()); ok {
|
||||
c.peer = peer
|
||||
}
|
||||
err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts)
|
||||
if err != nil {
|
||||
if done != nil {
|
||||
updateRPCInfoInContext(ctx, rpcInfo{
|
||||
bytesSent: stream.BytesSent(),
|
||||
bytesReceived: stream.BytesReceived(),
|
||||
})
|
||||
done(balancer.DoneInfo{Err: err})
|
||||
}
|
||||
// Retry a non-failfast RPC when
|
||||
// i) there is a connection error; or
|
||||
// ii) the server started to drain before this RPC was initiated.
|
||||
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
|
||||
continue
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
err = recvResponse(ctx, cc.dopts, t, c, stream, reply)
|
||||
if err != nil {
|
||||
if done != nil {
|
||||
updateRPCInfoInContext(ctx, rpcInfo{
|
||||
bytesSent: stream.BytesSent(),
|
||||
bytesReceived: stream.BytesReceived(),
|
||||
})
|
||||
done(balancer.DoneInfo{Err: err})
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
|
||||
continue
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
|
||||
}
|
||||
t.CloseStream(stream, nil)
|
||||
if done != nil {
|
||||
updateRPCInfoInContext(ctx, rpcInfo{
|
||||
bytesSent: stream.BytesSent(),
|
||||
bytesReceived: stream.BytesReceived(),
|
||||
})
|
||||
done(balancer.DoneInfo{Err: err})
|
||||
}
|
||||
return stream.Status().Err()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
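The rewrite above turns a unary call into newClientStream + SendMsg + RecvMsg, moves Invoke onto ClientConn, and combines dial-time default CallOptions ahead of per-call ones. A hedged sketch of the caller-side shape; the method name and message types are placeholders for generated code:

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// call issues a unary RPC via ClientConn.Invoke; the per-call FailFast option
// is appended after the dial-time defaults by combine().
func call(conn *grpc.ClientConn, req, reply interface{}) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	return conn.Invoke(ctx, "/pkg.Service/Method", req, reply, grpc.FailFast(false))
}

func main() {
	conn, err := grpc.Dial(
		"example.com:443", // illustrative target
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(8<<20)),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	_ = call // wiring req and reply requires generated protobuf types
}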
910
vendor/google.golang.org/grpc/clientconn.go
generated
vendored
File diff suppressed because it is too large
88
vendor/google.golang.org/grpc/codec.go
generated
vendored
@@ -19,86 +19,32 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/encoding"
|
||||
_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
|
||||
)
|
||||
|
||||
// baseCodec contains the functionality of both Codec and encoding.Codec, but
|
||||
// omits the name/string, which vary between the two and are not needed for
|
||||
// anything besides the registry in the encoding package.
|
||||
type baseCodec interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
}
|
||||
|
||||
var _ baseCodec = Codec(nil)
|
||||
var _ baseCodec = encoding.Codec(nil)
|
||||
|
||||
// Codec defines the interface gRPC uses to encode and decode messages.
|
||||
// Note that implementations of this interface must be thread safe;
|
||||
// a Codec's methods can be called from concurrent goroutines.
|
||||
//
|
||||
// Deprecated: use encoding.Codec instead.
|
||||
type Codec interface {
|
||||
// Marshal returns the wire format of v.
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
// Unmarshal parses the wire format into v.
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
// String returns the name of the Codec implementation. The returned
|
||||
// string will be used as part of content type in transmission.
|
||||
// String returns the name of the Codec implementation. This is unused by
|
||||
// gRPC.
|
||||
String() string
|
||||
}
|
||||
|
||||
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
|
||||
type protoCodec struct {
|
||||
}
|
||||
|
||||
type cachedProtoBuffer struct {
|
||||
lastMarshaledSize uint32
|
||||
proto.Buffer
|
||||
}
|
||||
|
||||
func capToMaxInt32(val int) uint32 {
|
||||
if val > math.MaxInt32 {
|
||||
return uint32(math.MaxInt32)
|
||||
}
|
||||
return uint32(val)
|
||||
}
|
||||
|
||||
func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
|
||||
protoMsg := v.(proto.Message)
|
||||
newSlice := make([]byte, 0, cb.lastMarshaledSize)
|
||||
|
||||
cb.SetBuf(newSlice)
|
||||
cb.Reset()
|
||||
if err := cb.Marshal(protoMsg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := cb.Bytes()
|
||||
cb.lastMarshaledSize = capToMaxInt32(len(out))
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
|
||||
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
||||
out, err := p.marshal(v, cb)
|
||||
|
||||
// put back buffer and lose the ref to the slice
|
||||
cb.SetBuf(nil)
|
||||
protoBufferPool.Put(cb)
|
||||
return out, err
|
||||
}
|
||||
|
||||
func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
|
||||
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
||||
cb.SetBuf(data)
|
||||
v.(proto.Message).Reset()
|
||||
err := cb.Unmarshal(v.(proto.Message))
|
||||
cb.SetBuf(nil)
|
||||
protoBufferPool.Put(cb)
|
||||
return err
|
||||
}
|
||||
|
||||
func (protoCodec) String() string {
|
||||
return "proto"
|
||||
}
|
||||
|
||||
var (
|
||||
protoBufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &cachedProtoBuffer{
|
||||
Buffer: proto.Buffer{},
|
||||
lastMarshaledSize: 16,
|
||||
}
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
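codec.go now keeps only the deprecated Codec interface (with String()) as a shim over encoding.Codec. A hedged sketch of plugging a legacy codec in through grpc.CustomCodec, assuming that deprecated dial option is still the entry point for it in this version:

package main

import (
	"encoding/json"
	"log"

	"google.golang.org/grpc"
)

type legacyJSONCodec struct{}

func (legacyJSONCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (legacyJSONCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (legacyJSONCodec) String() string                             { return "json" }

func main() {
	conn, err := grpc.Dial(
		"example.com:443", // illustrative target
		grpc.WithInsecure(),
		grpc.CustomCodec(legacyJSONCodec{}), // deprecated path, still honored
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}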
66
vendor/google.golang.org/grpc/codes/code_string.go
generated
vendored
@@ -1,16 +1,62 @@
|
||||
// Code generated by "stringer -type=Code"; DO NOT EDIT.
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package codes
|
||||
|
||||
import "fmt"
|
||||
import "strconv"
|
||||
|
||||
const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
|
||||
|
||||
var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
|
||||
|
||||
func (i Code) String() string {
|
||||
if i >= Code(len(_Code_index)-1) {
|
||||
return fmt.Sprintf("Code(%d)", i)
|
||||
func (c Code) String() string {
|
||||
switch c {
|
||||
case OK:
|
||||
return "OK"
|
||||
case Canceled:
|
||||
return "Canceled"
|
||||
case Unknown:
|
||||
return "Unknown"
|
||||
case InvalidArgument:
|
||||
return "InvalidArgument"
|
||||
case DeadlineExceeded:
|
||||
return "DeadlineExceeded"
|
||||
case NotFound:
|
||||
return "NotFound"
|
||||
case AlreadyExists:
|
||||
return "AlreadyExists"
|
||||
case PermissionDenied:
|
||||
return "PermissionDenied"
|
||||
case ResourceExhausted:
|
||||
return "ResourceExhausted"
|
||||
case FailedPrecondition:
|
||||
return "FailedPrecondition"
|
||||
case Aborted:
|
||||
return "Aborted"
|
||||
case OutOfRange:
|
||||
return "OutOfRange"
|
||||
case Unimplemented:
|
||||
return "Unimplemented"
|
||||
case Internal:
|
||||
return "Internal"
|
||||
case Unavailable:
|
||||
return "Unavailable"
|
||||
case DataLoss:
|
||||
return "DataLoss"
|
||||
case Unauthenticated:
|
||||
return "Unauthenticated"
|
||||
default:
|
||||
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
|
||||
}
|
||||
return _Code_name[_Code_index[i]:_Code_index[i+1]]
|
||||
}
|
||||
|
||||
65
vendor/google.golang.org/grpc/codes/codes.go
generated
vendored
@@ -20,11 +20,14 @@
|
||||
// consistent across various languages.
|
||||
package codes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
|
||||
type Code uint32
|
||||
|
||||
//go:generate stringer -type=Code
|
||||
|
||||
const (
|
||||
// OK is returned on success.
|
||||
OK Code = 0
|
||||
@@ -68,10 +71,6 @@ const (
|
||||
// instead for those errors).
|
||||
PermissionDenied Code = 7
|
||||
|
||||
// Unauthenticated indicates the request does not have valid
|
||||
// authentication credentials for the operation.
|
||||
Unauthenticated Code = 16
|
||||
|
||||
// ResourceExhausted indicates some resource has been exhausted, perhaps
|
||||
// a per-user quota, or perhaps the entire file system is out of space.
|
||||
ResourceExhausted Code = 8
|
||||
@@ -141,4 +140,58 @@ const (
|
||||
|
||||
// DataLoss indicates unrecoverable data loss or corruption.
|
||||
DataLoss Code = 15
|
||||
|
||||
// Unauthenticated indicates the request does not have valid
|
||||
// authentication credentials for the operation.
|
||||
Unauthenticated Code = 16
|
||||
|
||||
_maxCode = 17
|
||||
)
|
||||
|
||||
var strToCode = map[string]Code{
|
||||
`"OK"`: OK,
|
||||
`"CANCELLED"`:/* [sic] */ Canceled,
|
||||
`"UNKNOWN"`: Unknown,
|
||||
`"INVALID_ARGUMENT"`: InvalidArgument,
|
||||
`"DEADLINE_EXCEEDED"`: DeadlineExceeded,
|
||||
`"NOT_FOUND"`: NotFound,
|
||||
`"ALREADY_EXISTS"`: AlreadyExists,
|
||||
`"PERMISSION_DENIED"`: PermissionDenied,
|
||||
`"RESOURCE_EXHAUSTED"`: ResourceExhausted,
|
||||
`"FAILED_PRECONDITION"`: FailedPrecondition,
|
||||
`"ABORTED"`: Aborted,
|
||||
`"OUT_OF_RANGE"`: OutOfRange,
|
||||
`"UNIMPLEMENTED"`: Unimplemented,
|
||||
`"INTERNAL"`: Internal,
|
||||
`"UNAVAILABLE"`: Unavailable,
|
||||
`"DATA_LOSS"`: DataLoss,
|
||||
`"UNAUTHENTICATED"`: Unauthenticated,
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals b into the Code.
|
||||
func (c *Code) UnmarshalJSON(b []byte) error {
|
||||
// From json.Unmarshaler: By convention, to approximate the behavior of
|
||||
// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
|
||||
// a no-op.
|
||||
if string(b) == "null" {
|
||||
return nil
|
||||
}
|
||||
if c == nil {
|
||||
return fmt.Errorf("nil receiver passed to UnmarshalJSON")
|
||||
}
|
||||
|
||||
if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
|
||||
if ci >= _maxCode {
|
||||
return fmt.Errorf("invalid code: %q", ci)
|
||||
}
|
||||
|
||||
*c = Code(ci)
|
||||
return nil
|
||||
}
|
||||
|
||||
if jc, ok := strToCode[string(b)]; ok {
|
||||
*c = jc
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("invalid code: %q", string(b))
|
||||
}
|
||||
|
||||
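The new UnmarshalJSON accepts both the bare numeric form and the quoted canonical name, which is what service config parsing relies on. A small sketch:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	var c codes.Code

	// Quoted canonical name, as it appears in a service config.
	if err := json.Unmarshal([]byte(`"UNAVAILABLE"`), &c); err != nil {
		panic(err)
	}
	fmt.Println(c) // Unavailable

	// Bare numeric value is accepted as well.
	if err := json.Unmarshal([]byte("14"), &c); err != nil {
		panic(err)
	}
	fmt.Println(c) // Unavailable
}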
27
vendor/google.golang.org/grpc/credentials/credentials.go
generated
vendored
@@ -34,10 +34,8 @@ import (
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
// alpnProtoStr are the specified application level protocols for gRPC.
|
||||
alpnProtoStr = []string{"h2"}
|
||||
)
|
||||
// alpnProtoStr are the specified application level protocols for gRPC.
|
||||
var alpnProtoStr = []string{"h2"}
|
||||
|
||||
// PerRPCCredentials defines the common interface for the credentials which need to
|
||||
// attach security information to every RPC (e.g., oauth2).
|
||||
@@ -45,8 +43,9 @@ type PerRPCCredentials interface {
|
||||
// GetRequestMetadata gets the current request metadata, refreshing
|
||||
// tokens if required. This should be called by the transport layer on
|
||||
// each request, and the data should be populated in headers or other
|
||||
// context. uri is the URI of the entry point for the request. When
|
||||
// supported by the underlying implementation, ctx can be used for
|
||||
// context. If a status code is returned, it will be used as the status
|
||||
// for the RPC. uri is the URI of the entry point for the request.
|
||||
// When supported by the underlying implementation, ctx can be used for
|
||||
// timeout and cancellation.
|
||||
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
||||
// it as an arbitrary string.
|
||||
@@ -74,11 +73,9 @@ type AuthInfo interface {
|
||||
AuthType() string
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
|
||||
// and the caller should not close rawConn.
|
||||
ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
|
||||
)
|
||||
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
|
||||
// and the caller should not close rawConn.
|
||||
var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
|
||||
|
||||
// TransportCredentials defines the common interface for all the live gRPC wire
|
||||
// protocols and supported transport security protocols (e.g., TLS, SSL).
|
||||
@@ -135,15 +132,15 @@ func (c tlsCreds) Info() ProtocolInfo {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
|
||||
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
|
||||
// use local cfg to avoid clobbering ServerName if using multiple endpoints
|
||||
cfg := cloneTLSConfig(c.config)
|
||||
if cfg.ServerName == "" {
|
||||
colonPos := strings.LastIndex(addr, ":")
|
||||
colonPos := strings.LastIndex(authority, ":")
|
||||
if colonPos == -1 {
|
||||
colonPos = len(addr)
|
||||
colonPos = len(authority)
|
||||
}
|
||||
cfg.ServerName = addr[:colonPos]
|
||||
cfg.ServerName = authority[:colonPos]
|
||||
}
|
||||
conn := tls.Client(rawConn, cfg)
|
||||
errChannel := make(chan error, 1)
|
||||
|
||||
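The hunk above renames addr to authority: when the TLS config carries no ServerName, the handshake derives it from the dial authority by stripping the trailing ":port". A hedged client-side sketch; the target host is illustrative:

package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// ServerName is deliberately left empty; ClientHandshake fills it in from
	// the dial authority ("my-service.example.com:443") minus the port.
	creds := credentials.NewTLS(&tls.Config{})

	conn, err := grpc.Dial("my-service.example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}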
26
vendor/google.golang.org/grpc/encoding/BUILD
generated
vendored
Normal file
@@ -0,0 +1,26 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["encoding.go"],
    importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/encoding",
    importpath = "google.golang.org/grpc/encoding",
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//vendor/google.golang.org/grpc/encoding/proto:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
118
vendor/google.golang.org/grpc/encoding/encoding.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package encoding defines the interface for the compressor and codec, and
|
||||
// functions to register and retrieve compressors and codecs.
|
||||
//
|
||||
// This package is EXPERIMENTAL.
|
||||
package encoding
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Identity specifies the optional encoding for uncompressed streams.
|
||||
// It is intended for grpc internal use only.
|
||||
const Identity = "identity"
|
||||
|
||||
// Compressor is used for compressing and decompressing when sending or
|
||||
// receiving messages.
|
||||
type Compressor interface {
|
||||
// Compress writes the data written to wc to w after compressing it. If an
|
||||
// error occurs while initializing the compressor, that error is returned
|
||||
// instead.
|
||||
Compress(w io.Writer) (io.WriteCloser, error)
|
||||
// Decompress reads data from r, decompresses it, and provides the
|
||||
// uncompressed data via the returned io.Reader. If an error occurs while
|
||||
// initializing the decompressor, that error is returned instead.
|
||||
Decompress(r io.Reader) (io.Reader, error)
|
||||
// Name is the name of the compression codec and is used to set the content
|
||||
// coding header. The result must be static; the result cannot change
|
||||
// between calls.
|
||||
Name() string
|
||||
}
|
||||
|
||||
var registeredCompressor = make(map[string]Compressor)
|
||||
|
||||
// RegisterCompressor registers the compressor with gRPC by its name. It can
|
||||
// be activated when sending an RPC via grpc.UseCompressor(). It will be
|
||||
// automatically accessed when receiving a message based on the content coding
|
||||
// header. Servers also use it to send a response with the same encoding as
|
||||
// the request.
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. If multiple Compressors are
|
||||
// registered with the same name, the one registered last will take effect.
|
||||
func RegisterCompressor(c Compressor) {
|
||||
registeredCompressor[c.Name()] = c
|
||||
}
|
||||
|
||||
// GetCompressor returns Compressor for the given compressor name.
|
||||
func GetCompressor(name string) Compressor {
|
||||
return registeredCompressor[name]
|
||||
}
|
||||
|
||||
// Codec defines the interface gRPC uses to encode and decode messages. Note
|
||||
// that implementations of this interface must be thread safe; a Codec's
|
||||
// methods can be called from concurrent goroutines.
|
||||
type Codec interface {
|
||||
// Marshal returns the wire format of v.
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
// Unmarshal parses the wire format into v.
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
// Name returns the name of the Codec implementation. The returned string
|
||||
// will be used as part of content type in transmission. The result must be
|
||||
// static; the result cannot change between calls.
|
||||
Name() string
|
||||
}
|
||||
|
||||
var registeredCodecs = make(map[string]Codec)
|
||||
|
||||
// RegisterCodec registers the provided Codec for use with all gRPC clients and
|
||||
// servers.
|
||||
//
|
||||
// The Codec will be stored and looked up by result of its Name() method, which
|
||||
// should match the content-subtype of the encoding handled by the Codec. This
|
||||
// is case-insensitive, and is stored and looked up as lowercase. If the
|
||||
// result of calling Name() is an empty string, RegisterCodec will panic. See
|
||||
// Content-Type on
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. If multiple Compressors are
|
||||
// registered with the same name, the one registered last will take effect.
|
||||
func RegisterCodec(codec Codec) {
|
||||
if codec == nil {
|
||||
panic("cannot register a nil Codec")
|
||||
}
|
||||
contentSubtype := strings.ToLower(codec.Name())
|
||||
if contentSubtype == "" {
|
||||
panic("cannot register Codec with empty string result for String()")
|
||||
}
|
||||
registeredCodecs[contentSubtype] = codec
|
||||
}
|
||||
|
||||
// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
|
||||
// registered for the content-subtype.
|
||||
//
|
||||
// The content-subtype is expected to be lowercase.
|
||||
func GetCodec(contentSubtype string) Codec {
|
||||
return registeredCodecs[contentSubtype]
|
||||
}
|
||||
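A hedged sketch of a codec built against this new package: a hypothetical JSON codec registered at init time. Callers can then opt into it per RPC by its content-subtype (via the CallContentSubtype call option); the package and codec names below are assumptions for illustration.

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

type codec struct{}

func (codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }

// Name doubles as the content-subtype; it is stored lowercase by RegisterCodec.
func (codec) Name() string { return "json" }

func init() {
	encoding.RegisterCodec(codec{})
}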
27
vendor/google.golang.org/grpc/encoding/proto/BUILD
generated
vendored
Normal file
@@ -0,0 +1,27 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["proto.go"],
    importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/encoding/proto",
    importpath = "google.golang.org/grpc/encoding/proto",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/google.golang.org/grpc/encoding:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
110
vendor/google.golang.org/grpc/encoding/proto/proto.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package proto defines the protobuf codec. Importing this package will
|
||||
// register the codec.
|
||||
package proto
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/encoding"
|
||||
)
|
||||
|
||||
// Name is the name registered for the proto compressor.
|
||||
const Name = "proto"
|
||||
|
||||
func init() {
|
||||
encoding.RegisterCodec(codec{})
|
||||
}
|
||||
|
||||
// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
|
||||
type codec struct{}
|
||||
|
||||
type cachedProtoBuffer struct {
|
||||
lastMarshaledSize uint32
|
||||
proto.Buffer
|
||||
}
|
||||
|
||||
func capToMaxInt32(val int) uint32 {
|
||||
if val > math.MaxInt32 {
|
||||
return uint32(math.MaxInt32)
|
||||
}
|
||||
return uint32(val)
|
||||
}
|
||||
|
||||
func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
|
||||
protoMsg := v.(proto.Message)
|
||||
newSlice := make([]byte, 0, cb.lastMarshaledSize)
|
||||
|
||||
cb.SetBuf(newSlice)
|
||||
cb.Reset()
|
||||
if err := cb.Marshal(protoMsg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := cb.Bytes()
|
||||
cb.lastMarshaledSize = capToMaxInt32(len(out))
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (codec) Marshal(v interface{}) ([]byte, error) {
|
||||
if pm, ok := v.(proto.Marshaler); ok {
|
||||
// object can marshal itself, no need for buffer
|
||||
return pm.Marshal()
|
||||
}
|
||||
|
||||
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
||||
out, err := marshal(v, cb)
|
||||
|
||||
// put back buffer and lose the ref to the slice
|
||||
cb.SetBuf(nil)
|
||||
protoBufferPool.Put(cb)
|
||||
return out, err
|
||||
}
|
||||
|
||||
func (codec) Unmarshal(data []byte, v interface{}) error {
|
||||
protoMsg := v.(proto.Message)
|
||||
protoMsg.Reset()
|
||||
|
||||
if pu, ok := protoMsg.(proto.Unmarshaler); ok {
|
||||
// object can unmarshal itself, no need for buffer
|
||||
return pu.Unmarshal(data)
|
||||
}
|
||||
|
||||
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
||||
cb.SetBuf(data)
|
||||
err := cb.Unmarshal(protoMsg)
|
||||
cb.SetBuf(nil)
|
||||
protoBufferPool.Put(cb)
|
||||
return err
|
||||
}
|
||||
|
||||
func (codec) Name() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
var protoBufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &cachedProtoBuffer{
|
||||
Buffer: proto.Buffer{},
|
||||
lastMarshaledSize: 16,
|
||||
}
|
||||
},
|
||||
}
|
||||
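A hedged round-trip sketch using the registered codec; the Duration message from github.com/golang/protobuf/ptypes/duration is only a convenient stand-in for any generated proto.Message:

package main

import (
	"fmt"

	durpb "github.com/golang/protobuf/ptypes/duration"
	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/proto" // registers the "proto" codec
)

func main() {
	c := encoding.GetCodec("proto")

	b, err := c.Marshal(&durpb.Duration{Seconds: 3})
	if err != nil {
		panic(err)
	}

	var d durpb.Duration
	if err := c.Unmarshal(b, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Seconds) // 3
}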
37
vendor/google.golang.org/grpc/envconfig.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"os"
	"strings"
)

const (
	envConfigPrefix        = "GRPC_GO_"
	envConfigStickinessStr = envConfigPrefix + "STICKINESS"
)

var (
	envConfigStickinessOn bool
)

func init() {
	envConfigStickinessOn = strings.EqualFold(os.Getenv(envConfigStickinessStr), "on")
}
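envconfig.go gates the experimental stickiness support behind the GRPC_GO_STICKINESS environment variable, read once at package init. A minimal sketch of the same opt-in pattern, written as a standalone program rather than the grpc package itself:

package main

import (
	"fmt"
	"os"
	"strings"
)

// envFlagOn mirrors how envconfig.go treats GRPC_GO_STICKINESS: the feature is
// on only when the variable equals "on", compared case-insensitively.
func envFlagOn(name string) bool {
	return strings.EqualFold(os.Getenv(name), "on")
}

func main() {
	// e.g. run as: GRPC_GO_STICKINESS=on ./client
	fmt.Println("stickiness enabled:", envFlagOn("GRPC_GO_STICKINESS"))
}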
70
vendor/google.golang.org/grpc/go16.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
// +build go1.6,!go1.7

/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"fmt"
	"io"
	"net"
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/transport"
)

// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}

func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
	req.Cancel = ctx.Done()
	if err := req.Write(conn); err != nil {
		return fmt.Errorf("failed to write the HTTP request: %v", err)
	}
	return nil
}

// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
	if err == nil || err == io.EOF {
		return err
	}
	if _, ok := status.FromError(err); ok {
		return err
	}
	switch e := err.(type) {
	case transport.StreamError:
		return status.Error(e.Code, e.Desc)
	case transport.ConnectionError:
		return status.Error(codes.Unavailable, e.Desc)
	default:
		switch err {
		case context.DeadlineExceeded:
			return status.Error(codes.DeadlineExceeded, err.Error())
		case context.Canceled:
			return status.Error(codes.Canceled, err.Error())
		}
	}
	return status.Error(codes.Unknown, err.Error())
}
71
vendor/google.golang.org/grpc/go17.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
// +build go1.7

/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"

	netctx "golang.org/x/net/context"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/transport"
)

// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
	return (&net.Dialer{}).DialContext(ctx, network, address)
}

func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
	req = req.WithContext(ctx)
	if err := req.Write(conn); err != nil {
		return fmt.Errorf("failed to write the HTTP request: %v", err)
	}
	return nil
}

// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
	if err == nil || err == io.EOF {
		return err
	}
	if _, ok := status.FromError(err); ok {
		return err
	}
	switch e := err.(type) {
	case transport.StreamError:
		return status.Error(e.Code, e.Desc)
	case transport.ConnectionError:
		return status.Error(codes.Unavailable, e.Desc)
	default:
		switch err {
		case context.DeadlineExceeded, netctx.DeadlineExceeded:
			return status.Error(codes.DeadlineExceeded, err.Error())
		case context.Canceled, netctx.Canceled:
			return status.Error(codes.Canceled, err.Error())
		}
	}
	return status.Error(codes.Unknown, err.Error())
}
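go16.go and go17.go are build-tagged variants of the same helpers: on Go 1.7 and newer the dialer uses net.Dialer.DialContext and http.Request.WithContext, while the Go 1.6 fallback wires cancellation through the Cancel channels; both funnel transport and context errors into status errors via toRPCErr. A minimal sketch of how calling code would inspect those status errors; the describe helper is an illustrative assumption and the RPC call itself is elided.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func describe(err error) {
	// status.FromError recovers the code that toRPCErr attached, e.g.
	// codes.DeadlineExceeded for a context deadline.
	if s, ok := status.FromError(err); ok {
		switch s.Code() {
		case codes.DeadlineExceeded, codes.Unavailable:
			fmt.Println("retryable:", s.Message())
		default:
			fmt.Println("failed:", s.Code(), s.Message())
		}
		return
	}
	fmt.Println("non-gRPC error:", err)
}

func main() {
	describe(status.Error(codes.DeadlineExceeded, "context deadline exceeded"))
}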
704
vendor/google.golang.org/grpc/grpclb.go
generated
vendored
@@ -1,704 +0,0 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/naming"
|
||||
)
|
||||
|
||||
// Client API for LoadBalancer service.
|
||||
// Mostly copied from generated pb.go file.
|
||||
// To avoid circular dependency.
|
||||
type loadBalancerClient struct {
|
||||
cc *ClientConn
|
||||
}
|
||||
|
||||
func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
|
||||
desc := &StreamDesc{
|
||||
StreamName: "BalanceLoad",
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
}
|
||||
stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &balanceLoadClientStream{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type balanceLoadClientStream struct {
|
||||
ClientStream
|
||||
}
|
||||
|
||||
func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) {
|
||||
m := new(lbmpb.LoadBalanceResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// NewGRPCLBBalancer creates a grpclb load balancer.
|
||||
func NewGRPCLBBalancer(r naming.Resolver) Balancer {
|
||||
return &grpclbBalancer{
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
type remoteBalancerInfo struct {
|
||||
addr string
|
||||
// the server name used for authentication with the remote LB server.
|
||||
name string
|
||||
}
|
||||
|
||||
// grpclbAddrInfo consists of the information of a backend server.
|
||||
type grpclbAddrInfo struct {
|
||||
addr Address
|
||||
connected bool
|
||||
// dropForRateLimiting indicates whether this particular request should be
|
||||
// dropped by the client for rate limiting.
|
||||
dropForRateLimiting bool
|
||||
// dropForLoadBalancing indicates whether this particular request should be
|
||||
// dropped by the client for load balancing.
|
||||
dropForLoadBalancing bool
|
||||
}
|
||||
|
||||
type grpclbBalancer struct {
|
||||
r naming.Resolver
|
||||
target string
|
||||
mu sync.Mutex
|
||||
seq int // a sequence number to make sure addrCh does not get stale addresses.
|
||||
w naming.Watcher
|
||||
addrCh chan []Address
|
||||
rbs []remoteBalancerInfo
|
||||
addrs []*grpclbAddrInfo
|
||||
next int
|
||||
waitCh chan struct{}
|
||||
done bool
|
||||
rand *rand.Rand
|
||||
|
||||
clientStats lbmpb.ClientStats
|
||||
}
|
||||
|
||||
func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
|
||||
updates, err := w.Next()
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
|
||||
return err
|
||||
}
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.done {
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
for _, update := range updates {
|
||||
switch update.Op {
|
||||
case naming.Add:
|
||||
var exist bool
|
||||
for _, v := range b.rbs {
|
||||
// TODO: Is the same addr with different server name a different balancer?
|
||||
if update.Addr == v.addr {
|
||||
exist = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if exist {
|
||||
continue
|
||||
}
|
||||
md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB)
|
||||
if !ok {
|
||||
// TODO: Revisit the handling here and may introduce some fallback mechanism.
|
||||
grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata)
|
||||
continue
|
||||
}
|
||||
switch md.AddrType {
|
||||
case naming.Backend:
|
||||
// TODO: Revisit the handling here and may introduce some fallback mechanism.
|
||||
grpclog.Errorf("The name resolution does not give grpclb addresses")
|
||||
continue
|
||||
case naming.GRPCLB:
|
||||
b.rbs = append(b.rbs, remoteBalancerInfo{
|
||||
addr: update.Addr,
|
||||
name: md.ServerName,
|
||||
})
|
||||
default:
|
||||
grpclog.Errorf("Received unknow address type %d", md.AddrType)
|
||||
continue
|
||||
}
|
||||
case naming.Delete:
|
||||
for i, v := range b.rbs {
|
||||
if update.Addr == v.addr {
|
||||
copy(b.rbs[i:], b.rbs[i+1:])
|
||||
b.rbs = b.rbs[:len(b.rbs)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
grpclog.Errorf("Unknown update.Op %v", update.Op)
|
||||
}
|
||||
}
|
||||
// TODO: Fall back to the basic round-robin load balancing if the resulting address is
|
||||
// not a load balancer.
|
||||
select {
|
||||
case <-ch:
|
||||
default:
|
||||
}
|
||||
ch <- b.rbs
|
||||
return nil
|
||||
}
|
||||
|
||||
func convertDuration(d *lbmpb.Duration) time.Duration {
|
||||
if d == nil {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
|
||||
}
|
||||
|
||||
func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
servers := l.GetServers()
|
||||
var (
|
||||
sl []*grpclbAddrInfo
|
||||
addrs []Address
|
||||
)
|
||||
for _, s := range servers {
|
||||
md := metadata.Pairs("lb-token", s.LoadBalanceToken)
|
||||
ip := net.IP(s.IpAddress)
|
||||
ipStr := ip.String()
|
||||
if ip.To4() == nil {
|
||||
// Add square brackets to ipv6 addresses, otherwise net.Dial() and
|
||||
// net.SplitHostPort() will return too many colons error.
|
||||
ipStr = fmt.Sprintf("[%s]", ipStr)
|
||||
}
|
||||
addr := Address{
|
||||
Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
|
||||
Metadata: &md,
|
||||
}
|
||||
sl = append(sl, &grpclbAddrInfo{
|
||||
addr: addr,
|
||||
dropForRateLimiting: s.DropForRateLimiting,
|
||||
dropForLoadBalancing: s.DropForLoadBalancing,
|
||||
})
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.done || seq < b.seq {
|
||||
return
|
||||
}
|
||||
if len(sl) > 0 {
|
||||
// reset b.next to 0 when replacing the server list.
|
||||
b.next = 0
|
||||
b.addrs = sl
|
||||
b.addrCh <- addrs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
b.mu.Lock()
|
||||
stats := b.clientStats
|
||||
b.clientStats = lbmpb.ClientStats{} // Clear the stats.
|
||||
b.mu.Unlock()
|
||||
t := time.Now()
|
||||
stats.Timestamp = &lbmpb.Timestamp{
|
||||
Seconds: t.Unix(),
|
||||
Nanos: int32(t.Nanosecond()),
|
||||
}
|
||||
if err := s.Send(&lbmpb.LoadBalanceRequest{
|
||||
LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{
|
||||
ClientStats: &stats,
|
||||
},
|
||||
}); err != nil {
|
||||
grpclog.Errorf("grpclb: failed to send load report: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
stream, err := lbc.BalanceLoad(ctx)
|
||||
if err != nil {
|
||||
grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
|
||||
return
|
||||
}
|
||||
b.mu.Lock()
|
||||
if b.done {
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
b.mu.Unlock()
|
||||
initReq := &lbmpb.LoadBalanceRequest{
|
||||
LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{
|
||||
InitialRequest: &lbmpb.InitialLoadBalanceRequest{
|
||||
Name: b.target,
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := stream.Send(initReq); err != nil {
|
||||
grpclog.Errorf("grpclb: failed to send init request: %v", err)
|
||||
// TODO: backoff on retry?
|
||||
return true
|
||||
}
|
||||
reply, err := stream.Recv()
|
||||
if err != nil {
|
||||
grpclog.Errorf("grpclb: failed to recv init response: %v", err)
|
||||
// TODO: backoff on retry?
|
||||
return true
|
||||
}
|
||||
initResp := reply.GetInitialResponse()
|
||||
if initResp == nil {
|
||||
grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.")
|
||||
return
|
||||
}
|
||||
// TODO: Support delegation.
|
||||
if initResp.LoadBalancerDelegate != "" {
|
||||
// delegation
|
||||
grpclog.Errorf("TODO: Delegation is not supported yet.")
|
||||
return
|
||||
}
|
||||
streamDone := make(chan struct{})
|
||||
defer close(streamDone)
|
||||
b.mu.Lock()
|
||||
b.clientStats = lbmpb.ClientStats{} // Clear client stats.
|
||||
b.mu.Unlock()
|
||||
if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
|
||||
go b.sendLoadReport(stream, d, streamDone)
|
||||
}
|
||||
// Retrieve the server list.
|
||||
for {
|
||||
reply, err := stream.Recv()
|
||||
if err != nil {
|
||||
grpclog.Errorf("grpclb: failed to recv server list: %v", err)
|
||||
break
|
||||
}
|
||||
b.mu.Lock()
|
||||
if b.done || seq < b.seq {
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
b.seq++ // tick when receiving a new list of servers.
|
||||
seq = b.seq
|
||||
b.mu.Unlock()
|
||||
if serverList := reply.GetServerList(); serverList != nil {
|
||||
b.processServerList(serverList, seq)
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *grpclbBalancer) Start(target string, config BalancerConfig) error {
|
||||
b.rand = rand.New(rand.NewSource(time.Now().Unix()))
|
||||
// TODO: Fall back to the basic direct connection if there is no name resolver.
|
||||
if b.r == nil {
|
||||
return errors.New("there is no name resolver installed")
|
||||
}
|
||||
b.target = target
|
||||
b.mu.Lock()
|
||||
if b.done {
|
||||
b.mu.Unlock()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
b.addrCh = make(chan []Address)
|
||||
w, err := b.r.Resolve(target)
|
||||
if err != nil {
|
||||
b.mu.Unlock()
|
||||
grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err)
|
||||
return err
|
||||
}
|
||||
b.w = w
|
||||
b.mu.Unlock()
|
||||
balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
|
||||
// Spawn a goroutine to monitor the name resolution of remote load balancer.
|
||||
go func() {
|
||||
for {
|
||||
if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
|
||||
grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err)
|
||||
close(balancerAddrsCh)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
// Spawn a goroutine to talk to the remote load balancer.
|
||||
go func() {
|
||||
var (
|
||||
cc *ClientConn
|
||||
// ccError is closed when there is an error in the current cc.
|
||||
// A new rb should be picked from rbs and connected.
|
||||
ccError chan struct{}
|
||||
rb *remoteBalancerInfo
|
||||
rbs []remoteBalancerInfo
|
||||
rbIdx int
|
||||
)
|
||||
|
||||
defer func() {
|
||||
if ccError != nil {
|
||||
select {
|
||||
case <-ccError:
|
||||
default:
|
||||
close(ccError)
|
||||
}
|
||||
}
|
||||
if cc != nil {
|
||||
cc.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
var ok bool
|
||||
select {
|
||||
case rbs, ok = <-balancerAddrsCh:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
foundIdx := -1
|
||||
if rb != nil {
|
||||
for i, trb := range rbs {
|
||||
if trb == *rb {
|
||||
foundIdx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if foundIdx >= 0 {
|
||||
if foundIdx >= 1 {
|
||||
// Move the address in use to the beginning of the list.
|
||||
b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
|
||||
rbIdx = 0
|
||||
}
|
||||
continue // If found, don't dial new cc.
|
||||
} else if len(rbs) > 0 {
|
||||
// Pick a random one from the list, instead of always using the first one.
|
||||
if l := len(rbs); l > 1 && rb != nil {
|
||||
tmpIdx := b.rand.Intn(l - 1)
|
||||
b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
|
||||
}
|
||||
rbIdx = 0
|
||||
rb = &rbs[0]
|
||||
} else {
|
||||
// foundIdx < 0 && len(rbs) <= 0.
|
||||
rb = nil
|
||||
}
|
||||
case <-ccError:
|
||||
ccError = nil
|
||||
if rbIdx < len(rbs)-1 {
|
||||
rbIdx++
|
||||
rb = &rbs[rbIdx]
|
||||
} else {
|
||||
rb = nil
|
||||
}
|
||||
}
|
||||
|
||||
if rb == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if cc != nil {
|
||||
cc.Close()
|
||||
}
|
||||
// Talk to the remote load balancer to get the server list.
|
||||
var (
|
||||
err error
|
||||
dopts []DialOption
|
||||
)
|
||||
if creds := config.DialCreds; creds != nil {
|
||||
if rb.name != "" {
|
||||
if err := creds.OverrideServerName(rb.name); err != nil {
|
||||
grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
dopts = append(dopts, WithTransportCredentials(creds))
|
||||
} else {
|
||||
dopts = append(dopts, WithInsecure())
|
||||
}
|
||||
if dialer := config.Dialer; dialer != nil {
|
||||
// WithDialer takes a different type of function, so we instead use a special DialOption here.
|
||||
dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
|
||||
}
|
||||
dopts = append(dopts, WithBlock())
|
||||
ccError = make(chan struct{})
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
cc, err = DialContext(ctx, rb.addr, dopts...)
|
||||
cancel()
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
|
||||
close(ccError)
|
||||
continue
|
||||
}
|
||||
b.mu.Lock()
|
||||
b.seq++ // tick when getting a new balancer address
|
||||
seq := b.seq
|
||||
b.next = 0
|
||||
b.mu.Unlock()
|
||||
go func(cc *ClientConn, ccError chan struct{}) {
|
||||
lbc := &loadBalancerClient{cc}
|
||||
b.callRemoteBalancer(lbc, seq)
|
||||
cc.Close()
|
||||
select {
|
||||
case <-ccError:
|
||||
default:
|
||||
close(ccError)
|
||||
}
|
||||
}(cc, ccError)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *grpclbBalancer) down(addr Address, err error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
for _, a := range b.addrs {
|
||||
if addr == a.addr {
|
||||
a.connected = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *grpclbBalancer) Up(addr Address) func(error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.done {
|
||||
return nil
|
||||
}
|
||||
var cnt int
|
||||
for _, a := range b.addrs {
|
||||
if a.addr == addr {
|
||||
if a.connected {
|
||||
return nil
|
||||
}
|
||||
a.connected = true
|
||||
}
|
||||
if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
|
||||
cnt++
|
||||
}
|
||||
}
|
||||
// addr is the only one which is connected. Notify the Get() callers who are blocking.
|
||||
if cnt == 1 && b.waitCh != nil {
|
||||
close(b.waitCh)
|
||||
b.waitCh = nil
|
||||
}
|
||||
return func(err error) {
|
||||
b.down(addr, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
|
||||
var ch chan struct{}
|
||||
b.mu.Lock()
|
||||
if b.done {
|
||||
b.mu.Unlock()
|
||||
err = ErrClientConnClosing
|
||||
return
|
||||
}
|
||||
seq := b.seq
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
put = func() {
|
||||
s, ok := rpcInfoFromContext(ctx)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.done || seq < b.seq {
|
||||
return
|
||||
}
|
||||
b.clientStats.NumCallsFinished++
|
||||
if !s.bytesSent {
|
||||
b.clientStats.NumCallsFinishedWithClientFailedToSend++
|
||||
} else if s.bytesReceived {
|
||||
b.clientStats.NumCallsFinishedKnownReceived++
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
b.clientStats.NumCallsStarted++
|
||||
if len(b.addrs) > 0 {
|
||||
if b.next >= len(b.addrs) {
|
||||
b.next = 0
|
||||
}
|
||||
next := b.next
|
||||
for {
|
||||
a := b.addrs[next]
|
||||
next = (next + 1) % len(b.addrs)
|
||||
if a.connected {
|
||||
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
|
||||
addr = a.addr
|
||||
b.next = next
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if !opts.BlockingWait {
|
||||
b.next = next
|
||||
if a.dropForLoadBalancing {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
|
||||
} else if a.dropForRateLimiting {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
|
||||
}
|
||||
b.mu.Unlock()
|
||||
err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
|
||||
return
|
||||
}
|
||||
}
|
||||
if next == b.next {
|
||||
// Has iterated all the possible address but none is connected.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !opts.BlockingWait {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithClientFailedToSend++
|
||||
b.mu.Unlock()
|
||||
err = Errorf(codes.Unavailable, "there is no address available")
|
||||
return
|
||||
}
|
||||
// Wait on b.waitCh for non-failfast RPCs.
|
||||
if b.waitCh == nil {
|
||||
ch = make(chan struct{})
|
||||
b.waitCh = ch
|
||||
} else {
|
||||
ch = b.waitCh
|
||||
}
|
||||
b.mu.Unlock()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
b.mu.Lock()
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithClientFailedToSend++
|
||||
b.mu.Unlock()
|
||||
err = ctx.Err()
|
||||
return
|
||||
case <-ch:
|
||||
b.mu.Lock()
|
||||
if b.done {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithClientFailedToSend++
|
||||
b.mu.Unlock()
|
||||
err = ErrClientConnClosing
|
||||
return
|
||||
}
|
||||
|
||||
if len(b.addrs) > 0 {
|
||||
if b.next >= len(b.addrs) {
|
||||
b.next = 0
|
||||
}
|
||||
next := b.next
|
||||
for {
|
||||
a := b.addrs[next]
|
||||
next = (next + 1) % len(b.addrs)
|
||||
if a.connected {
|
||||
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
|
||||
addr = a.addr
|
||||
b.next = next
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if !opts.BlockingWait {
|
||||
b.next = next
|
||||
if a.dropForLoadBalancing {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
|
||||
} else if a.dropForRateLimiting {
|
||||
b.clientStats.NumCallsFinished++
|
||||
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
|
||||
}
|
||||
b.mu.Unlock()
|
||||
err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr)
|
||||
return
|
||||
}
|
||||
}
|
||||
if next == b.next {
|
||||
// Has iterated all the possible address but none is connected.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// The newly added addr got removed by Down() again.
|
||||
if b.waitCh == nil {
|
||||
ch = make(chan struct{})
|
||||
b.waitCh = ch
|
||||
} else {
|
||||
ch = b.waitCh
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *grpclbBalancer) Notify() <-chan []Address {
|
||||
return b.addrCh
|
||||
}
|
||||
|
||||
func (b *grpclbBalancer) Close() error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.done {
|
||||
return errBalancerClosed
|
||||
}
|
||||
b.done = true
|
||||
if b.waitCh != nil {
|
||||
close(b.waitCh)
|
||||
}
|
||||
if b.addrCh != nil {
|
||||
close(b.addrCh)
|
||||
}
|
||||
if b.w != nil {
|
||||
b.w.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
6
vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/BUILD
generated
vendored
@@ -6,7 +6,11 @@ go_library(
    importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages",
    importpath = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages",
    visibility = ["//visibility:public"],
    deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"],
    deps = [
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/github.com/golang/protobuf/ptypes/duration:go_default_library",
        "//vendor/github.com/golang/protobuf/ptypes/timestamp:go_default_library",
    ],
)

filegroup(
507
vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go
generated
vendored
@@ -1,28 +1,13 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: grpc_lb_v1/messages/messages.proto
|
||||
|
||||
/*
|
||||
Package messages is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
grpc_lb_v1/messages/messages.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Duration
|
||||
Timestamp
|
||||
LoadBalanceRequest
|
||||
InitialLoadBalanceRequest
|
||||
ClientStats
|
||||
LoadBalanceResponse
|
||||
InitialLoadBalanceResponse
|
||||
ServerList
|
||||
Server
|
||||
*/
|
||||
package messages
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import duration "github.com/golang/protobuf/ptypes/duration"
|
||||
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@@ -35,90 +20,49 @@ var _ = math.Inf
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type Duration struct {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Duration) Reset() { *m = Duration{} }
|
||||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
||||
func (*Duration) ProtoMessage() {}
|
||||
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *Duration) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Duration) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Timestamp struct {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *Timestamp) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Timestamp) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type LoadBalanceRequest struct {
|
||||
// Types that are valid to be assigned to LoadBalanceRequestType:
|
||||
// *LoadBalanceRequest_InitialRequest
|
||||
// *LoadBalanceRequest_ClientStats
|
||||
LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} }
|
||||
func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*LoadBalanceRequest) ProtoMessage() {}
|
||||
func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
func (*LoadBalanceRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_b3d89fcb5aa158f8, []int{0}
|
||||
}
|
||||
func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *LoadBalanceRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LoadBalanceRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *LoadBalanceRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_LoadBalanceRequest.Size(m)
|
||||
}
|
||||
func (m *LoadBalanceRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo
|
||||
|
||||
type isLoadBalanceRequest_LoadBalanceRequestType interface {
|
||||
isLoadBalanceRequest_LoadBalanceRequestType()
|
||||
}
|
||||
|
||||
type LoadBalanceRequest_InitialRequest struct {
|
||||
InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
|
||||
InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"`
|
||||
}
|
||||
type LoadBalanceRequest_ClientStats struct {
|
||||
ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
|
||||
ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
|
||||
@@ -204,12 +148,12 @@ func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
|
||||
switch x := m.LoadBalanceRequestType.(type) {
|
||||
case *LoadBalanceRequest_InitialRequest:
|
||||
s := proto.Size(x.InitialRequest)
|
||||
n += proto.SizeVarint(1<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *LoadBalanceRequest_ClientStats:
|
||||
s := proto.Size(x.ClientStats)
|
||||
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
@@ -220,15 +164,37 @@ func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
|
||||
}
|
||||
|
||||
type InitialLoadBalanceRequest struct {
|
||||
// Name of load balanced service (IE, balancer.service.com)
|
||||
// Name of load balanced service (IE, balancer.service.com). Its
|
||||
// length should be less than 256 bytes.
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} }
|
||||
func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*InitialLoadBalanceRequest) ProtoMessage() {}
|
||||
func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_b3d89fcb5aa158f8, []int{1}
|
||||
}
|
||||
func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_InitialLoadBalanceRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *InitialLoadBalanceRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_InitialLoadBalanceRequest.Size(m)
|
||||
}
|
||||
func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *InitialLoadBalanceRequest) GetName() string {
|
||||
if m != nil {
|
||||
@@ -237,34 +203,101 @@ func (m *InitialLoadBalanceRequest) GetName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Contains the number of calls finished for a particular load balance token.
|
||||
type ClientStatsPerToken struct {
|
||||
// See Server.load_balance_token.
|
||||
LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"`
|
||||
// The total number of RPCs that finished associated with the token.
|
||||
NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ClientStatsPerToken) Reset() { *m = ClientStatsPerToken{} }
|
||||
func (m *ClientStatsPerToken) String() string { return proto.CompactTextString(m) }
|
||||
func (*ClientStatsPerToken) ProtoMessage() {}
|
||||
func (*ClientStatsPerToken) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_b3d89fcb5aa158f8, []int{2}
|
||||
}
|
||||
func (m *ClientStatsPerToken) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ClientStatsPerToken.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ClientStatsPerToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ClientStatsPerToken.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ClientStatsPerToken) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ClientStatsPerToken.Merge(dst, src)
|
||||
}
|
||||
func (m *ClientStatsPerToken) XXX_Size() int {
|
||||
return xxx_messageInfo_ClientStatsPerToken.Size(m)
|
||||
}
|
||||
func (m *ClientStatsPerToken) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ClientStatsPerToken.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ClientStatsPerToken proto.InternalMessageInfo
|
||||
|
||||
func (m *ClientStatsPerToken) GetLoadBalanceToken() string {
|
||||
if m != nil {
|
||||
return m.LoadBalanceToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ClientStatsPerToken) GetNumCalls() int64 {
|
||||
if m != nil {
|
||||
return m.NumCalls
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Contains client level statistics that are useful to load balancing. Each
|
||||
// count except the timestamp should be reset to zero after reporting the stats.
|
||||
type ClientStats struct {
|
||||
// The timestamp of generating the report.
|
||||
Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
|
||||
Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
// The total number of RPCs that started.
|
||||
NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
|
||||
NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"`
|
||||
// The total number of RPCs that finished.
|
||||
NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
|
||||
// The total number of RPCs that were dropped by the client because of rate
|
||||
// limiting.
|
||||
NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
|
||||
// The total number of RPCs that were dropped by the client because of load
|
||||
// balancing.
|
||||
NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
|
||||
NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"`
|
||||
// The total number of RPCs that failed to reach a server except dropped RPCs.
|
||||
NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
|
||||
NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
|
||||
// The total number of RPCs that finished and are known to have been received
|
||||
// by a server.
|
||||
NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
|
||||
NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"`
|
||||
// The list of dropped calls.
|
||||
CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ClientStats) Reset() { *m = ClientStats{} }
|
||||
func (m *ClientStats) String() string { return proto.CompactTextString(m) }
|
||||
func (*ClientStats) ProtoMessage() {}
|
||||
func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||
func (*ClientStats) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_b3d89fcb5aa158f8, []int{3}
|
||||
}
|
||||
func (m *ClientStats) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ClientStats.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ClientStats) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ClientStats.Merge(dst, src)
|
||||
}
|
||||
func (m *ClientStats) XXX_Size() int {
|
||||
return xxx_messageInfo_ClientStats.Size(m)
|
||||
}
|
||||
func (m *ClientStats) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ClientStats.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetTimestamp() *Timestamp {
|
||||
var xxx_messageInfo_ClientStats proto.InternalMessageInfo
|
||||
|
||||
func (m *ClientStats) GetTimestamp() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
@@ -285,20 +318,6 @@ func (m *ClientStats) GetNumCallsFinished() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
|
||||
if m != nil {
|
||||
return m.NumCallsFinishedWithDropForRateLimiting
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
|
||||
if m != nil {
|
||||
return m.NumCallsFinishedWithDropForLoadBalancing
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
|
||||
if m != nil {
|
||||
return m.NumCallsFinishedWithClientFailedToSend
|
||||
@@ -313,27 +332,56 @@ func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken {
|
||||
if m != nil {
|
||||
return m.CallsFinishedWithDrop
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LoadBalanceResponse struct {
|
||||
// Types that are valid to be assigned to LoadBalanceResponseType:
|
||||
// *LoadBalanceResponse_InitialResponse
|
||||
// *LoadBalanceResponse_ServerList
|
||||
LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} }
|
||||
func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*LoadBalanceResponse) ProtoMessage() {}
|
||||
func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
||||
func (*LoadBalanceResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_b3d89fcb5aa158f8, []int{4}
|
||||
}
|
||||
func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *LoadBalanceResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LoadBalanceResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *LoadBalanceResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_LoadBalanceResponse.Size(m)
|
||||
}
|
||||
func (m *LoadBalanceResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo
|
||||
|
||||
type isLoadBalanceResponse_LoadBalanceResponseType interface {
|
||||
isLoadBalanceResponse_LoadBalanceResponseType()
|
||||
}
|
||||
|
||||
type LoadBalanceResponse_InitialResponse struct {
|
||||
InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
|
||||
InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"`
|
||||
}
|
||||
type LoadBalanceResponse_ServerList struct {
|
||||
ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
|
||||
ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
|
||||
@@ -419,12 +467,12 @@ func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
|
||||
switch x := m.LoadBalanceResponseType.(type) {
|
||||
case *LoadBalanceResponse_InitialResponse:
|
||||
s := proto.Size(x.InitialResponse)
|
||||
n += proto.SizeVarint(1<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *LoadBalanceResponse_ServerList:
|
||||
s := proto.Size(x.ServerList)
|
||||
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
@@ -440,17 +488,39 @@ type InitialLoadBalanceResponse struct {
|
||||
// the response, the client should open a separate connection to the
|
||||
// load_balancer_delegate and call the BalanceLoad method. Its length should
|
||||
// be less than 64 bytes.
|
||||
LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
|
||||
LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate,proto3" json:"load_balancer_delegate,omitempty"`
|
||||
// This interval defines how often the client should send the client stats
|
||||
// to the load balancer. Stats should only be reported when the duration is
|
||||
// positive.
|
||||
ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
|
||||
ClientStatsReportInterval *duration.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} }
|
||||
func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*InitialLoadBalanceResponse) ProtoMessage() {}
|
||||
func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
||||
func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_b3d89fcb5aa158f8, []int{5}
|
||||
}
|
||||
func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_InitialLoadBalanceResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *InitialLoadBalanceResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_InitialLoadBalanceResponse.Size(m)
|
||||
}
|
||||
func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
|
||||
if m != nil {
|
||||
@@ -459,7 +529,7 @@ func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration {
|
||||
func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *duration.Duration {
|
||||
if m != nil {
|
||||
return m.ClientStatsReportInterval
|
||||
}
|
||||
@@ -471,13 +541,35 @@ type ServerList struct {
|
||||
// be updated when server resolutions change or as needed to balance load
|
||||
// across more servers. The client should consume the server list in order
|
||||
// unless instructed otherwise via the client_config.
|
||||
Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
|
||||
Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ServerList) Reset() { *m = ServerList{} }
|
||||
func (m *ServerList) String() string { return proto.CompactTextString(m) }
|
||||
func (*ServerList) ProtoMessage() {}
|
||||
func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
||||
func (*ServerList) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_b3d89fcb5aa158f8, []int{6}
|
||||
}
|
||||
func (m *ServerList) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ServerList.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ServerList.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ServerList) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ServerList.Merge(dst, src)
|
||||
}
|
||||
func (m *ServerList) XXX_Size() int {
|
||||
return xxx_messageInfo_ServerList.Size(m)
|
||||
}
|
||||
func (m *ServerList) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ServerList.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ServerList proto.InternalMessageInfo
|
||||
|
||||
func (m *ServerList) GetServers() []*Server {
|
||||
if m != nil {
|
||||
@@ -486,35 +578,52 @@ func (m *ServerList) GetServers() []*Server {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Contains server information. When none of the [drop_for_*] fields are true,
|
||||
// use the other fields. When drop_for_rate_limiting is true, ignore all other
|
||||
// fields. Use drop_for_load_balancing only when it is true and
|
||||
// drop_for_rate_limiting is false.
|
||||
// Contains server information. When the drop field is not true, use the other
|
||||
// fields.
|
||||
type Server struct {
|
||||
// A resolved address for the server, serialized in network-byte-order. It may
|
||||
// either be an IPv4 or IPv6 address.
|
||||
IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
|
||||
// A resolved port number for the server.
|
||||
Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
|
||||
Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
|
||||
// An opaque but printable token given to the frontend for each pick. All
|
||||
// frontend requests for that pick must include the token in its initial
|
||||
// metadata. The token is used by the backend to verify the request and to
|
||||
// allow the backend to report load to the gRPC LB system.
|
||||
//
|
||||
// Its length is variable but less than 50 bytes.
|
||||
LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
|
||||
// Indicates whether this particular request should be dropped by the client
|
||||
// for rate limiting.
|
||||
DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
|
||||
// Indicates whether this particular request should be dropped by the client
|
||||
// for load balancing.
|
||||
DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
|
||||
// allow the backend to report load to the gRPC LB system. The token is also
|
||||
// used in client stats for reporting dropped calls.
|
||||
LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"`
|
||||
// Indicates whether this particular request should be dropped by the client.
|
||||
// If the request is dropped, there will be a corresponding entry in
|
||||
// ClientStats.calls_finished_with_drop.
|
||||
Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Server) Reset() { *m = Server{} }
|
||||
func (m *Server) String() string { return proto.CompactTextString(m) }
|
||||
func (*Server) ProtoMessage() {}
|
||||
func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
|
||||
func (*Server) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_messages_b3d89fcb5aa158f8, []int{7}
|
||||
}
|
||||
func (m *Server) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Server.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Server.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Server) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Server.Merge(dst, src)
|
||||
}
|
||||
func (m *Server) XXX_Size() int {
|
||||
return xxx_messageInfo_Server.Size(m)
|
||||
}
|
||||
func (m *Server) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Server.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Server proto.InternalMessageInfo
|
||||
|
||||
func (m *Server) GetIpAddress() []byte {
|
||||
if m != nil {
|
||||
@@ -537,25 +646,17 @@ func (m *Server) GetLoadBalanceToken() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Server) GetDropForRateLimiting() bool {
|
||||
func (m *Server) GetDrop() bool {
|
||||
if m != nil {
|
||||
return m.DropForRateLimiting
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *Server) GetDropForLoadBalancing() bool {
|
||||
if m != nil {
|
||||
return m.DropForLoadBalancing
|
||||
return m.Drop
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
|
||||
proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
|
||||
proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
|
||||
proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
|
||||
proto.RegisterType((*ClientStatsPerToken)(nil), "grpc.lb.v1.ClientStatsPerToken")
|
||||
proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
|
||||
proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
|
||||
proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
|
||||
@@ -563,53 +664,55 @@ func init() {
|
||||
proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 709 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b,
|
||||
0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69,
|
||||
0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55,
|
||||
0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28,
|
||||
0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f,
|
||||
0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb,
|
||||
0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56,
|
||||
0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3,
|
||||
0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a,
|
||||
0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18,
|
||||
0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 0x0f, 0xb8,
|
||||
0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a,
|
||||
0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc,
|
||||
0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d,
|
||||
0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f,
|
||||
0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42,
|
||||
0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b,
|
||||
0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf,
|
||||
0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60,
|
||||
0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3,
|
||||
0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29,
|
||||
0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9,
|
||||
0x99, 0xd2, 0x4c, 0x73, 0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1,
|
||||
0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e,
|
||||
0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd,
|
||||
0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a,
|
||||
0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa,
|
||||
0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31,
|
||||
0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a,
|
||||
0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79,
|
||||
0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8,
|
||||
0x1a, 0xd4, 0x63, 0xd4, 0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89,
|
||||
0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f,
|
||||
0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7,
|
||||
0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a,
|
||||
0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62,
|
||||
0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d,
|
||||
0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77,
|
||||
0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc,
|
||||
0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76,
|
||||
0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b,
|
||||
0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06,
|
||||
0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd,
|
||||
0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86,
|
||||
0xa6, 0x4a, 0x06, 0x00, 0x00,
|
||||
func init() {
|
||||
proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor_messages_b3d89fcb5aa158f8)
|
||||
}
|
||||
|
||||
var fileDescriptor_messages_b3d89fcb5aa158f8 = []byte{
|
||||
// 708 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x61, 0x4f, 0xf3, 0x36,
|
||||
0x10, 0x26, 0x6b, 0xe8, 0xdb, 0x5e, 0x5f, 0x8d, 0xce, 0x6c, 0x2c, 0x2d, 0x30, 0x58, 0xa4, 0x21,
|
||||
0x34, 0xb1, 0x54, 0xc0, 0x3e, 0x6c, 0xd2, 0x3e, 0x6c, 0x05, 0xa1, 0xc2, 0xf8, 0x80, 0x52, 0xa4,
|
||||
0x4d, 0x48, 0x93, 0xe7, 0x36, 0x26, 0x58, 0xb8, 0x76, 0x66, 0xbb, 0x45, 0xfb, 0xbc, 0xff, 0x33,
|
||||
0xed, 0x2f, 0x4c, 0xfb, 0x63, 0x53, 0xec, 0xa4, 0x0d, 0x2d, 0xd5, 0xfb, 0x25, 0x72, 0xee, 0x9e,
|
||||
0x7b, 0xee, 0xce, 0x77, 0x8f, 0x21, 0x4c, 0x55, 0x36, 0xc6, 0x7c, 0x84, 0x67, 0xa7, 0xbd, 0x09,
|
||||
0xd5, 0x9a, 0xa4, 0x54, 0xcf, 0x0f, 0x51, 0xa6, 0xa4, 0x91, 0x08, 0x72, 0x4c, 0xc4, 0x47, 0xd1,
|
||||
0xec, 0xb4, 0xfb, 0x45, 0x2a, 0x65, 0xca, 0x69, 0xcf, 0x7a, 0x46, 0xd3, 0xc7, 0x5e, 0x32, 0x55,
|
||||
0xc4, 0x30, 0x29, 0x1c, 0xb6, 0x7b, 0xb0, 0xec, 0x37, 0x6c, 0x42, 0xb5, 0x21, 0x93, 0xcc, 0x01,
|
||||
0xc2, 0x7f, 0x3d, 0x40, 0xb7, 0x92, 0x24, 0x7d, 0xc2, 0x89, 0x18, 0xd3, 0x98, 0xfe, 0x31, 0xa5,
|
||||
0xda, 0xa0, 0x3b, 0xd8, 0x62, 0x82, 0x19, 0x46, 0x38, 0x56, 0xce, 0x14, 0x78, 0x87, 0xde, 0x71,
|
||||
0xeb, 0xec, 0xab, 0x68, 0x91, 0x3d, 0xba, 0x76, 0x90, 0xd5, 0xf8, 0xc1, 0x46, 0xfc, 0x71, 0x11,
|
||||
0x5f, 0x32, 0xfe, 0x00, 0xef, 0xc7, 0x9c, 0x51, 0x61, 0xb0, 0x36, 0xc4, 0xe8, 0xe0, 0x23, 0x4b,
|
||||
0xf7, 0x79, 0x95, 0xee, 0xc2, 0xfa, 0x87, 0xb9, 0x7b, 0xb0, 0x11, 0xb7, 0xc6, 0x8b, 0xdf, 0xfe,
|
||||
0x2e, 0x74, 0xb8, 0x24, 0x09, 0x1e, 0xb9, 0x34, 0x65, 0x51, 0xd8, 0xfc, 0x99, 0xd1, 0xb0, 0x07,
|
||||
0x9d, 0xb5, 0x95, 0x20, 0x04, 0xbe, 0x20, 0x13, 0x6a, 0xcb, 0x6f, 0xc6, 0xf6, 0x1c, 0xfe, 0x0e,
|
||||
0xdb, 0x95, 0x5c, 0x77, 0x54, 0xdd, 0xcb, 0x67, 0x2a, 0xd0, 0x09, 0xa0, 0x57, 0x49, 0x4c, 0x6e,
|
||||
0x2d, 0x02, 0xdb, 0x7c, 0x41, 0xed, 0xd0, 0xbb, 0xd0, 0x14, 0xd3, 0x09, 0x1e, 0x13, 0xce, 0x5d,
|
||||
0x37, 0xb5, 0xb8, 0x21, 0xa6, 0x93, 0x8b, 0xfc, 0x3f, 0xfc, 0xa7, 0x06, 0xad, 0x4a, 0x0a, 0xf4,
|
||||
0x1d, 0x34, 0xe7, 0x37, 0x5f, 0xdc, 0x64, 0x37, 0x72, 0xb3, 0x89, 0xca, 0xd9, 0x44, 0xf7, 0x25,
|
||||
0x22, 0x5e, 0x80, 0xd1, 0xd7, 0xf0, 0xc9, 0x3c, 0x4d, 0x7e, 0x75, 0xca, 0xd0, 0xa4, 0x48, 0xb7,
|
||||
0x55, 0xa6, 0x1b, 0x3a, 0x73, 0xde, 0xc0, 0x02, 0xfb, 0xc8, 0x04, 0xd3, 0x4f, 0x34, 0x09, 0x6a,
|
||||
0x16, 0xdc, 0x2e, 0xc1, 0x57, 0x85, 0x1d, 0xfd, 0x06, 0xdf, 0xac, 0xa2, 0xf1, 0x0b, 0x33, 0x4f,
|
||||
0xb8, 0x98, 0xd4, 0x23, 0x61, 0x9c, 0x26, 0xd8, 0x48, 0xac, 0xa9, 0x48, 0x82, 0xba, 0x25, 0x3a,
|
||||
0x5a, 0x26, 0xfa, 0x85, 0x99, 0x27, 0xd7, 0xeb, 0x95, 0xc5, 0xdf, 0xcb, 0x21, 0x15, 0x09, 0x1a,
|
||||
0xc0, 0x97, 0x6f, 0xd0, 0x3f, 0x0b, 0xf9, 0x22, 0xb0, 0xa2, 0x63, 0xca, 0x66, 0x34, 0x09, 0xde,
|
||||
0x59, 0xca, 0xfd, 0x65, 0xca, 0x9f, 0x73, 0x54, 0x5c, 0x80, 0xd0, 0xaf, 0x10, 0xbc, 0x55, 0x64,
|
||||
0xa2, 0x64, 0x16, 0x34, 0x0e, 0x6b, 0xc7, 0xad, 0xb3, 0x83, 0x35, 0x6b, 0x54, 0x8e, 0x36, 0xfe,
|
||||
0x6c, 0xbc, 0x5c, 0xf1, 0xa5, 0x92, 0xd9, 0x8d, 0xdf, 0xf0, 0xdb, 0x9b, 0x37, 0x7e, 0x63, 0xb3,
|
||||
0x5d, 0x0f, 0xff, 0xf3, 0x60, 0xfb, 0xd5, 0xfe, 0xe8, 0x4c, 0x0a, 0x4d, 0xd1, 0x10, 0xda, 0x0b,
|
||||
0x29, 0x38, 0x5b, 0x31, 0xc1, 0xa3, 0x0f, 0x69, 0xc1, 0xa1, 0x07, 0x1b, 0xf1, 0xd6, 0x5c, 0x0c,
|
||||
0x05, 0xe9, 0xf7, 0xd0, 0xd2, 0x54, 0xcd, 0xa8, 0xc2, 0x9c, 0x69, 0x53, 0x88, 0x61, 0xa7, 0xca,
|
||||
0x37, 0xb4, 0xee, 0x5b, 0x66, 0xc5, 0x04, 0x7a, 0xfe, 0xd7, 0xdf, 0x83, 0xee, 0x92, 0x14, 0x1c,
|
||||
0xa7, 0xd3, 0xc2, 0xdf, 0x1e, 0x74, 0xd7, 0x97, 0x82, 0xbe, 0x85, 0x9d, 0x6a, 0xb0, 0xc2, 0x09,
|
||||
0xe5, 0x34, 0x25, 0xa6, 0xd4, 0xc7, 0xa7, 0x95, 0x35, 0x57, 0x97, 0x85, 0x0f, 0x3d, 0xc0, 0x5e,
|
||||
0x55, 0xbb, 0x58, 0xd1, 0x4c, 0x2a, 0x83, 0x99, 0x30, 0x54, 0xcd, 0x08, 0x2f, 0xca, 0xef, 0xac,
|
||||
0x2c, 0xf4, 0x65, 0xf1, 0x18, 0xc5, 0x9d, 0x8a, 0x96, 0x63, 0x1b, 0x7c, 0x5d, 0xc4, 0x86, 0x3f,
|
||||
0x02, 0x2c, 0x5a, 0x45, 0x27, 0xf0, 0xce, 0xb5, 0xaa, 0x03, 0xcf, 0x4e, 0x16, 0xad, 0xde, 0x49,
|
||||
0x5c, 0x42, 0x6e, 0xfc, 0x46, 0xad, 0xed, 0x87, 0x7f, 0x79, 0x50, 0x77, 0x1e, 0xb4, 0x0f, 0xc0,
|
||||
0x32, 0x4c, 0x92, 0x44, 0x51, 0xad, 0x6d, 0x4b, 0xef, 0xe3, 0x26, 0xcb, 0x7e, 0x72, 0x86, 0xfc,
|
||||
0x2d, 0xc8, 0x73, 0xdb, 0x7a, 0x37, 0x63, 0x7b, 0x5e, 0x23, 0xfa, 0xda, 0x1a, 0xd1, 0x23, 0xf0,
|
||||
0xed, 0xda, 0xf9, 0x87, 0xde, 0x71, 0x23, 0xb6, 0x67, 0xb7, 0x3e, 0xfd, 0xf3, 0x87, 0xd3, 0xa2,
|
||||
0xfd, 0x54, 0x72, 0x22, 0xd2, 0x48, 0xaa, 0xb4, 0x97, 0xd7, 0x6e, 0x3f, 0x7c, 0xd4, 0x7b, 0xe3,
|
||||
0x65, 0x1f, 0xd5, 0xed, 0x55, 0x9d, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x88, 0xe6, 0xf4,
|
||||
0xf7, 0x05, 0x00, 0x00,
|
||||
}
|
||||
|
||||
82 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto generated vendored
@@ -14,35 +14,11 @@
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
package grpc.lb.v1;
|
||||
option go_package = "messages";
|
||||
|
||||
message Duration {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
||||
|
||||
message Timestamp {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
||||
option go_package = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages";
|
||||
|
||||
message LoadBalanceRequest {
|
||||
oneof load_balance_request_type {
|
||||
@@ -56,16 +32,25 @@ message LoadBalanceRequest {
|
||||
}
|
||||
|
||||
message InitialLoadBalanceRequest {
|
||||
// Name of load balanced service (IE, balancer.service.com)
|
||||
// Name of load balanced service (IE, balancer.service.com). Its
|
||||
// length should be less than 256 bytes.
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
// Contains the number of calls finished for a particular load balance token.
|
||||
message ClientStatsPerToken {
|
||||
// See Server.load_balance_token.
|
||||
string load_balance_token = 1;
|
||||
|
||||
// The total number of RPCs that finished associated with the token.
|
||||
int64 num_calls = 2;
|
||||
}
|
||||
|
||||
// Contains client level statistics that are useful to load balancing. Each
|
||||
// count except the timestamp should be reset to zero after reporting the stats.
|
||||
message ClientStats {
|
||||
// The timestamp of generating the report.
|
||||
Timestamp timestamp = 1;
|
||||
google.protobuf.Timestamp timestamp = 1;
|
||||
|
||||
// The total number of RPCs that started.
|
||||
int64 num_calls_started = 2;
|
||||
@@ -73,20 +58,17 @@ message ClientStats {
|
||||
// The total number of RPCs that finished.
|
||||
int64 num_calls_finished = 3;
|
||||
|
||||
// The total number of RPCs that were dropped by the client because of rate
|
||||
// limiting.
|
||||
int64 num_calls_finished_with_drop_for_rate_limiting = 4;
|
||||
|
||||
// The total number of RPCs that were dropped by the client because of load
|
||||
// balancing.
|
||||
int64 num_calls_finished_with_drop_for_load_balancing = 5;
|
||||
|
||||
// The total number of RPCs that failed to reach a server except dropped RPCs.
|
||||
int64 num_calls_finished_with_client_failed_to_send = 6;
|
||||
|
||||
// The total number of RPCs that finished and are known to have been received
|
||||
// by a server.
|
||||
int64 num_calls_finished_known_received = 7;
|
||||
|
||||
// The list of dropped calls.
|
||||
repeated ClientStatsPerToken calls_finished_with_drop = 8;
|
||||
|
||||
reserved 4, 5;
|
||||
}
|
||||
|
||||
message LoadBalanceResponse {
|
||||
@@ -111,7 +93,7 @@ message InitialLoadBalanceResponse {
|
||||
// This interval defines how often the client should send the client stats
|
||||
// to the load balancer. Stats should only be reported when the duration is
|
||||
// positive.
|
||||
Duration client_stats_report_interval = 2;
|
||||
google.protobuf.Duration client_stats_report_interval = 2;
|
||||
}
|
||||
|
||||
message ServerList {
|
||||
@@ -125,10 +107,8 @@ message ServerList {
|
||||
reserved 3;
|
||||
}
|
||||
|
||||
// Contains server information. When none of the [drop_for_*] fields are true,
|
||||
// use the other fields. When drop_for_rate_limiting is true, ignore all other
|
||||
// fields. Use drop_for_load_balancing only when it is true and
|
||||
// drop_for_rate_limiting is false.
|
||||
// Contains server information. When the drop field is not true, use the other
|
||||
// fields.
|
||||
message Server {
|
||||
// A resolved address for the server, serialized in network-byte-order. It may
|
||||
// either be an IPv4 or IPv6 address.
|
||||
@@ -140,16 +120,14 @@ message Server {
|
||||
// An opaque but printable token given to the frontend for each pick. All
|
||||
// frontend requests for that pick must include the token in its initial
|
||||
// metadata. The token is used by the backend to verify the request and to
|
||||
// allow the backend to report load to the gRPC LB system.
|
||||
//
|
||||
// Its length is variable but less than 50 bytes.
|
||||
// allow the backend to report load to the gRPC LB system. The token is also
|
||||
// used in client stats for reporting dropped calls.
|
||||
string load_balance_token = 3;
|
||||
|
||||
// Indicates whether this particular request should be dropped by the client
|
||||
// for rate limiting.
|
||||
bool drop_for_rate_limiting = 4;
|
||||
// Indicates whether this particular request should be dropped by the client.
|
||||
// If the request is dropped, there will be a corresponding entry in
|
||||
// ClientStats.calls_finished_with_drop.
|
||||
bool drop = 4;
|
||||
|
||||
// Indicates whether this particular request should be dropped by the client
|
||||
// for load balancing.
|
||||
bool drop_for_load_balancing = 5;
|
||||
reserved 5;
|
||||
}
|
||||
|
||||
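The switch from the drop_for_rate_limiting / drop_for_load_balancing pair to a single drop field simplifies client handling. Below is a hypothetical Go sketch, not part of this diff, that assumes the generated getters of the vendored messages package shown earlier (GetServers, GetDrop, GetIpAddress, GetPort); it only illustrates how a grpclb client might honor the new field.

package example

import (
	"net"
	"strconv"

	lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
)

// pickBackends skips entries whose Drop field is set instead of dialing them;
// dropped picks would later be reported via ClientStats.calls_finished_with_drop.
func pickBackends(list *lbmpb.ServerList) (backends []string, dropped int) {
	for _, s := range list.GetServers() {
		if s.GetDrop() {
			dropped++
			continue
		}
		addr := net.JoinHostPort(net.IP(s.GetIpAddress()).String(), strconv.Itoa(int(s.GetPort())))
		backends = append(backends, addr)
	}
	return backends, dropped
}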
3 vendor/google.golang.org/grpc/grpclog/grpclog.go generated vendored
@@ -105,18 +105,21 @@ func Fatalln(args ...interface{}) {
|
||||
}
|
||||
|
||||
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
|
||||
//
|
||||
// Deprecated: use Info.
|
||||
func Print(args ...interface{}) {
|
||||
logger.Info(args...)
|
||||
}
|
||||
|
||||
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
|
||||
//
|
||||
// Deprecated: use Infof.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
|
||||
//
|
||||
// Deprecated: use Infoln.
|
||||
func Println(args ...interface{}) {
|
||||
logger.Infoln(args...)
|
||||
|
||||
2 vendor/google.golang.org/grpc/grpclog/logger.go generated vendored
@@ -19,6 +19,7 @@
|
||||
package grpclog
|
||||
|
||||
// Logger mimics golang's standard Logger as an interface.
|
||||
//
|
||||
// Deprecated: use LoggerV2.
|
||||
type Logger interface {
|
||||
Fatal(args ...interface{})
|
||||
@@ -31,6 +32,7 @@ type Logger interface {
|
||||
|
||||
// SetLogger sets the logger that is used in grpc. Call only from
|
||||
// init() functions.
|
||||
//
|
||||
// Deprecated: use SetLoggerV2.
|
||||
func SetLogger(l Logger) {
|
||||
logger = &loggerWrapper{Logger: l}
|
||||
|
||||
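Taken together, these deprecation notices point users at the LoggerV2 API. A minimal sketch, not part of this diff, of wiring it up with the standard grpclog.NewLoggerV2 constructor:

package example

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Replaces the deprecated SetLogger / Print family: Info- and Warning-level
	// output goes to stdout, Error-level output to stderr.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stdout, os.Stderr))
}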
2 vendor/google.golang.org/grpc/health/BUILD generated vendored
@@ -8,9 +8,9 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/codes:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/health/grpc_health_v1:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/status:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
113 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go generated vendored
@@ -1,16 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: grpc_health_v1/health.proto
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
/*
|
||||
Package grpc_health_v1 is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
grpc_health_v1/health.proto
|
||||
|
||||
It has these top-level messages:
|
||||
HealthCheckRequest
|
||||
HealthCheckResponse
|
||||
*/
|
||||
package grpc_health_v1
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
@@ -56,17 +46,39 @@ func (x HealthCheckResponse_ServingStatus) String() string {
|
||||
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
|
||||
}
|
||||
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{1, 0}
|
||||
return fileDescriptor_health_85731b6c49265086, []int{1, 0}
|
||||
}
|
||||
|
||||
type HealthCheckRequest struct {
|
||||
Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
|
||||
Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
|
||||
func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*HealthCheckRequest) ProtoMessage() {}
|
||||
func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_health_85731b6c49265086, []int{0}
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_HealthCheckRequest.Size(m)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *HealthCheckRequest) GetService() string {
|
||||
if m != nil {
|
||||
@@ -76,13 +88,35 @@ func (m *HealthCheckRequest) GetService() string {
|
||||
}
|
||||
|
||||
type HealthCheckResponse struct {
|
||||
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
|
||||
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
|
||||
func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*HealthCheckResponse) ProtoMessage() {}
|
||||
func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_health_85731b6c49265086, []int{1}
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_HealthCheckResponse.Size(m)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
|
||||
if m != nil {
|
||||
@@ -105,8 +139,9 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for Health service
|
||||
|
||||
// HealthClient is the client API for Health service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type HealthClient interface {
|
||||
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
|
||||
}
|
||||
@@ -121,15 +156,14 @@ func NewHealthClient(cc *grpc.ClientConn) HealthClient {
|
||||
|
||||
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
|
||||
out := new(HealthCheckResponse)
|
||||
err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
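For reference, a hedged sketch, not part of this diff, of driving the generated HealthClient above; the connection and service name are placeholders supplied by the caller.

package example

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// checkService issues a single health check against an existing connection.
func checkService(conn *grpc.ClientConn, service string) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: service})
	if err != nil {
		log.Fatalf("health check failed: %v", err)
	}
	log.Printf("serving status of %q: %s", service, resp.GetStatus())
}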
|
||||
|
||||
// Server API for Health service
|
||||
|
||||
// HealthServer is the server API for Health service.
|
||||
type HealthServer interface {
|
||||
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
|
||||
}
|
||||
@@ -166,25 +200,28 @@ var _Health_serviceDesc = grpc.ServiceDesc{
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "grpc_health_v1/health.proto",
|
||||
Metadata: "grpc/health/v1/health.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) }
|
||||
func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_85731b6c49265086) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 213 bytes of a gzipped FileDescriptorProto
|
||||
var fileDescriptor_health_85731b6c49265086 = []byte{
|
||||
// 271 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
|
||||
0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a,
|
||||
0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21,
|
||||
0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48,
|
||||
0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33,
|
||||
0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55,
|
||||
0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f,
|
||||
0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41,
|
||||
0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3,
|
||||
0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85,
|
||||
0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b,
|
||||
0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44,
|
||||
0xb8, 0x36, 0x89, 0x0d, 0x1c, 0x82, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x53, 0x2b, 0x65,
|
||||
0x20, 0x60, 0x01, 0x00, 0x00,
|
||||
0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
|
||||
0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
|
||||
0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
|
||||
0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
|
||||
0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
|
||||
0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
|
||||
0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
|
||||
0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
|
||||
0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
|
||||
0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, 0x84,
|
||||
0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, 0xb8,
|
||||
0xd6, 0x29, 0x91, 0x4b, 0x30, 0x33, 0x1f, 0x4d, 0xa1, 0x13, 0x37, 0x44, 0x65, 0x00, 0x28, 0x70,
|
||||
0x03, 0x18, 0xa3, 0x74, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xd2, 0xf3, 0x73, 0x12, 0xf3,
|
||||
0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0x91, 0x63, 0x03, 0xc4, 0x8e, 0x87, 0xb0, 0xe3, 0xcb, 0x0c,
|
||||
0x57, 0x31, 0xf1, 0xb9, 0x83, 0x4c, 0x83, 0x18, 0xa1, 0x17, 0x66, 0x98, 0xc4, 0x06, 0x8e, 0x24,
|
||||
0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0x66, 0x81, 0xcb, 0xc3, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
34 vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto generated vendored
@@ -1,34 +0,0 @@
|
||||
// Copyright 2017 gRPC authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package grpc.health.v1;
|
||||
|
||||
message HealthCheckRequest {
|
||||
string service = 1;
|
||||
}
|
||||
|
||||
message HealthCheckResponse {
|
||||
enum ServingStatus {
|
||||
UNKNOWN = 0;
|
||||
SERVING = 1;
|
||||
NOT_SERVING = 2;
|
||||
}
|
||||
ServingStatus status = 1;
|
||||
}
|
||||
|
||||
service Health{
|
||||
rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
|
||||
}
|
||||
6 vendor/google.golang.org/grpc/health/health.go generated vendored
@@ -16,7 +16,7 @@
|
||||
*
|
||||
*/
|
||||
|
||||
//go:generate protoc --go_out=plugins=grpc:. grpc_health_v1/health.proto
|
||||
//go:generate ./regenerate.sh
|
||||
|
||||
// Package health provides some utility functions to health-check a server. The implementation
|
||||
// is based on protobuf. Users need to write their own implementations if other IDLs are used.
|
||||
@@ -26,9 +26,9 @@ import (
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Server implements `service Health`.
|
||||
@@ -60,7 +60,7 @@ func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*h
|
||||
Status: status,
|
||||
}, nil
|
||||
}
|
||||
return nil, grpc.Errorf(codes.NotFound, "unknown service")
|
||||
return nil, status.Error(codes.NotFound, "unknown service")
|
||||
}
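The move from grpc.Errorf to status.Error keeps the NotFound code recoverable on the caller side. A hypothetical helper, not in this diff, showing that caller-side counterpart:

package example

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isNotFound reports whether err carries the codes.NotFound status code,
// e.g. the "unknown service" error returned by the health server above.
func isNotFound(err error) bool {
	st, ok := status.FromError(err)
	return ok && st.Code() == codes.NotFound
}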
|
||||
|
||||
// SetServingStatus is called when need to reset the serving status of a service
|
||||
|
||||
33 vendor/google.golang.org/grpc/health/regenerate.sh generated vendored Executable file
@@ -0,0 +1,33 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2018 gRPC authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -eux -o pipefail
|
||||
|
||||
TMP=$(mktemp -d)
|
||||
|
||||
function finish {
|
||||
rm -rf "$TMP"
|
||||
}
|
||||
trap finish EXIT
|
||||
|
||||
pushd "$TMP"
|
||||
mkdir -p grpc/health/v1
|
||||
curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto
|
||||
|
||||
protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto
|
||||
popd
|
||||
rm -f grpc_health_v1/*.pb.go
|
||||
cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/
|
||||
|
||||
4 vendor/google.golang.org/grpc/interceptor.go generated vendored
@@ -48,7 +48,9 @@ type UnaryServerInfo struct {
|
||||
}
|
||||
|
||||
// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
|
||||
// execution of a unary RPC.
|
||||
// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
|
||||
// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
|
||||
// the status message of the RPC.
|
||||
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
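A hypothetical interceptor, not part of this diff, illustrating the contract documented above: errors surfaced around a UnaryHandler should come from the status package so the client observes the intended code rather than codes.Unknown.

package example

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// authInterceptor rejects requests without incoming metadata using a status error.
func authInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	if _, ok := metadata.FromIncomingContext(ctx); !ok {
		return nil, status.Error(codes.Unauthenticated, "missing metadata")
	}
	return handler(ctx, req)
}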
|
||||
|
||||
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
|
||||
|
||||
7 vendor/google.golang.org/grpc/internal/BUILD generated vendored
@@ -17,7 +17,12 @@ filegroup(
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//vendor/google.golang.org/grpc/internal/backoff:all-srcs",
|
||||
"//vendor/google.golang.org/grpc/internal/channelz:all-srcs",
|
||||
"//vendor/google.golang.org/grpc/internal/grpcrand:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
24 vendor/google.golang.org/grpc/internal/backoff/BUILD generated vendored Normal file
@@ -0,0 +1,24 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["backoff.go"],
|
||||
importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/internal/backoff",
|
||||
importpath = "google.golang.org/grpc/internal/backoff",
|
||||
visibility = ["//vendor/google.golang.org/grpc:__subpackages__"],
|
||||
deps = ["//vendor/google.golang.org/grpc/internal/grpcrand:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
78 vendor/google.golang.org/grpc/internal/backoff/backoff.go generated vendored Normal file
@@ -0,0 +1,78 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package backoff implements the backoff strategy for gRPC.
|
||||
//
|
||||
// This is kept in internal until the gRPC project decides whether or not to
|
||||
// allow alternative backoff strategies.
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
)
|
||||
|
||||
// Strategy defines the methodology for backing off after a grpc connection
|
||||
// failure.
|
||||
//
|
||||
type Strategy interface {
|
||||
// Backoff returns the amount of time to wait before the next retry given
|
||||
// the number of consecutive failures.
|
||||
Backoff(retries int) time.Duration
|
||||
}
|
||||
|
||||
const (
|
||||
// baseDelay is the amount of time to wait before retrying after the first
|
||||
// failure.
|
||||
baseDelay = 1.0 * time.Second
|
||||
// factor is applied to the backoff after each retry.
|
||||
factor = 1.6
|
||||
// jitter provides a range to randomize backoff delays.
|
||||
jitter = 0.2
|
||||
)
|
||||
|
||||
// Exponential implements exponential backoff algorithm as defined in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
type Exponential struct {
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
}
|
||||
|
||||
// Backoff returns the amount of time to wait before the next retry given the
|
||||
// number of retries.
|
||||
func (bc Exponential) Backoff(retries int) time.Duration {
|
||||
if retries == 0 {
|
||||
return baseDelay
|
||||
}
|
||||
backoff, max := float64(baseDelay), float64(bc.MaxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff *= factor
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
backoff = max
|
||||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(backoff)
|
||||
}
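A minimal sketch, not part of this diff, of exercising the Exponential strategy above; since the package is internal, real callers live inside google.golang.org/grpc itself.

package example

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/backoff"
)

// printDelays shows the jittered, capped growth of successive retry delays.
func printDelays() {
	bc := backoff.Exponential{MaxDelay: 120 * time.Second}
	for retries := 0; retries < 5; retries++ {
		fmt.Printf("retry %d: wait %v\n", retries, bc.Backoff(retries))
	}
}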
|
||||
30 vendor/google.golang.org/grpc/internal/channelz/BUILD generated vendored Normal file
@@ -0,0 +1,30 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"funcs.go",
|
||||
"types.go",
|
||||
],
|
||||
importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/internal/channelz",
|
||||
importpath = "google.golang.org/grpc/internal/channelz",
|
||||
visibility = ["//vendor/google.golang.org/grpc:__subpackages__"],
|
||||
deps = [
|
||||
"//vendor/google.golang.org/grpc/connectivity:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
573 vendor/google.golang.org/grpc/internal/channelz/funcs.go generated vendored Normal file
@@ -0,0 +1,573 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package channelz defines APIs for enabling channelz service, entry
|
||||
// registration/deletion, and accessing channelz data. It also defines channelz
|
||||
// metric struct formats.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var (
|
||||
db dbWrapper
|
||||
idGen idGenerator
|
||||
// EntryPerPage defines the number of channelz entries to be shown on a web page.
|
||||
EntryPerPage = 50
|
||||
curState int32
|
||||
)
|
||||
|
||||
// TurnOn turns on channelz data collection.
|
||||
func TurnOn() {
|
||||
if !IsOn() {
|
||||
NewChannelzStorage()
|
||||
atomic.StoreInt32(&curState, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// IsOn returns whether channelz data collection is on.
|
||||
func IsOn() bool {
|
||||
return atomic.CompareAndSwapInt32(&curState, 1, 1)
|
||||
}
|
||||
|
||||
// dbWrapper wraps around a reference to the internal channelz data storage, and
// provides synchronized functionality to set and get the reference.
|
||||
type dbWrapper struct {
|
||||
mu sync.RWMutex
|
||||
DB *channelMap
|
||||
}
|
||||
|
||||
func (d *dbWrapper) set(db *channelMap) {
|
||||
d.mu.Lock()
|
||||
d.DB = db
|
||||
d.mu.Unlock()
|
||||
}
|
||||
|
||||
func (d *dbWrapper) get() *channelMap {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
return d.DB
|
||||
}
|
||||
|
||||
// NewChannelzStorage initializes channelz data storage and id generator.
|
||||
//
|
||||
// Note: This function is exported for testing purpose only. User should not call
|
||||
// it in most cases.
|
||||
func NewChannelzStorage() {
|
||||
db.set(&channelMap{
|
||||
topLevelChannels: make(map[int64]struct{}),
|
||||
channels: make(map[int64]*channel),
|
||||
listenSockets: make(map[int64]*listenSocket),
|
||||
normalSockets: make(map[int64]*normalSocket),
|
||||
servers: make(map[int64]*server),
|
||||
subChannels: make(map[int64]*subChannel),
|
||||
})
|
||||
idGen.reset()
|
||||
}
|
||||
|
||||
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a
|
||||
// boolean indicating whether there's more top channels to be queried for.
|
||||
//
|
||||
// The arg id specifies that only top channel with id at or above it will be included
|
||||
// in the result. The returned slice is up to a length of EntryPerPage, and is
|
||||
// sorted in ascending id order.
|
||||
func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
|
||||
return db.get().GetTopChannels(id)
|
||||
}
|
||||
|
||||
// GetServers returns a slice of server's ServerMetric, along with a
|
||||
// boolean indicating whether there's more servers to be queried for.
|
||||
//
|
||||
// The arg id specifies that only server with id at or above it will be included
|
||||
// in the result. The returned slice is up to a length of EntryPerPage, and is
|
||||
// sorted in ascending id order.
|
||||
func GetServers(id int64) ([]*ServerMetric, bool) {
|
||||
return db.get().GetServers(id)
|
||||
}
|
||||
|
||||
// GetServerSockets returns a slice of server's (identified by id) normal socket's
|
||||
// SocketMetric, along with a boolean indicating whether there's more sockets to
|
||||
// be queried for.
|
||||
//
|
||||
// The arg startID specifies that only sockets with id at or above it will be
|
||||
// included in the result. The returned slice is up to a length of EntryPerPage,
|
||||
// and is sorted in ascending id order.
|
||||
func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
|
||||
return db.get().GetServerSockets(id, startID)
|
||||
}
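A hypothetical paging loop, not part of this diff, over the cursor-style API described above; like the backoff example, it could only live inside the grpc module because the package is internal.

package example

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

// dumpTopChannels walks all registered top-level channels, EntryPerPage at a time.
func dumpTopChannels() {
	id := int64(0)
	for {
		metrics, end := channelz.GetTopChannels(id)
		for _, m := range metrics {
			fmt.Printf("channel %d (%s): %d subchannels\n", m.ID, m.RefName, len(m.SubChans))
		}
		if end || len(metrics) == 0 {
			break
		}
		id = metrics[len(metrics)-1].ID + 1
	}
}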
|
||||
|
||||
// GetChannel returns the ChannelMetric for the channel (identified by id).
|
||||
func GetChannel(id int64) *ChannelMetric {
|
||||
return db.get().GetChannel(id)
|
||||
}
|
||||
|
||||
// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
|
||||
func GetSubChannel(id int64) *SubChannelMetric {
|
||||
return db.get().GetSubChannel(id)
|
||||
}
|
||||
|
||||
// GetSocket returns the SocketInternalMetric for the socket (identified by id).
|
||||
func GetSocket(id int64) *SocketMetric {
|
||||
return db.get().GetSocket(id)
|
||||
}
|
||||
|
||||
// RegisterChannel registers the given channel c in channelz database with ref
|
||||
// as its reference name, and add it to the child list of its parent (identified
|
||||
// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
|
||||
// assigned to this channel.
|
||||
func RegisterChannel(c Channel, pid int64, ref string) int64 {
|
||||
id := idGen.genID()
|
||||
cn := &channel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
subChans: make(map[int64]string),
|
||||
nestedChans: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid,
|
||||
}
|
||||
if pid == 0 {
|
||||
db.get().addChannel(id, cn, true, pid, ref)
|
||||
} else {
|
||||
db.get().addChannel(id, cn, false, pid, ref)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterSubChannel registers the given channel c in channelz database with ref
|
||||
// as its reference name, and add it to the child list of its parent (identified
|
||||
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
|
||||
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a SubChannel's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
sc := &subChannel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
sockets: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid,
|
||||
}
|
||||
db.get().addSubChannel(id, sc, pid, ref)
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterServer registers the given server s in channelz database. It returns
|
||||
// the unique channelz tracking id assigned to this server.
|
||||
func RegisterServer(s Server, ref string) int64 {
|
||||
id := idGen.genID()
|
||||
svr := &server{
|
||||
refName: ref,
|
||||
s: s,
|
||||
sockets: make(map[int64]string),
|
||||
listenSockets: make(map[int64]string),
|
||||
id: id,
|
||||
}
|
||||
db.get().addServer(id, svr)
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterListenSocket registers the given listen socket s in channelz database
|
||||
// with ref as its reference name, and add it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this listen socket.
|
||||
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a ListenSocket's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
|
||||
db.get().addListenSocket(id, ls, pid, ref)
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterNormalSocket registers the given normal socket s in channelz database
|
||||
// with ref as its reference name, and add it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this normal socket.
|
||||
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a NormalSocket's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
|
||||
db.get().addNormalSocket(id, ns, pid, ref)
|
||||
return id
|
||||
}
|
||||
|
||||
// RemoveEntry removes the entry with the given unique channelz tracking id from
// the channelz database.
|
||||
func RemoveEntry(id int64) {
|
||||
db.get().removeEntry(id)
|
||||
}
|
||||
|
||||
// channelMap is the storage data structure for channelz.
// Methods of channelMap can be divided into two categories with respect to locking.
// 1. Methods that acquire the global lock.
// 2. Methods that can only be called while the global lock is held.
// Methods of the second type must always be called from inside a method of the first type.
|
||||
type channelMap struct {
|
||||
mu sync.RWMutex
|
||||
topLevelChannels map[int64]struct{}
|
||||
servers map[int64]*server
|
||||
channels map[int64]*channel
|
||||
subChannels map[int64]*subChannel
|
||||
listenSockets map[int64]*listenSocket
|
||||
normalSockets map[int64]*normalSocket
|
||||
}
|
||||
|
||||
func (c *channelMap) addServer(id int64, s *server) {
|
||||
c.mu.Lock()
|
||||
s.cm = c
|
||||
c.servers[id] = s
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
cn.cm = c
|
||||
c.channels[id] = cn
|
||||
if isTopChannel {
|
||||
c.topLevelChannels[id] = struct{}{}
|
||||
} else {
|
||||
c.findEntry(pid).addChild(id, cn)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
sc.cm = c
|
||||
c.subChannels[id] = sc
|
||||
c.findEntry(pid).addChild(id, sc)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
ls.cm = c
|
||||
c.listenSockets[id] = ls
|
||||
c.findEntry(pid).addChild(id, ls)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
ns.cm = c
|
||||
c.normalSockets[id] = ns
|
||||
c.findEntry(pid).addChild(id, ns)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// removeEntry triggers the removal of an entry, which may not indeed delete the
|
||||
// entry, if it has to wait on the deletion of its children, or may lead to a chain
|
||||
// of entry deletion. For example, deleting the last socket of a gracefully shutting
|
||||
// down server will lead to the server being also deleted.
|
||||
func (c *channelMap) removeEntry(id int64) {
|
||||
c.mu.Lock()
|
||||
c.findEntry(id).triggerDelete()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller.
|
||||
func (c *channelMap) findEntry(id int64) entry {
|
||||
var v entry
|
||||
var ok bool
|
||||
if v, ok = c.channels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.subChannels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.servers[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.listenSockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.normalSockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
return &dummyEntry{idNotFound: id}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller.
// deleteEntry simply deletes an entry from the channelMap. Before calling this
// method, the caller must check that this entry is ready to be deleted, i.e. that
// removeEntry() has been called on it and no children still exist.
// Conditionals are ordered by the expected frequency of deletion of each entity
// type, in order to optimize performance.
|
||||
func (c *channelMap) deleteEntry(id int64) {
|
||||
var ok bool
|
||||
if _, ok = c.normalSockets[id]; ok {
|
||||
delete(c.normalSockets, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.subChannels[id]; ok {
|
||||
delete(c.subChannels, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.channels[id]; ok {
|
||||
delete(c.channels, id)
|
||||
delete(c.topLevelChannels, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.listenSockets[id]; ok {
|
||||
delete(c.listenSockets, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.servers[id]; ok {
|
||||
delete(c.servers, id)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
type int64Slice []int64
|
||||
|
||||
func (s int64Slice) Len() int { return len(s) }
|
||||
func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
|
||||
func copyMap(m map[int64]string) map[int64]string {
|
||||
n := make(map[int64]string)
|
||||
for k, v := range m {
|
||||
n[k] = v
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
|
||||
c.mu.RLock()
|
||||
l := len(c.topLevelChannels)
|
||||
ids := make([]int64, 0, l)
|
||||
cns := make([]*channel, 0, min(l, EntryPerPage))
|
||||
|
||||
for k := range c.topLevelChannels {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := 0
|
||||
var end bool
|
||||
var t []*ChannelMetric
|
||||
for i, v := range ids[idx:] {
|
||||
if count == EntryPerPage {
|
||||
break
|
||||
}
|
||||
if cn, ok := c.channels[v]; ok {
|
||||
cns = append(cns, cn)
|
||||
t = append(t, &ChannelMetric{
|
||||
NestedChans: copyMap(cn.nestedChans),
|
||||
SubChans: copyMap(cn.subChans),
|
||||
})
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
|
||||
for i, cn := range cns {
|
||||
t[i].ChannelData = cn.c.ChannelzMetric()
|
||||
t[i].ID = cn.id
|
||||
t[i].RefName = cn.refName
|
||||
}
|
||||
return t, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
|
||||
c.mu.RLock()
|
||||
l := len(c.servers)
|
||||
ids := make([]int64, 0, l)
|
||||
ss := make([]*server, 0, min(l, EntryPerPage))
|
||||
for k := range c.servers {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := 0
|
||||
var end bool
|
||||
var s []*ServerMetric
|
||||
for i, v := range ids[idx:] {
|
||||
if count == EntryPerPage {
|
||||
break
|
||||
}
|
||||
if svr, ok := c.servers[v]; ok {
|
||||
ss = append(ss, svr)
|
||||
s = append(s, &ServerMetric{
|
||||
ListenSockets: copyMap(svr.listenSockets),
|
||||
})
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
|
||||
for i, svr := range ss {
|
||||
s[i].ServerData = svr.s.ChannelzMetric()
|
||||
s[i].ID = svr.id
|
||||
s[i].RefName = svr.refName
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
|
||||
var svr *server
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if svr, ok = c.servers[id]; !ok {
|
||||
// server with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil, true
|
||||
}
|
||||
svrskts := svr.sockets
|
||||
l := len(svrskts)
|
||||
ids := make([]int64, 0, l)
|
||||
sks := make([]*normalSocket, 0, min(l, EntryPerPage))
|
||||
for k := range svrskts {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort((int64Slice(ids)))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := 0
|
||||
var end bool
|
||||
for i, v := range ids[idx:] {
|
||||
if count == EntryPerPage {
|
||||
break
|
||||
}
|
||||
if ns, ok := c.normalSockets[v]; ok {
|
||||
sks = append(sks, ns)
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
var s []*SocketMetric
|
||||
for _, ns := range sks {
|
||||
sm := &SocketMetric{}
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
sm.ID = ns.id
|
||||
sm.RefName = ns.refName
|
||||
s = append(s, sm)
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetChannel(id int64) *ChannelMetric {
|
||||
cm := &ChannelMetric{}
|
||||
var cn *channel
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if cn, ok = c.channels[id]; !ok {
|
||||
// channel with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
cm.NestedChans = copyMap(cn.nestedChans)
|
||||
cm.SubChans = copyMap(cn.subChans)
|
||||
c.mu.RUnlock()
|
||||
cm.ChannelData = cn.c.ChannelzMetric()
|
||||
cm.ID = cn.id
|
||||
cm.RefName = cn.refName
|
||||
return cm
|
||||
}
|
||||
|
||||
func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
|
||||
cm := &SubChannelMetric{}
|
||||
var sc *subChannel
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if sc, ok = c.subChannels[id]; !ok {
|
||||
// subchannel with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
cm.Sockets = copyMap(sc.sockets)
|
||||
c.mu.RUnlock()
|
||||
cm.ChannelData = sc.c.ChannelzMetric()
|
||||
cm.ID = sc.id
|
||||
cm.RefName = sc.refName
|
||||
return cm
|
||||
}
|
||||
|
||||
func (c *channelMap) GetSocket(id int64) *SocketMetric {
|
||||
sm := &SocketMetric{}
|
||||
c.mu.RLock()
|
||||
if ls, ok := c.listenSockets[id]; ok {
|
||||
c.mu.RUnlock()
|
||||
sm.SocketData = ls.s.ChannelzMetric()
|
||||
sm.ID = ls.id
|
||||
sm.RefName = ls.refName
|
||||
return sm
|
||||
}
|
||||
if ns, ok := c.normalSockets[id]; ok {
|
||||
c.mu.RUnlock()
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
sm.ID = ns.id
|
||||
sm.RefName = ns.refName
|
||||
return sm
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
type idGenerator struct {
|
||||
id int64
|
||||
}
|
||||
|
||||
func (i *idGenerator) reset() {
|
||||
atomic.StoreInt64(&i.id, 0)
|
||||
}
|
||||
|
||||
func (i *idGenerator) genID() int64 {
|
||||
return atomic.AddInt64(&i.id, 1)
|
||||
}
|
||||
418
vendor/google.golang.org/grpc/internal/channelz/types.go
generated
vendored
Normal file
@@ -0,0 +1,418 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// entry represents a node in the channelz database.
|
||||
type entry interface {
|
||||
// addChild adds a child e, whose channelz id is id, to the child list
|
||||
addChild(id int64, e entry)
|
||||
// deleteChild deletes the child with channelz id id from the child list
|
||||
deleteChild(id int64)
|
||||
// triggerDelete tries to delete self from channelz database. However, if child
|
||||
// list is not empty, then deletion from the database is on hold until the last
|
||||
// child is deleted from database.
|
||||
triggerDelete()
|
||||
// deleteSelfIfReady checks whether triggerDelete() has been called before, and whether the child
|
||||
// list is now empty. If both conditions are met, it deletes itself from the database.
|
||||
deleteSelfIfReady()
|
||||
}
|
||||
|
||||
// dummyEntry is a fake entry to handle entry not found case.
|
||||
type dummyEntry struct {
|
||||
idNotFound int64
|
||||
}
|
||||
|
||||
func (d *dummyEntry) addChild(id int64, e entry) {
|
||||
// Note: it is possible for a normal program to reach here under a race condition.
// For example, there could be a race between ClientConn.Close() being propagated
// to addrConn and http2Client: ClientConn.Close() cancels the context, which causes
// http2Client to error out. The error is then caught by the transport monitor
// before addrConn.tearDown() is called inside ClientConn.Close(), so the addrConn
// creates a new transport. When that new transport is registered in channelz, its
// parent addrConn may already have been torn down and deleted from channelz
// tracking, which is how the code reaches here.
|
||||
grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) deleteChild(id int64) {
|
||||
// It is possible for a normal program to reach here under race condition.
|
||||
// Refer to the example described in addChild().
|
||||
grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) triggerDelete() {
|
||||
grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||
}
|
||||
|
||||
func (*dummyEntry) deleteSelfIfReady() {
|
||||
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
|
||||
}
|
||||
|
||||
// ChannelMetric defines the info channelz provides for a specific Channel, which
|
||||
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
type ChannelMetric struct {
|
||||
// ID is the channelz id of this channel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this channel.
|
||||
RefName string
|
||||
// ChannelData contains channel internal metric reported by the channel through
|
||||
// ChannelzMetric().
|
||||
ChannelData *ChannelInternalMetric
|
||||
// NestedChans tracks the nested channel type children of this channel in the format of
|
||||
// a map from nested channel channelz id to corresponding reference string.
|
||||
NestedChans map[int64]string
|
||||
// SubChans tracks the subchannel type children of this channel in the format of a
|
||||
// map from subchannel channelz id to corresponding reference string.
|
||||
SubChans map[int64]string
|
||||
// Sockets tracks the socket type children of this channel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
// Note: the current grpc implementation doesn't allow a channel to have sockets directly;
// therefore, this field is unused.
|
||||
Sockets map[int64]string
|
||||
}
|
||||
|
||||
// SubChannelMetric defines the info channelz provides for a specific SubChannel,
|
||||
// which includes ChannelInternalMetric and channelz-specific data, such as
|
||||
// channelz id, child list, etc.
|
||||
type SubChannelMetric struct {
|
||||
// ID is the channelz id of this subchannel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this subchannel.
|
||||
RefName string
|
||||
// ChannelData contains subchannel internal metric reported by the subchannel
|
||||
// through ChannelzMetric().
|
||||
ChannelData *ChannelInternalMetric
|
||||
// NestedChans tracks the nested channel type children of this subchannel in the format of
|
||||
// a map from nested channel channelz id to corresponding reference string.
|
||||
// Note current grpc implementation doesn't allow subchannel to have nested channels
|
||||
// as children, therefore, this field is unused.
|
||||
NestedChans map[int64]string
|
||||
// SubChans tracks the subchannel type children of this subchannel in the format of a
|
||||
// map from subchannel channelz id to corresponding reference string.
|
||||
// Note current grpc implementation doesn't allow subchannel to have subchannels
|
||||
// as children, therefore, this field is unused.
|
||||
SubChans map[int64]string
|
||||
// Sockets tracks the socket type children of this subchannel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
Sockets map[int64]string
|
||||
}
|
||||
|
||||
// ChannelInternalMetric defines the struct that the implementor of Channel interface
|
||||
// should return from ChannelzMetric().
|
||||
type ChannelInternalMetric struct {
|
||||
// current connectivity state of the channel.
|
||||
State connectivity.State
|
||||
// The target this channel originally tried to connect to. May be absent
|
||||
Target string
|
||||
// The number of calls started on the channel.
|
||||
CallsStarted int64
|
||||
// The number of calls that have completed with an OK status.
|
||||
CallsSucceeded int64
|
||||
// The number of calls that have completed with a non-OK status.
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the channel.
|
||||
LastCallStartedTimestamp time.Time
|
||||
//TODO: trace
|
||||
}
|
||||
|
||||
// Channel is the interface that should be satisfied in order to be tracked by
|
||||
// channelz as Channel or SubChannel.
|
||||
type Channel interface {
|
||||
ChannelzMetric() *ChannelInternalMetric
|
||||
}
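As a rough illustration (not part of the vendored code), any type that can report these counters satisfies the Channel interface; the names and values below are made up.
// fakeChannel is a hypothetical Channel implementation used only to show the
// shape of ChannelzMetric(); real implementations (e.g. a client connection)
// report their live counters instead of these illustrative values.
type fakeChannel struct {
	target string
	calls  int64
}

func (f *fakeChannel) ChannelzMetric() *ChannelInternalMetric {
	return &ChannelInternalMetric{
		State:                    connectivity.Ready,
		Target:                   f.target,
		CallsStarted:             f.calls,
		LastCallStartedTimestamp: time.Now(),
	}
}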
|
||||
|
||||
type channel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
nestedChans map[int64]string
|
||||
subChans map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (c *channel) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *subChannel:
|
||||
c.subChans[id] = v.refName
|
||||
case *channel:
|
||||
c.nestedChans[id] = v.refName
|
||||
default:
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channel) deleteChild(id int64) {
|
||||
delete(c.subChans, id)
|
||||
delete(c.nestedChans, id)
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) triggerDelete() {
|
||||
c.closeCalled = true
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) deleteSelfIfReady() {
|
||||
if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
|
||||
return
|
||||
}
|
||||
c.cm.deleteEntry(c.id)
|
||||
// not top channel
|
||||
if c.pid != 0 {
|
||||
c.cm.findEntry(c.pid).deleteChild(c.id)
|
||||
}
|
||||
}
|
||||
|
||||
type subChannel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (sc *subChannel) addChild(id int64, e entry) {
|
||||
if v, ok := e.(*normalSocket); ok {
|
||||
sc.sockets[id] = v.refName
|
||||
} else {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *subChannel) deleteChild(id int64) {
|
||||
delete(sc.sockets, id)
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) triggerDelete() {
|
||||
sc.closeCalled = true
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) deleteSelfIfReady() {
|
||||
if !sc.closeCalled || len(sc.sockets) != 0 {
|
||||
return
|
||||
}
|
||||
sc.cm.deleteEntry(sc.id)
|
||||
sc.cm.findEntry(sc.pid).deleteChild(sc.id)
|
||||
}
|
||||
|
||||
// SocketMetric defines the info channelz provides for a specific Socket, which
|
||||
// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
|
||||
type SocketMetric struct {
|
||||
// ID is the channelz id of this socket.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this socket.
|
||||
RefName string
|
||||
// SocketData contains socket internal metric reported by the socket through
|
||||
// ChannelzMetric().
|
||||
SocketData *SocketInternalMetric
|
||||
}
|
||||
|
||||
// SocketInternalMetric defines the struct that the implementor of Socket interface
|
||||
// should return from ChannelzMetric().
|
||||
type SocketInternalMetric struct {
|
||||
// The number of streams that have been started.
|
||||
StreamsStarted int64
|
||||
// The number of streams that have ended successfully:
|
||||
// On client side, receiving frame with eos bit set.
|
||||
// On server side, sending frame with eos bit set.
|
||||
StreamsSucceeded int64
|
||||
// The number of streams that have ended unsuccessfully:
|
||||
// On client side, termination without receiving frame with eos bit set.
|
||||
// On server side, termination without sending frame with eos bit set.
|
||||
StreamsFailed int64
|
||||
// The number of messages successfully sent on this socket.
|
||||
MessagesSent int64
|
||||
MessagesReceived int64
|
||||
// The number of keep alives sent. This is typically implemented with HTTP/2
|
||||
// ping messages.
|
||||
KeepAlivesSent int64
|
||||
// The last time a stream was created by this endpoint. Usually unset for
|
||||
// servers.
|
||||
LastLocalStreamCreatedTimestamp time.Time
|
||||
// The last time a stream was created by the remote endpoint. Usually unset
|
||||
// for clients.
|
||||
LastRemoteStreamCreatedTimestamp time.Time
|
||||
// The last time a message was sent by this endpoint.
|
||||
LastMessageSentTimestamp time.Time
|
||||
// The last time a message was received by this endpoint.
|
||||
LastMessageReceivedTimestamp time.Time
|
||||
// The amount of window, granted to the local endpoint by the remote endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
LocalFlowControlWindow int64
|
||||
// The amount of window, granted to the remote endpoint by the local endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
RemoteFlowControlWindow int64
|
||||
// The locally bound address.
|
||||
LocalAddr net.Addr
|
||||
// The remote bound address. May be absent.
|
||||
RemoteAddr net.Addr
|
||||
// Optional, represents the name of the remote endpoint, if different than
|
||||
// the original target name.
|
||||
RemoteName string
|
||||
//TODO: socket options
|
||||
//TODO: Security
|
||||
}
|
||||
|
||||
// Socket is the interface that should be satisfied in order to be tracked by
|
||||
// channelz as Socket.
|
||||
type Socket interface {
|
||||
ChannelzMetric() *SocketInternalMetric
|
||||
}
|
||||
|
||||
type listenSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (ls *listenSocket) addChild(id int64, e entry) {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteChild(id int64) {
|
||||
grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) triggerDelete() {
|
||||
ls.cm.deleteEntry(ls.id)
|
||||
ls.cm.findEntry(ls.pid).deleteChild(ls.id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteSelfIfReady() {
|
||||
grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
}
|
||||
|
||||
type normalSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (ns *normalSocket) addChild(id int64, e entry) {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteChild(id int64) {
|
||||
grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) triggerDelete() {
|
||||
ns.cm.deleteEntry(ns.id)
|
||||
ns.cm.findEntry(ns.pid).deleteChild(ns.id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteSelfIfReady() {
|
||||
grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||
}
|
||||
|
||||
// ServerMetric defines the info channelz provides for a specific Server, which
|
||||
// includes ServerInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
type ServerMetric struct {
|
||||
// ID is the channelz id of this server.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this server.
|
||||
RefName string
|
||||
// ServerData contains server internal metric reported by the server through
|
||||
// ChannelzMetric().
|
||||
ServerData *ServerInternalMetric
|
||||
// ListenSockets tracks the listener socket type children of this server in the
|
||||
// format of a map from socket channelz id to corresponding reference string.
|
||||
ListenSockets map[int64]string
|
||||
}
|
||||
|
||||
// ServerInternalMetric defines the struct that the implementor of Server interface
|
||||
// should return from ChannelzMetric().
|
||||
type ServerInternalMetric struct {
|
||||
// The number of incoming calls started on the server.
|
||||
CallsStarted int64
|
||||
// The number of incoming calls that have completed with an OK status.
|
||||
CallsSucceeded int64
|
||||
// The number of incoming calls that have completed with a non-OK status.
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the server.
|
||||
LastCallStartedTimestamp time.Time
|
||||
//TODO: trace
|
||||
}
|
||||
|
||||
// Server is the interface to be satisfied in order to be tracked by channelz as
|
||||
// Server.
|
||||
type Server interface {
|
||||
ChannelzMetric() *ServerInternalMetric
|
||||
}
|
||||
|
||||
type server struct {
|
||||
refName string
|
||||
s Server
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
listenSockets map[int64]string
|
||||
id int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (s *server) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *normalSocket:
|
||||
s.sockets[id] = v.refName
|
||||
case *listenSocket:
|
||||
s.listenSockets[id] = v.refName
|
||||
default:
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) deleteChild(id int64) {
|
||||
delete(s.sockets, id)
|
||||
delete(s.listenSockets, id)
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *server) triggerDelete() {
|
||||
s.closeCalled = true
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *server) deleteSelfIfReady() {
|
||||
if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
|
||||
return
|
||||
}
|
||||
s.cm.deleteEntry(s.id)
|
||||
}
|
||||
23
vendor/google.golang.org/grpc/internal/grpcrand/BUILD
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["grpcrand.go"],
|
||||
importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/internal/grpcrand",
|
||||
importpath = "google.golang.org/grpc/internal/grpcrand",
|
||||
visibility = ["//vendor/google.golang.org/grpc:__subpackages__"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
56
vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpcrand implements math/rand functions in a concurrent-safe way
|
||||
// with a global random source, independent of math/rand's global source.
|
||||
package grpcrand
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
r = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
// Int63n implements rand.Int63n on the grpcrand global source.
|
||||
func Int63n(n int64) int64 {
|
||||
mu.Lock()
|
||||
res := r.Int63n(n)
|
||||
mu.Unlock()
|
||||
return res
|
||||
}
|
||||
|
||||
// Intn implements rand.Intn on the grpcrand global source.
|
||||
func Intn(n int) int {
|
||||
mu.Lock()
|
||||
res := r.Intn(n)
|
||||
mu.Unlock()
|
||||
return res
|
||||
}
|
||||
|
||||
// Float64 implements rand.Float64 on the grpcrand global source.
|
||||
func Float64() float64 {
|
||||
mu.Lock()
|
||||
res := r.Float64()
|
||||
mu.Unlock()
|
||||
return res
|
||||
}
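For orientation, a small usage sketch (a hypothetical caller; since the package is internal it would have to live inside the grpc module): jittering a backoff delay with the shared, mutex-guarded source.
package backoff // hypothetical in-module caller, not part of this vendor drop

import (
	"time"

	"google.golang.org/grpc/internal/grpcrand"
)

// jittered scales d by a random factor in [0.8, 1.2) drawn from grpcrand, so
// concurrent callers never race on a shared math/rand source.
func jittered(d time.Duration) time.Duration {
	factor := 0.8 + 0.4*grpcrand.Float64()
	return time.Duration(float64(d) * factor)
}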
|
||||
30
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
@@ -15,20 +15,22 @@
|
||||
*
|
||||
*/
|
||||
|
||||
// Package internal contains gRPC-internal code for testing, to avoid polluting
|
||||
// the godoc of the top-level grpc package.
|
||||
// Package internal contains gRPC-internal code, to avoid polluting
|
||||
// the godoc of the top-level grpc package. It must not import any grpc
|
||||
// symbols to avoid circular dependencies.
|
||||
package internal
|
||||
|
||||
// TestingCloseConns closes all existing transports but keeps
|
||||
// grpcServer.lis accepting new connections.
|
||||
//
|
||||
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||
// for circular dependency reasons.
|
||||
var TestingCloseConns func(grpcServer interface{})
|
||||
var (
|
||||
|
||||
// TestingUseHandlerImpl enables the http.Handler-based server implementation.
|
||||
// It must be called before Serve and requires TLS credentials.
|
||||
//
|
||||
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||
// for circular dependency reasons.
|
||||
var TestingUseHandlerImpl func(grpcServer interface{})
|
||||
// TestingUseHandlerImpl enables the http.Handler-based server implementation.
|
||||
// It must be called before Serve and requires TLS credentials.
|
||||
//
|
||||
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||
// for circular dependency reasons.
|
||||
TestingUseHandlerImpl func(grpcServer interface{})
|
||||
|
||||
// WithContextDialer is exported by clientconn.go
|
||||
WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption
|
||||
// WithResolverBuilder is exported by clientconn.go
|
||||
WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
|
||||
)
|
||||
|
||||
89
vendor/google.golang.org/grpc/metadata/metadata.go
generated
vendored
@@ -17,7 +17,8 @@
|
||||
*/
|
||||
|
||||
// Package metadata define the structure of the metadata supported by gRPC library.
|
||||
// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata.
|
||||
// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
|
||||
// for more information about custom-metadata.
|
||||
package metadata
|
||||
|
||||
import (
|
||||
@@ -27,7 +28,9 @@ import (
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used.
|
||||
// DecodeKeyValue returns k, v, nil.
|
||||
//
|
||||
// Deprecated: use k and v directly instead.
|
||||
func DecodeKeyValue(k, v string) (string, string, error) {
|
||||
return k, v, nil
|
||||
}
|
||||
@@ -94,6 +97,30 @@ func (md MD) Copy() MD {
|
||||
return Join(md)
|
||||
}
|
||||
|
||||
// Get obtains the values for a given key.
|
||||
func (md MD) Get(k string) []string {
|
||||
k = strings.ToLower(k)
|
||||
return md[k]
|
||||
}
|
||||
|
||||
// Set sets the value of a given key with a slice of values.
|
||||
func (md MD) Set(k string, vals ...string) {
|
||||
if len(vals) == 0 {
|
||||
return
|
||||
}
|
||||
k = strings.ToLower(k)
|
||||
md[k] = vals
|
||||
}
|
||||
|
||||
// Append adds the values to key k, not overwriting what was already stored at that key.
|
||||
func (md MD) Append(k string, vals ...string) {
|
||||
if len(vals) == 0 {
|
||||
return
|
||||
}
|
||||
k = strings.ToLower(k)
|
||||
md[k] = append(md[k], vals...)
|
||||
}
|
||||
|
||||
// Join joins any number of mds into a single MD.
|
||||
// The order of values for each key is determined by the order in which
|
||||
// the mds containing those values are presented to Join.
|
||||
@@ -115,9 +142,26 @@ func NewIncomingContext(ctx context.Context, md MD) context.Context {
|
||||
return context.WithValue(ctx, mdIncomingKey{}, md)
|
||||
}
|
||||
|
||||
// NewOutgoingContext creates a new context with outgoing md attached.
|
||||
// NewOutgoingContext creates a new context with outgoing md attached. If used
|
||||
// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
|
||||
// overwrite any previously-appended metadata.
|
||||
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
|
||||
return context.WithValue(ctx, mdOutgoingKey{}, md)
|
||||
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
|
||||
}
|
||||
|
||||
// AppendToOutgoingContext returns a new context with the provided kv merged
|
||||
// with any existing metadata in the context. Please refer to the
|
||||
// documentation of Pairs for a description of kv.
|
||||
func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
|
||||
if len(kv)%2 == 1 {
|
||||
panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
|
||||
}
|
||||
md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
|
||||
added := make([][]string, len(md.added)+1)
|
||||
copy(added, md.added)
|
||||
added[len(added)-1] = make([]string, len(kv))
|
||||
copy(added[len(added)-1], kv)
|
||||
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
|
||||
}
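A short usage sketch (not part of the diff) of the overwrite behavior documented above: pairs added via AppendToOutgoingContext are dropped if NewOutgoingContext is called afterwards.
package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	ctx := context.Background()
	// Appended pairs are stored lazily alongside any existing outgoing MD...
	ctx = metadata.AppendToOutgoingContext(ctx, "trace-id", "abc")
	// ...but NewOutgoingContext replaces the outgoing MD wholesale, including
	// anything appended so far.
	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("authorization", "token"))
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md) // map[authorization:[token]]
}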
|
||||
|
||||
// FromIncomingContext returns the incoming metadata in ctx if it exists. The
|
||||
@@ -128,10 +172,39 @@ func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
// FromOutgoingContextRaw returns the un-merged, intermediary contents
|
||||
// of rawMD. Remember to perform strings.ToLower on the keys. The returned
|
||||
// MD should not be modified. Writing to it may cause races. Modification
|
||||
// should be made to copies of the returned MD.
|
||||
//
|
||||
// This is intended for gRPC-internal use ONLY.
|
||||
func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
|
||||
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
|
||||
if !ok {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
return raw.md, raw.added, true
|
||||
}
|
||||
|
||||
// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
|
||||
// returned MD should not be modified. Writing to it may cause races.
|
||||
// Modification should be made to the copies of the returned MD.
|
||||
func FromOutgoingContext(ctx context.Context) (md MD, ok bool) {
|
||||
md, ok = ctx.Value(mdOutgoingKey{}).(MD)
|
||||
return
|
||||
// Modification should be made to copies of the returned MD.
|
||||
func FromOutgoingContext(ctx context.Context) (MD, bool) {
|
||||
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
mds := make([]MD, 0, len(raw.added)+1)
|
||||
mds = append(mds, raw.md)
|
||||
for _, vv := range raw.added {
|
||||
mds = append(mds, Pairs(vv...))
|
||||
}
|
||||
return Join(mds...), ok
|
||||
}
|
||||
|
||||
type rawMD struct {
|
||||
md MD
|
||||
added [][]string
|
||||
}
|
||||
|
||||
6
vendor/google.golang.org/grpc/naming/dns_resolver.go
generated
vendored
@@ -153,10 +153,10 @@ type ipWatcher struct {
|
||||
updateChan chan *Update
|
||||
}
|
||||
|
||||
// Next returns the adrress resolution Update for the target. For IP address,
|
||||
// the resolution is itself, thus polling name server is unncessary. Therefore,
|
||||
// Next returns the address resolution Update for the target. For IP address,
|
||||
// the resolution is itself, thus polling name server is unnecessary. Therefore,
|
||||
// Next() will return an Update the first time it is called, and will be blocked
|
||||
// for all following calls as no Update exisits until watcher is closed.
|
||||
// for all following calls as no Update exists until watcher is closed.
|
||||
func (i *ipWatcher) Next() ([]*Update, error) {
|
||||
u, ok := <-i.updateChan
|
||||
if !ok {
|
||||
|
||||
2
vendor/google.golang.org/grpc/naming/go17.go
generated
vendored
@@ -1,4 +1,4 @@
|
||||
// +build go1.7, !go1.8
|
||||
// +build go1.6,!go1.8
|
||||
|
||||
/*
|
||||
*
|
||||
|
||||
12
vendor/google.golang.org/grpc/naming/naming.go
generated
vendored
@@ -18,20 +18,26 @@
|
||||
|
||||
// Package naming defines the naming API and related data structures for gRPC.
|
||||
// The interface is EXPERIMENTAL and may be subject to change.
|
||||
//
|
||||
// Deprecated: please use package resolver.
|
||||
package naming
|
||||
|
||||
// Operation defines the corresponding operations for a name resolution change.
|
||||
//
|
||||
// Deprecated: please use package resolver.
|
||||
type Operation uint8
|
||||
|
||||
const (
|
||||
// Add indicates a new address is added.
|
||||
Add Operation = iota
|
||||
// Delete indicates an exisiting address is deleted.
|
||||
// Delete indicates an existing address is deleted.
|
||||
Delete
|
||||
)
|
||||
|
||||
// Update defines a name resolution update. Notice that it is not valid to have both
// an empty string Addr and nil Metadata in an Update.
|
||||
//
|
||||
// Deprecated: please use package resolver.
|
||||
type Update struct {
|
||||
// Op indicates the operation of the update.
|
||||
Op Operation
|
||||
@@ -43,12 +49,16 @@ type Update struct {
|
||||
}
|
||||
|
||||
// Resolver creates a Watcher for a target to track its resolution changes.
|
||||
//
|
||||
// Deprecated: please use package resolver.
|
||||
type Resolver interface {
|
||||
// Resolve creates a Watcher for target.
|
||||
Resolve(target string) (Watcher, error)
|
||||
}
|
||||
|
||||
// Watcher watches for the updates on the specified target.
|
||||
//
|
||||
// Deprecated: please use package resolver.
|
||||
type Watcher interface {
|
||||
// Next blocks until an update or error happens. It may return one or more
|
||||
// updates. The first call should get the full set of the results. It should
|
||||
|
||||
203
vendor/google.golang.org/grpc/picker_wrapper.go
generated
vendored
@@ -19,12 +19,17 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
@@ -36,13 +41,57 @@ type pickerWrapper struct {
|
||||
done bool
|
||||
blockingCh chan struct{}
|
||||
picker balancer.Picker
|
||||
|
||||
// The latest connection error.
|
||||
connErrMu sync.Mutex
|
||||
connErr error
|
||||
|
||||
stickinessMDKey atomic.Value
|
||||
stickiness *stickyStore
|
||||
}
|
||||
|
||||
func newPickerWrapper() *pickerWrapper {
|
||||
bp := &pickerWrapper{blockingCh: make(chan struct{})}
|
||||
bp := &pickerWrapper{
|
||||
blockingCh: make(chan struct{}),
|
||||
stickiness: newStickyStore(),
|
||||
}
|
||||
return bp
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) updateConnectionError(err error) {
|
||||
bp.connErrMu.Lock()
|
||||
bp.connErr = err
|
||||
bp.connErrMu.Unlock()
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) connectionError() error {
|
||||
bp.connErrMu.Lock()
|
||||
err := bp.connErr
|
||||
bp.connErrMu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) updateStickinessMDKey(newKey string) {
|
||||
// No need to check ok because mdKey == "" if ok == false.
|
||||
if oldKey, _ := bp.stickinessMDKey.Load().(string); oldKey != newKey {
|
||||
bp.stickinessMDKey.Store(newKey)
|
||||
bp.stickiness.reset(newKey)
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) getStickinessMDKey() string {
|
||||
// No need to check ok because mdKey == "" if ok == false.
|
||||
mdKey, _ := bp.stickinessMDKey.Load().(string)
|
||||
return mdKey
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) clearStickinessState() {
|
||||
if oldKey := bp.getStickinessMDKey(); oldKey != "" {
|
||||
// There's no need to reset store if mdKey was "".
|
||||
bp.stickiness.reset(oldKey)
|
||||
}
|
||||
}
|
||||
|
||||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
|
||||
func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
bp.mu.Lock()
|
||||
@@ -57,6 +106,23 @@ func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
bp.mu.Unlock()
|
||||
}
|
||||
|
||||
func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
|
||||
acw.mu.Lock()
|
||||
ac := acw.ac
|
||||
acw.mu.Unlock()
|
||||
ac.incrCallsStarted()
|
||||
return func(b balancer.DoneInfo) {
|
||||
if b.Err != nil && b.Err != io.EOF {
|
||||
ac.incrCallsFailed()
|
||||
} else {
|
||||
ac.incrCallsSucceeded()
|
||||
}
|
||||
if done != nil {
|
||||
done(b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pick returns the transport that will be used for the RPC.
|
||||
// It may block in the following cases:
|
||||
// - there's no picker
|
||||
@@ -65,6 +131,27 @@ func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
// - the subConn returned by the current picker is not READY
|
||||
// When one of these situations happens, pick blocks until the picker gets updated.
|
||||
func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
|
||||
mdKey := bp.getStickinessMDKey()
|
||||
stickyKey, isSticky := stickyKeyFromContext(ctx, mdKey)
|
||||
|
||||
// Potential race here: if stickinessMDKey is updated after the above two
|
||||
// lines, and this pick is a sticky pick, the following put could add an
|
||||
// entry to sticky store with an outdated sticky key.
|
||||
//
|
||||
// The solution: keep the current md key in sticky store, and at the
|
||||
// beginning of each get/put, check the mdkey against store.curMDKey.
|
||||
// - Cons: one more string comparing for each get/put.
|
||||
// - Pros: the string matching happens inside get/put, so the overhead for
|
||||
// non-sticky RPCs will be minimal.
|
||||
|
||||
if isSticky {
|
||||
if t, ok := bp.stickiness.get(mdKey, stickyKey); ok {
|
||||
// Done function returned is always nil.
|
||||
return t, nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
p balancer.Picker
|
||||
ch chan struct{}
|
||||
@@ -97,7 +184,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
|
||||
p = bp.picker
|
||||
bp.mu.Unlock()
|
||||
|
||||
subConn, put, err := p.Pick(ctx, opts)
|
||||
subConn, done, err := p.Pick(ctx, opts)
|
||||
|
||||
if err != nil {
|
||||
switch err {
|
||||
@@ -107,7 +194,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
|
||||
if !failfast {
|
||||
continue
|
||||
}
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "%v", err)
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
|
||||
default:
|
||||
// err is some other error.
|
||||
return nil, nil, toRPCErr(err)
|
||||
@@ -120,7 +207,13 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
|
||||
continue
|
||||
}
|
||||
if t, ok := acw.getAddrConn().getReadyTransport(); ok {
|
||||
return t, put, nil
|
||||
if isSticky {
|
||||
bp.stickiness.put(mdKey, stickyKey, acw)
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
return t, doneChannelzWrapper(acw, done), nil
|
||||
}
|
||||
return t, done, nil
|
||||
}
|
||||
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
|
||||
// If ok == false, ac.state is not READY.
|
||||
@@ -139,3 +232,105 @@ func (bp *pickerWrapper) close() {
|
||||
bp.done = true
|
||||
close(bp.blockingCh)
|
||||
}
|
||||
|
||||
const stickinessKeyCountLimit = 1000
|
||||
|
||||
type stickyStoreEntry struct {
|
||||
acw *acBalancerWrapper
|
||||
addr resolver.Address
|
||||
}
|
||||
|
||||
type stickyStore struct {
|
||||
mu sync.Mutex
|
||||
// curMDKey is checked before every get/put to avoid races. The operation will
|
||||
// abort immediately when the given mdKey is different from the curMDKey.
|
||||
curMDKey string
|
||||
store *linkedMap
|
||||
}
|
||||
|
||||
func newStickyStore() *stickyStore {
|
||||
return &stickyStore{
|
||||
store: newLinkedMap(),
|
||||
}
|
||||
}
|
||||
|
||||
// reset clears the map in stickyStore, and sets curMDKey to newMDKey.
|
||||
func (ss *stickyStore) reset(newMDKey string) {
|
||||
ss.mu.Lock()
|
||||
ss.curMDKey = newMDKey
|
||||
ss.store.clear()
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
|
||||
// stickyKey is the key to look up in store. mdKey will be checked against
|
||||
// curMDKey to avoid races.
|
||||
func (ss *stickyStore) put(mdKey, stickyKey string, acw *acBalancerWrapper) {
|
||||
ss.mu.Lock()
|
||||
defer ss.mu.Unlock()
|
||||
if mdKey != ss.curMDKey {
|
||||
return
|
||||
}
|
||||
// TODO(stickiness): limit the total number of entries.
|
||||
ss.store.put(stickyKey, &stickyStoreEntry{
|
||||
acw: acw,
|
||||
addr: acw.getAddrConn().getCurAddr(),
|
||||
})
|
||||
if ss.store.len() > stickinessKeyCountLimit {
|
||||
ss.store.removeOldest()
|
||||
}
|
||||
}
|
||||
|
||||
// stickyKey is the key to look up in store. mdKey will be checked against
|
||||
// curMDKey to avoid races.
|
||||
func (ss *stickyStore) get(mdKey, stickyKey string) (transport.ClientTransport, bool) {
|
||||
ss.mu.Lock()
|
||||
defer ss.mu.Unlock()
|
||||
if mdKey != ss.curMDKey {
|
||||
return nil, false
|
||||
}
|
||||
entry, ok := ss.store.get(stickyKey)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
ac := entry.acw.getAddrConn()
|
||||
if ac.getCurAddr() != entry.addr {
|
||||
ss.store.remove(stickyKey)
|
||||
return nil, false
|
||||
}
|
||||
t, ok := ac.getReadyTransport()
|
||||
if !ok {
|
||||
ss.store.remove(stickyKey)
|
||||
return nil, false
|
||||
}
|
||||
return t, true
|
||||
}
|
||||
|
||||
// Get one value from metadata in ctx with key stickinessMDKey.
|
||||
//
|
||||
// It returns "", false if stickinessMDKey is an empty string.
|
||||
func stickyKeyFromContext(ctx context.Context, stickinessMDKey string) (string, bool) {
|
||||
if stickinessMDKey == "" {
|
||||
return "", false
|
||||
}
|
||||
|
||||
md, added, ok := metadata.FromOutgoingContextRaw(ctx)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
|
||||
if vv, ok := md[stickinessMDKey]; ok {
|
||||
if len(vv) > 0 {
|
||||
return vv[0], true
|
||||
}
|
||||
}
|
||||
|
||||
for _, ss := range added {
|
||||
for i := 0; i < len(ss)-1; i += 2 {
|
||||
if ss[i] == stickinessMDKey {
|
||||
return ss[i+1], true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", false
|
||||
}
|
||||
|
||||
17
vendor/google.golang.org/grpc/pickfirst.go
generated
vendored
@@ -26,6 +26,9 @@ import (
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// PickFirstBalancerName is the name of the pick_first balancer.
|
||||
const PickFirstBalancerName = "pick_first"
|
||||
|
||||
func newPickfirstBuilder() balancer.Builder {
|
||||
return &pickfirstBuilder{}
|
||||
}
|
||||
@@ -37,7 +40,7 @@ func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions
|
||||
}
|
||||
|
||||
func (*pickfirstBuilder) Name() string {
|
||||
return "pickfirst"
|
||||
return PickFirstBalancerName
|
||||
}
|
||||
|
||||
type pickfirstBalancer struct {
|
||||
@@ -57,14 +60,20 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er
|
||||
return
|
||||
}
|
||||
b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
|
||||
b.sc.Connect()
|
||||
} else {
|
||||
b.sc.UpdateAddresses(addrs)
|
||||
b.sc.Connect()
|
||||
}
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
|
||||
if b.sc != sc || s == connectivity.Shutdown {
|
||||
if b.sc != sc {
|
||||
grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
|
||||
return
|
||||
}
|
||||
if s == connectivity.Shutdown {
|
||||
b.sc = nil
|
||||
return
|
||||
}
|
||||
@@ -93,3 +102,7 @@ func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.
|
||||
}
|
||||
return p.sc, nil, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
balancer.Register(newPickfirstBuilder())
|
||||
}
|
||||
|
||||
3
vendor/google.golang.org/grpc/proxy.go
generated
vendored
@@ -82,8 +82,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_
|
||||
Header: map[string][]string{"User-Agent": {grpcUA}},
|
||||
})
|
||||
|
||||
req = req.WithContext(ctx)
|
||||
if err := req.Write(conn); err != nil {
|
||||
if err := sendHTTPRequest(ctx, req, conn); err != nil {
|
||||
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
|
||||
}
|
||||
|
||||
|
||||
6
vendor/google.golang.org/grpc/resolver/BUILD
generated
vendored
@@ -17,7 +17,11 @@ filegroup(
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//vendor/google.golang.org/grpc/resolver/dns:all-srcs",
|
||||
"//vendor/google.golang.org/grpc/resolver/passthrough:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
33
vendor/google.golang.org/grpc/resolver/dns/BUILD
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"dns_resolver.go",
|
||||
"go17.go",
|
||||
"go18.go",
|
||||
],
|
||||
importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/resolver/dns",
|
||||
importpath = "google.golang.org/grpc/resolver/dns",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/internal/grpcrand:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/resolver:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
381
vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
generated
vendored
Normal file
@@ -0,0 +1,381 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package dns implements a dns resolver to be installed as the default resolver
|
||||
// in grpc.
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
func init() {
|
||||
resolver.Register(NewBuilder())
|
||||
}
|
||||
|
||||
const (
|
||||
defaultPort = "443"
|
||||
defaultFreq = time.Minute * 30
|
||||
golang = "GO"
|
||||
// In DNS, service config is encoded in a TXT record via the mechanism
|
||||
// described in RFC-1464 using the attribute name grpc_config.
|
||||
txtAttribute = "grpc_config="
|
||||
)
|
||||
|
||||
var (
|
||||
errMissingAddr = errors.New("missing address")
|
||||
)
|
||||
|
||||
// NewBuilder creates a dnsBuilder, which is used to build DNS resolvers.
|
||||
func NewBuilder() resolver.Builder {
|
||||
return &dnsBuilder{freq: defaultFreq}
|
||||
}
|
||||
|
||||
type dnsBuilder struct {
|
||||
// frequency of polling the DNS server.
|
||||
freq time.Duration
|
||||
}
|
||||
|
||||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
|
||||
if target.Authority != "" {
|
||||
return nil, fmt.Errorf("Default DNS resolver does not support custom DNS server")
|
||||
}
|
||||
host, port, err := parseTarget(target.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// IP address.
|
||||
if net.ParseIP(host) != nil {
|
||||
host, _ = formatIP(host)
|
||||
addr := []resolver.Address{{Addr: host + ":" + port}}
|
||||
i := &ipResolver{
|
||||
cc: cc,
|
||||
ip: addr,
|
||||
rn: make(chan struct{}, 1),
|
||||
q: make(chan struct{}),
|
||||
}
|
||||
cc.NewAddress(addr)
|
||||
go i.watcher()
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// DNS address (non-IP).
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
d := &dnsResolver{
|
||||
freq: b.freq,
|
||||
host: host,
|
||||
port: port,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cc: cc,
|
||||
t: time.NewTimer(0),
|
||||
rn: make(chan struct{}, 1),
|
||||
disableServiceConfig: opts.DisableServiceConfig,
|
||||
}
|
||||
|
||||
d.wg.Add(1)
|
||||
go d.watcher()
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Scheme returns the naming scheme of this resolver builder, which is "dns".
|
||||
func (b *dnsBuilder) Scheme() string {
|
||||
return "dns"
|
||||
}
|
||||
|
||||
// ipResolver watches for the name resolution update for an IP address.
|
||||
type ipResolver struct {
|
||||
cc resolver.ClientConn
|
||||
ip []resolver.Address
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
rn chan struct{}
|
||||
q chan struct{}
|
||||
}
|
||||
|
||||
// ResolveNow resends the address it stores; no resolution is needed.
|
||||
func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
|
||||
select {
|
||||
case i.rn <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the ipResolver.
|
||||
func (i *ipResolver) Close() {
|
||||
close(i.q)
|
||||
}
|
||||
|
||||
func (i *ipResolver) watcher() {
|
||||
for {
|
||||
select {
|
||||
case <-i.rn:
|
||||
i.cc.NewAddress(i.ip)
|
||||
case <-i.q:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dnsResolver watches for the name resolution update for a non-IP target.
|
||||
type dnsResolver struct {
|
||||
freq time.Duration
|
||||
host string
|
||||
port string
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
cc resolver.ClientConn
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
rn chan struct{}
|
||||
t *time.Timer
|
||||
// wg is used to force Close() to return only after the watcher() goroutine has finished.
// Otherwise, a data race is possible. [Race example] in dns_resolver_test we
// replace the real lookup functions with mocked ones to facilitate testing.
// If Close() doesn't wait for the watcher() goroutine to finish, the race detector
// sometimes warns that the lookups (READs of the lookup function pointers) inside the
// watcher() goroutine race with replaceNetFunc (which WRITEs the lookup function pointers).
|
||||
wg sync.WaitGroup
|
||||
disableServiceConfig bool
|
||||
}
|
||||
|
||||
// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
|
||||
func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
|
||||
select {
|
||||
case d.rn <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the dnsResolver.
|
||||
func (d *dnsResolver) Close() {
|
||||
d.cancel()
|
||||
d.wg.Wait()
|
||||
d.t.Stop()
|
||||
}
|
||||
|
||||
func (d *dnsResolver) watcher() {
|
||||
defer d.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-d.ctx.Done():
|
||||
return
|
||||
case <-d.t.C:
|
||||
case <-d.rn:
|
||||
}
|
||||
result, sc := d.lookup()
|
||||
// Next lookup should happen after an interval defined by d.freq.
|
||||
d.t.Reset(d.freq)
|
||||
d.cc.NewServiceConfig(sc)
|
||||
d.cc.NewAddress(result)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupSRV() []resolver.Address {
|
||||
var newAddrs []resolver.Address
|
||||
_, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
|
||||
return nil
|
||||
}
|
||||
for _, s := range srvs {
|
||||
lbAddrs, err := lookupHost(d.ctx, s.Target)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
|
||||
continue
|
||||
}
|
||||
for _, a := range lbAddrs {
|
||||
a, ok := formatIP(a)
|
||||
if !ok {
|
||||
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
|
||||
continue
|
||||
}
|
||||
addr := a + ":" + strconv.Itoa(int(s.Port))
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
|
||||
}
|
||||
}
|
||||
return newAddrs
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupTXT() string {
|
||||
ss, err := lookupTXT(d.ctx, d.host)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
|
||||
return ""
|
||||
}
|
||||
var res string
|
||||
for _, s := range ss {
|
||||
res += s
|
||||
}
|
||||
|
||||
// TXT record must have "grpc_config=" attribute in order to be used as service config.
|
||||
if !strings.HasPrefix(res, txtAttribute) {
|
||||
grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
return ""
|
||||
}
|
||||
return strings.TrimPrefix(res, txtAttribute)
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupHost() []resolver.Address {
|
||||
var newAddrs []resolver.Address
|
||||
addrs, err := lookupHost(d.ctx, d.host)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
|
||||
return nil
|
||||
}
|
||||
for _, a := range addrs {
|
||||
a, ok := formatIP(a)
|
||||
if !ok {
|
||||
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
|
||||
continue
|
||||
}
|
||||
addr := a + ":" + d.port
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr})
|
||||
}
|
||||
return newAddrs
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookup() ([]resolver.Address, string) {
|
||||
newAddrs := d.lookupSRV()
|
||||
// Support fallback to non-balancer address.
|
||||
newAddrs = append(newAddrs, d.lookupHost()...)
|
||||
if d.disableServiceConfig {
|
||||
return newAddrs, ""
|
||||
}
|
||||
sc := d.lookupTXT()
|
||||
return newAddrs, canaryingSC(sc)
|
||||
}
|
||||
|
||||
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
||||
// If addr is an IPv4 address, return the addr and ok = true.
|
||||
// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
|
||||
func formatIP(addr string) (addrIP string, ok bool) {
|
||||
ip := net.ParseIP(addr)
|
||||
if ip == nil {
|
||||
return "", false
|
||||
}
|
||||
if ip.To4() != nil {
|
||||
return addr, true
|
||||
}
|
||||
return "[" + addr + "]", true
|
||||
}
|
||||
|
||||
// parseTarget takes the user's input target string and returns formatted host and port info.
// If the target doesn't specify a port, the port is set to defaultPort.
// If the target is in IPv6 format and the host name is enclosed in square brackets, the
// brackets are stripped when setting the host.
|
||||
// examples:
|
||||
// target: "www.google.com" returns host: "www.google.com", port: "443"
|
||||
// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
|
||||
// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
|
||||
// target: ":80" returns host: "localhost", port: "80"
|
||||
// target: ":" returns host: "localhost", port: "443"
|
||||
func parseTarget(target string) (host, port string, err error) {
|
||||
if target == "" {
|
||||
return "", "", errMissingAddr
|
||||
}
|
||||
if ip := net.ParseIP(target); ip != nil {
|
||||
// target is an IPv4 or IPv6(without brackets) address
|
||||
return target, defaultPort, nil
|
||||
}
|
||||
if host, port, err = net.SplitHostPort(target); err == nil {
|
||||
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
|
||||
if host == "" {
|
||||
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
|
||||
host = "localhost"
|
||||
}
|
||||
if port == "" {
|
||||
// If the port field is empty (the target ends with a colon), e.g. "[::1]:", defaultPort is used.
|
||||
port = defaultPort
|
||||
}
|
||||
return host, port, nil
|
||||
}
|
||||
if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
|
||||
// target doesn't have port
|
||||
return host, port, nil
|
||||
}
|
||||
return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
|
||||
}
|
||||
|
||||
type rawChoice struct {
|
||||
ClientLanguage *[]string `json:"clientLanguage,omitempty"`
|
||||
Percentage *int `json:"percentage,omitempty"`
|
||||
ClientHostName *[]string `json:"clientHostName,omitempty"`
|
||||
ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"`
|
||||
}
|
||||
|
||||
func containsString(a *[]string, b string) bool {
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
for _, c := range *a {
|
||||
if c == b {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func chosenByPercentage(a *int) bool {
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
return grpcrand.Intn(100)+1 <= *a
|
||||
}
|
||||
|
||||
func canaryingSC(js string) string {
|
||||
if js == "" {
|
||||
return ""
|
||||
}
|
||||
var rcs []rawChoice
|
||||
err := json.Unmarshal([]byte(js), &rcs)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
|
||||
return ""
|
||||
}
|
||||
cliHostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
|
||||
return ""
|
||||
}
|
||||
var sc string
|
||||
for _, c := range rcs {
|
||||
if !containsString(c.ClientLanguage, golang) ||
|
||||
!chosenByPercentage(c.Percentage) ||
|
||||
!containsString(c.ClientHostName, cliHostname) ||
|
||||
c.ServiceConfig == nil {
|
||||
continue
|
||||
}
|
||||
sc = string(*c.ServiceConfig)
|
||||
break
|
||||
}
|
||||
return sc
|
||||
}
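For reference, a hedged sketch of a TXT record value as read by lookupTXT above; the "grpc_config=" prefix is stripped before the JSON array reaches canaryingSC, and the embedded service config body is purely illustrative, not taken from this diff.
// Hypothetical TXT record payload: a JSON array of choices prefixed with
// "grpc_config=". This choice targets Go clients 100% of the time; the
// serviceConfig body itself is only an example.
const exampleTXT = `grpc_config=[{"clientLanguage":["GO"],"percentage":100,` +
	`"serviceConfig":{"loadBalancingPolicy":"round_robin"}}]`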
|
||||
35
vendor/google.golang.org/grpc/resolver/dns/go17.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
// +build go1.6, !go1.8
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
|
||||
lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
|
||||
return net.LookupSRV(service, proto, name)
|
||||
}
|
||||
lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
|
||||
)
|
||||
29
vendor/google.golang.org/grpc/resolver/dns/go18.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
// +build go1.8
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package dns
|
||||
|
||||
import "net"
|
||||
|
||||
var (
|
||||
lookupHost = net.DefaultResolver.LookupHost
|
||||
lookupSRV = net.DefaultResolver.LookupSRV
|
||||
lookupTXT = net.DefaultResolver.LookupTXT
|
||||
)
|
||||
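On Go 1.8 and later the file above swaps the lookup functions for net.DefaultResolver methods, so DNS queries honor the caller's context. A small sketch of the kind of deadline-bound lookups the DNS resolver performs (the hostnames are placeholders):

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Context-aware host lookup via net.DefaultResolver (Go 1.8+).
	addrs, err := net.DefaultResolver.LookupHost(ctx, "example.com")
	fmt.Println(addrs, err)

	// The gRPC DNS resolver also queries _grpclb._tcp SRV records and TXT service-config records.
	_, srvs, err := net.DefaultResolver.LookupSRV(ctx, "grpclb", "tcp", "example.com")
	fmt.Println(srvs, err)
}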
24 vendor/google.golang.org/grpc/resolver/passthrough/BUILD generated vendored Normal file
@@ -0,0 +1,24 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["passthrough.go"],
|
||||
importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/resolver/passthrough",
|
||||
importpath = "google.golang.org/grpc/resolver/passthrough",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["//vendor/google.golang.org/grpc/resolver:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
57 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go generated vendored Normal file
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package passthrough implements a pass-through resolver. It sends the target
|
||||
// name without scheme back to gRPC as resolved address.
|
||||
package passthrough
|
||||
|
||||
import "google.golang.org/grpc/resolver"
|
||||
|
||||
const scheme = "passthrough"
|
||||
|
||||
type passthroughBuilder struct{}
|
||||
|
||||
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
|
||||
r := &passthroughResolver{
|
||||
target: target,
|
||||
cc: cc,
|
||||
}
|
||||
r.start()
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (*passthroughBuilder) Scheme() string {
|
||||
return scheme
|
||||
}
|
||||
|
||||
type passthroughResolver struct {
|
||||
target resolver.Target
|
||||
cc resolver.ClientConn
|
||||
}
|
||||
|
||||
func (r *passthroughResolver) start() {
|
||||
r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
|
||||
}
|
||||
|
||||
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
|
||||
|
||||
func (*passthroughResolver) Close() {}
|
||||
|
||||
func init() {
|
||||
resolver.Register(&passthroughBuilder{})
|
||||
}
|
||||
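Since the passthrough builder registers itself in init(), a client can hand gRPC an already-resolved address and skip name resolution entirely. A hedged usage sketch (the target address and the use of WithInsecure are illustrative):

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// "passthrough:///<addr>" returns the endpoint to the ClientConn untouched;
	// per the resolver.go change below, passthrough is also the default scheme in v1.13.0.
	conn, err := grpc.Dial("passthrough:///127.0.0.1:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}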
47 vendor/google.golang.org/grpc/resolver/resolver.go generated vendored
@@ -24,42 +24,42 @@ var (
|
||||
// m is a map from scheme to resolver builder.
|
||||
m = make(map[string]Builder)
|
||||
// defaultScheme is the default scheme to use.
|
||||
defaultScheme string
|
||||
defaultScheme = "passthrough"
|
||||
)
|
||||
|
||||
// TODO(bar) install dns resolver in init(){}.
|
||||
|
||||
// Register registers the resolver builder to the resolver map.
|
||||
// b.Scheme will be used as the scheme registered with this builder.
|
||||
// Register registers the resolver builder to the resolver map. b.Scheme will be
|
||||
// used as the scheme registered with this builder.
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. If multiple Resolvers are
|
||||
// registered with the same name, the one registered last will take effect.
|
||||
func Register(b Builder) {
|
||||
m[b.Scheme()] = b
|
||||
}
|
||||
|
||||
// Get returns the resolver builder registered with the given scheme.
|
||||
// If no builder is register with the scheme, the default scheme will
|
||||
// be used.
|
||||
// If the default scheme is not modified, "dns" will be the default
|
||||
// scheme, and the preinstalled dns resolver will be used.
|
||||
// If the default scheme is modified, and a resolver is registered with
|
||||
// the scheme, that resolver will be returned.
|
||||
// If the default scheme is modified, and no resolver is registered with
|
||||
// the scheme, nil will be returned.
|
||||
//
|
||||
// If no builder is register with the scheme, nil will be returned.
|
||||
func Get(scheme string) Builder {
|
||||
if b, ok := m[scheme]; ok {
|
||||
return b
|
||||
}
|
||||
if b, ok := m[defaultScheme]; ok {
|
||||
return b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetDefaultScheme sets the default scheme that will be used.
|
||||
// The default default scheme is "dns".
|
||||
// The default default scheme is "passthrough".
|
||||
func SetDefaultScheme(scheme string) {
|
||||
defaultScheme = scheme
|
||||
}
|
||||
|
||||
// GetDefaultScheme gets the default scheme that will be used.
|
||||
func GetDefaultScheme() string {
|
||||
return defaultScheme
|
||||
}
|
||||
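SetDefaultScheme lets a program pick which registered resolver handles scheme-less targets now that the built-in default is "passthrough". A minimal sketch, assuming the dns resolver is linked into the binary (grpc-go registers it via a blank import) and that this runs before any ClientConn is created, since Register and SetDefaultScheme are not thread-safe:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/resolver"
)

func init() {
	// Must run before grpc.Dial; only affects targets without an explicit scheme.
	resolver.SetDefaultScheme("dns")
}

func main() {
	// "example.com:443" now goes through the dns resolver instead of passthrough.
	conn, err := grpc.Dial("example.com:443", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}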
|
||||
// AddressType indicates the address type returned by name resolution.
|
||||
type AddressType uint8
|
||||
|
||||
@@ -78,7 +78,9 @@ type Address struct {
|
||||
// Type is the type of this address.
|
||||
Type AddressType
|
||||
// ServerName is the name of this address.
|
||||
// It's the name of the grpc load balancer, which will be used for authentication.
|
||||
//
|
||||
// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
|
||||
// balancer, not the name of the backend.
|
||||
ServerName string
|
||||
// Metadata is the information associated with Addr, which may be used
|
||||
// to make load balancing decision.
|
||||
@@ -88,10 +90,17 @@ type Address struct {
|
||||
// BuildOption includes additional information for the builder to create
|
||||
// the resolver.
|
||||
type BuildOption struct {
|
||||
// DisableServiceConfig indicates whether resolver should fetch service config data.
|
||||
DisableServiceConfig bool
|
||||
}
|
||||
|
||||
// ClientConn contains the callbacks for resolver to notify any updates
|
||||
// to the gRPC ClientConn.
|
||||
//
|
||||
// This interface is to be implemented by gRPC. Users should not need a
|
||||
// brand new implementation of this interface. For the situations like
|
||||
// testing, the new implementation should embed this interface. This allows
|
||||
// gRPC to add new methods to this interface.
|
||||
type ClientConn interface {
|
||||
// NewAddress is called by resolver to notify ClientConn a new list
|
||||
// of resolved addresses.
|
||||
@@ -128,8 +137,10 @@ type ResolveNowOption struct{}
|
||||
// Resolver watches for the updates on the specified target.
|
||||
// Updates include address updates and service config updates.
|
||||
type Resolver interface {
|
||||
// ResolveNow will be called by gRPC to try to resolve the target name again.
|
||||
// It's just a hint, resolver can ignore this if it's not necessary.
|
||||
// ResolveNow will be called by gRPC to try to resolve the target name
|
||||
// again. It's just a hint, resolver can ignore this if it's not necessary.
|
||||
//
|
||||
// It could be called multiple times concurrently.
|
||||
ResolveNow(ResolveNowOption)
|
||||
// Close closes the resolver.
|
||||
Close()
|
||||
|
||||
73 vendor/google.golang.org/grpc/resolver_conn_wrapper.go generated vendored
@@ -19,6 +19,7 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
@@ -36,39 +37,43 @@ type ccResolverWrapper struct {
|
||||
}
|
||||
|
||||
// split2 returns the values from strings.SplitN(s, sep, 2).
|
||||
// If sep is not found, it returns "", s instead.
|
||||
func split2(s, sep string) (string, string) {
|
||||
// If sep is not found, it returns ("", s, false) instead.
|
||||
func split2(s, sep string) (string, string, bool) {
|
||||
spl := strings.SplitN(s, sep, 2)
|
||||
if len(spl) < 2 {
|
||||
return "", s
|
||||
return "", "", false
|
||||
}
|
||||
return spl[0], spl[1]
|
||||
return spl[0], spl[1], true
|
||||
}
|
||||
|
||||
// parseTarget splits target into a struct containing scheme, authority and
|
||||
// endpoint.
|
||||
//
|
||||
// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
|
||||
// target}.
|
||||
func parseTarget(target string) (ret resolver.Target) {
|
||||
ret.Scheme, ret.Endpoint = split2(target, "://")
|
||||
ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/")
|
||||
var ok bool
|
||||
ret.Scheme, ret.Endpoint, ok = split2(target, "://")
|
||||
if !ok {
|
||||
return resolver.Target{Endpoint: target}
|
||||
}
|
||||
ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
|
||||
if !ok {
|
||||
return resolver.Target{Endpoint: target}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
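The rewritten split2/parseTarget pair only treats a target as scheme://authority/endpoint when both separators are present; anything else collapses to a bare Endpoint so the default scheme applies. A standalone sketch of that behavior with a few illustrative inputs:

package main

import (
	"fmt"
	"strings"
)

type target struct{ Scheme, Authority, Endpoint string }

func split2(s, sep string) (string, string, bool) {
	spl := strings.SplitN(s, sep, 2)
	if len(spl) < 2 {
		return "", "", false
	}
	return spl[0], spl[1], true
}

func parseTarget(t string) (ret target) {
	var ok bool
	if ret.Scheme, ret.Endpoint, ok = split2(t, "://"); !ok {
		return target{Endpoint: t}
	}
	if ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/"); !ok {
		return target{Endpoint: t}
	}
	return ret
}

func main() {
	for _, s := range []string{
		"dns://8.8.8.8/example.com:443",  // {Scheme:dns Authority:8.8.8.8 Endpoint:example.com:443}
		"passthrough:///localhost:50051", // {Scheme:passthrough Authority: Endpoint:localhost:50051}
		"localhost:50051",                // no scheme: {Endpoint:localhost:50051}, default scheme applies
	} {
		fmt.Printf("%q -> %+v\n", s, parseTarget(s))
	}
}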
|
||||
// newCCResolverWrapper parses cc.target for scheme and gets the resolver
|
||||
// builder for this scheme. It then builds the resolver and starts the
|
||||
// monitoring goroutine for it.
|
||||
// builder for this scheme and builds the resolver. The monitoring goroutine
|
||||
// for it is not started yet and can be created by calling start().
|
||||
//
|
||||
// This function could return nil, nil, in tests for old behaviors.
|
||||
// TODO(bar) never return nil, nil when DNS becomes the default resolver.
|
||||
// If withResolverBuilder dial option is set, the specified resolver will be
|
||||
// used instead.
|
||||
func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
|
||||
target := parseTarget(cc.target)
|
||||
grpclog.Infof("dialing to target with scheme: %q", target.Scheme)
|
||||
|
||||
rb := resolver.Get(target.Scheme)
|
||||
rb := cc.dopts.resolverBuilder
|
||||
if rb == nil {
|
||||
// TODO(bar) return error when DNS becomes the default (implemented and
|
||||
// registered by DNS package).
|
||||
grpclog.Infof("could not get resolver for scheme: %q", target.Scheme)
|
||||
return nil, nil
|
||||
return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
|
||||
}
|
||||
|
||||
ccr := &ccResolverWrapper{
|
||||
@@ -79,15 +84,18 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
|
||||
}
|
||||
|
||||
var err error
|
||||
ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{})
|
||||
ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go ccr.watcher()
|
||||
return ccr, nil
|
||||
}
|
||||
|
||||
// watcher processes address updates and service config updates sequencially.
|
||||
func (ccr *ccResolverWrapper) start() {
|
||||
go ccr.watcher()
|
||||
}
|
||||
|
||||
// watcher processes address updates and service config updates sequentially.
|
||||
// Otherwise, we need to resolve possible races between address and service
|
||||
// config (e.g. they specify different balancer types).
|
||||
func (ccr *ccResolverWrapper) watcher() {
|
||||
@@ -100,20 +108,31 @@ func (ccr *ccResolverWrapper) watcher() {
|
||||
|
||||
select {
|
||||
case addrs := <-ccr.addrCh:
|
||||
grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs)
|
||||
// TODO(bar switching) this should never be nil. Pickfirst should be default.
|
||||
if ccr.cc.balancerWrapper != nil {
|
||||
// TODO(bar switching) create balancer if it's nil?
|
||||
ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
|
||||
select {
|
||||
case <-ccr.done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
|
||||
ccr.cc.handleResolvedAddrs(addrs, nil)
|
||||
case sc := <-ccr.scCh:
|
||||
select {
|
||||
case <-ccr.done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
|
||||
ccr.cc.handleServiceConfig(sc)
|
||||
case <-ccr.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
|
||||
ccr.resolver.ResolveNow(o)
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) close() {
|
||||
ccr.resolver.Close()
|
||||
close(ccr.done)
|
||||
@@ -129,7 +148,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||
}
|
||||
|
||||
// NewServiceConfig is called by the resolver implemenetion to send service
|
||||
// configs to gPRC.
|
||||
// configs to gRPC.
|
||||
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
||||
select {
|
||||
case <-ccr.scCh:
|
||||
|
||||
653 vendor/google.golang.org/grpc/rpc_util.go generated vendored
@@ -21,18 +21,21 @@ package grpc
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
stdctx "context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/encoding"
|
||||
"google.golang.org/grpc/encoding/proto"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
@@ -41,6 +44,8 @@ import (
|
||||
)
|
||||
|
||||
// Compressor defines the interface gRPC uses to compress a message.
|
||||
//
|
||||
// Deprecated: use package encoding.
|
||||
type Compressor interface {
|
||||
// Do compresses p into w.
|
||||
Do(w io.Writer, p []byte) error
|
||||
@@ -53,14 +58,34 @@ type gzipCompressor struct {
|
||||
}
|
||||
|
||||
// NewGZIPCompressor creates a Compressor based on GZIP.
|
||||
//
|
||||
// Deprecated: use package encoding/gzip.
|
||||
func NewGZIPCompressor() Compressor {
|
||||
c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
|
||||
return c
|
||||
}
|
||||
|
||||
// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
|
||||
// of assuming DefaultCompression.
|
||||
//
|
||||
// The error returned will be nil if the level is valid.
|
||||
//
|
||||
// Deprecated: use package encoding/gzip.
|
||||
func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
|
||||
if level < gzip.DefaultCompression || level > gzip.BestCompression {
|
||||
return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
|
||||
}
|
||||
return &gzipCompressor{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return gzip.NewWriter(ioutil.Discard)
|
||||
},
|
||||
},
|
||||
w, err := gzip.NewWriterLevel(ioutil.Discard, level)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return w
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
|
||||
@@ -78,6 +103,8 @@ func (c *gzipCompressor) Type() string {
|
||||
}
|
||||
|
||||
// Decompressor defines the interface gRPC uses to decompress a message.
|
||||
//
|
||||
// Deprecated: use package encoding.
|
||||
type Decompressor interface {
|
||||
// Do reads the data from r and uncompress them.
|
||||
Do(r io.Reader) ([]byte, error)
|
||||
@@ -90,6 +117,8 @@ type gzipDecompressor struct {
|
||||
}
|
||||
|
||||
// NewGZIPDecompressor creates a Decompressor based on GZIP.
|
||||
//
|
||||
// Deprecated: use package encoding/gzip.
|
||||
func NewGZIPDecompressor() Decompressor {
|
||||
return &gzipDecompressor{}
|
||||
}
|
||||
@@ -124,14 +153,15 @@ func (d *gzipDecompressor) Type() string {
|
||||
|
||||
// callInfo contains all related configuration and information about an RPC.
|
||||
type callInfo struct {
|
||||
compressorType string
|
||||
failFast bool
|
||||
headerMD metadata.MD
|
||||
trailerMD metadata.MD
|
||||
peer *peer.Peer
|
||||
stream *clientStream
|
||||
traceInfo traceInfo // in trace.go
|
||||
maxReceiveMessageSize *int
|
||||
maxSendMessageSize *int
|
||||
creds credentials.PerRPCCredentials
|
||||
contentSubtype string
|
||||
codec baseCodec
|
||||
}
|
||||
|
||||
func defaultCallInfo() *callInfo {
|
||||
@@ -158,87 +188,239 @@ type EmptyCallOption struct{}
|
||||
func (EmptyCallOption) before(*callInfo) error { return nil }
|
||||
func (EmptyCallOption) after(*callInfo) {}
|
||||
|
||||
type beforeCall func(c *callInfo) error
|
||||
|
||||
func (o beforeCall) before(c *callInfo) error { return o(c) }
|
||||
func (o beforeCall) after(c *callInfo) {}
|
||||
|
||||
type afterCall func(c *callInfo)
|
||||
|
||||
func (o afterCall) before(c *callInfo) error { return nil }
|
||||
func (o afterCall) after(c *callInfo) { o(c) }
|
||||
|
||||
// Header returns a CallOptions that retrieves the header metadata
|
||||
// for a unary RPC.
|
||||
func Header(md *metadata.MD) CallOption {
|
||||
return afterCall(func(c *callInfo) {
|
||||
*md = c.headerMD
|
||||
})
|
||||
return HeaderCallOption{HeaderAddr: md}
|
||||
}
|
||||
|
||||
// HeaderCallOption is a CallOption for collecting response header metadata.
|
||||
// The metadata field will be populated *after* the RPC completes.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type HeaderCallOption struct {
|
||||
HeaderAddr *metadata.MD
|
||||
}
|
||||
|
||||
func (o HeaderCallOption) before(c *callInfo) error { return nil }
|
||||
func (o HeaderCallOption) after(c *callInfo) {
|
||||
if c.stream != nil {
|
||||
*o.HeaderAddr, _ = c.stream.Header()
|
||||
}
|
||||
}
|
||||
|
||||
// Trailer returns a CallOptions that retrieves the trailer metadata
|
||||
// for a unary RPC.
|
||||
func Trailer(md *metadata.MD) CallOption {
|
||||
return afterCall(func(c *callInfo) {
|
||||
*md = c.trailerMD
|
||||
})
|
||||
return TrailerCallOption{TrailerAddr: md}
|
||||
}
|
||||
|
||||
// Peer returns a CallOption that retrieves peer information for a
|
||||
// unary RPC.
|
||||
func Peer(peer *peer.Peer) CallOption {
|
||||
return afterCall(func(c *callInfo) {
|
||||
if c.peer != nil {
|
||||
*peer = *c.peer
|
||||
// TrailerCallOption is a CallOption for collecting response trailer metadata.
|
||||
// The metadata field will be populated *after* the RPC completes.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type TrailerCallOption struct {
|
||||
TrailerAddr *metadata.MD
|
||||
}
|
||||
|
||||
func (o TrailerCallOption) before(c *callInfo) error { return nil }
|
||||
func (o TrailerCallOption) after(c *callInfo) {
|
||||
if c.stream != nil {
|
||||
*o.TrailerAddr = c.stream.Trailer()
|
||||
}
|
||||
}
|
||||
|
||||
// Peer returns a CallOption that retrieves peer information for a unary RPC.
|
||||
// The peer field will be populated *after* the RPC completes.
|
||||
func Peer(p *peer.Peer) CallOption {
|
||||
return PeerCallOption{PeerAddr: p}
|
||||
}
|
||||
|
||||
// PeerCallOption is a CallOption for collecting the identity of the remote
|
||||
// peer. The peer field will be populated *after* the RPC completes.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type PeerCallOption struct {
|
||||
PeerAddr *peer.Peer
|
||||
}
|
||||
|
||||
func (o PeerCallOption) before(c *callInfo) error { return nil }
|
||||
func (o PeerCallOption) after(c *callInfo) {
|
||||
if c.stream != nil {
|
||||
if x, ok := peer.FromContext(c.stream.Context()); ok {
|
||||
*o.PeerAddr = *x
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// FailFast configures the action to take when an RPC is attempted on broken
|
||||
// connections or unreachable servers. If failfast is true, the RPC will fail
|
||||
// connections or unreachable servers. If failFast is true, the RPC will fail
|
||||
// immediately. Otherwise, the RPC client will block the call until a
|
||||
// connection is available (or the call is canceled or times out) and will retry
|
||||
// the call if it fails due to a transient error. Please refer to
|
||||
// connection is available (or the call is canceled or times out) and will
|
||||
// retry the call if it fails due to a transient error. gRPC will not retry if
|
||||
// data was written to the wire unless the server indicates it did not process
|
||||
// the data. Please refer to
|
||||
// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
|
||||
// Note: failFast is default to true.
|
||||
//
|
||||
// By default, RPCs are "Fail Fast".
|
||||
func FailFast(failFast bool) CallOption {
|
||||
return beforeCall(func(c *callInfo) error {
|
||||
c.failFast = failFast
|
||||
return nil
|
||||
})
|
||||
return FailFastCallOption{FailFast: failFast}
|
||||
}
|
||||
|
||||
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
|
||||
// fast or not.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type FailFastCallOption struct {
|
||||
FailFast bool
|
||||
}
|
||||
|
||||
func (o FailFastCallOption) before(c *callInfo) error {
|
||||
c.failFast = o.FailFast
|
||||
return nil
|
||||
}
|
||||
func (o FailFastCallOption) after(c *callInfo) {}
|
||||
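With this refactor, Header, Trailer, Peer and FailFast are thin constructors over exported *CallOption structs whose after() hooks read from the clientStream once the RPC finishes. A hedged client-side sketch; the method name and the req/resp values are placeholders, and in real code a generated client passes these options through for you:

package rpcclient

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

// callWithMetadata shows the struct-backed call options in action.
func callWithMetadata(ctx context.Context, conn *grpc.ClientConn, req, resp interface{}) error {
	var (
		hdr, trlr metadata.MD
		pr        peer.Peer
	)
	err := grpc.Invoke(ctx, "/echo.Echo/UnaryEcho", req, resp, conn,
		grpc.Header(&hdr),    // populated after the RPC completes
		grpc.Trailer(&trlr),  // populated after the RPC completes
		grpc.Peer(&pr),       // identity of the remote peer
		grpc.FailFast(false), // wait for a ready connection instead of failing fast
	)
	log.Printf("header=%v trailer=%v peer=%v", hdr, trlr, pr.Addr)
	return err
}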
|
||||
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
|
||||
func MaxCallRecvMsgSize(s int) CallOption {
|
||||
return beforeCall(func(o *callInfo) error {
|
||||
o.maxReceiveMessageSize = &s
|
||||
return nil
|
||||
})
|
||||
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
|
||||
}
|
||||
|
||||
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||
// size the client can receive.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type MaxRecvMsgSizeCallOption struct {
|
||||
MaxRecvMsgSize int
|
||||
}
|
||||
|
||||
func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
|
||||
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
|
||||
return nil
|
||||
}
|
||||
func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
|
||||
|
||||
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
|
||||
func MaxCallSendMsgSize(s int) CallOption {
|
||||
return beforeCall(func(o *callInfo) error {
|
||||
o.maxSendMessageSize = &s
|
||||
return nil
|
||||
})
|
||||
return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
|
||||
}
|
||||
|
||||
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
|
||||
// size the client can send.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type MaxSendMsgSizeCallOption struct {
|
||||
MaxSendMsgSize int
|
||||
}
|
||||
|
||||
func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
|
||||
c.maxSendMessageSize = &o.MaxSendMsgSize
|
||||
return nil
|
||||
}
|
||||
func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
|
||||
|
||||
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
|
||||
// for a call.
|
||||
func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
|
||||
return beforeCall(func(c *callInfo) error {
|
||||
c.creds = creds
|
||||
return nil
|
||||
})
|
||||
return PerRPCCredsCallOption{Creds: creds}
|
||||
}
|
||||
|
||||
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
|
||||
// credentials to use for the call.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type PerRPCCredsCallOption struct {
|
||||
Creds credentials.PerRPCCredentials
|
||||
}
|
||||
|
||||
func (o PerRPCCredsCallOption) before(c *callInfo) error {
|
||||
c.creds = o.Creds
|
||||
return nil
|
||||
}
|
||||
func (o PerRPCCredsCallOption) after(c *callInfo) {}
|
||||
|
||||
// UseCompressor returns a CallOption which sets the compressor used when
|
||||
// sending the request. If WithCompressor is also set, UseCompressor has
|
||||
// higher priority.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func UseCompressor(name string) CallOption {
|
||||
return CompressorCallOption{CompressorType: name}
|
||||
}
|
||||
|
||||
// CompressorCallOption is a CallOption that indicates the compressor to use.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type CompressorCallOption struct {
|
||||
CompressorType string
|
||||
}
|
||||
|
||||
func (o CompressorCallOption) before(c *callInfo) error {
|
||||
c.compressorType = o.CompressorType
|
||||
return nil
|
||||
}
|
||||
func (o CompressorCallOption) after(c *callInfo) {}
|
||||
|
||||
// CallContentSubtype returns a CallOption that will set the content-subtype
|
||||
// for a call. For example, if content-subtype is "json", the Content-Type over
|
||||
// the wire will be "application/grpc+json". The content-subtype is converted
|
||||
// to lowercase before being included in Content-Type. See Content-Type on
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
//
|
||||
// If CallCustomCodec is not also used, the content-subtype will be used to
|
||||
// look up the Codec to use in the registry controlled by RegisterCodec. See
|
||||
// the documentation on RegisterCodec for details on registration. The lookup
|
||||
// of content-subtype is case-insensitive. If no such Codec is found, the call
|
||||
// will result in an error with code codes.Internal.
|
||||
//
|
||||
// If CallCustomCodec is also used, that Codec will be used for all request and
|
||||
// response messages, with the content-subtype set to the given contentSubtype
|
||||
// here for requests.
|
||||
func CallContentSubtype(contentSubtype string) CallOption {
|
||||
return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
|
||||
}
|
||||
|
||||
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
|
||||
// used for marshaling messages.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type ContentSubtypeCallOption struct {
|
||||
ContentSubtype string
|
||||
}
|
||||
|
||||
func (o ContentSubtypeCallOption) before(c *callInfo) error {
|
||||
c.contentSubtype = o.ContentSubtype
|
||||
return nil
|
||||
}
|
||||
func (o ContentSubtypeCallOption) after(c *callInfo) {}
|
||||
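CallContentSubtype only works if a Codec with that name has been registered through encoding.RegisterCodec. A hedged sketch of a JSON codec registered under "json" (the codec itself is illustrative; proto messages would normally go through jsonpb rather than encoding/json):

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
)

// jsonCodec implements encoding.Codec using the standard library.
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                               { return "json" }

func init() {
	// Registration is keyed by Name(); CallContentSubtype lookups are case-insensitive.
	encoding.RegisterCodec(jsonCodec{})
}

// Per call, the Content-Type becomes "application/grpc+json" and this codec is used.
var jsonCall = grpc.CallContentSubtype("json")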
|
||||
// CallCustomCodec returns a CallOption that will set the given Codec to be
|
||||
// used for all request and response messages for a call. The result of calling
|
||||
// String() will be used as the content-subtype in a case-insensitive manner.
|
||||
//
|
||||
// See Content-Type on
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details. Also see the documentation on RegisterCodec and
|
||||
// CallContentSubtype for more details on the interaction between Codec and
|
||||
// content-subtype.
|
||||
//
|
||||
// This function is provided for advanced users; prefer to use only
|
||||
// CallContentSubtype to select a registered codec instead.
|
||||
func CallCustomCodec(codec Codec) CallOption {
|
||||
return CustomCodecCallOption{Codec: codec}
|
||||
}
|
||||
|
||||
// CustomCodecCallOption is a CallOption that indicates the codec used for
|
||||
// marshaling messages.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type CustomCodecCallOption struct {
|
||||
Codec Codec
|
||||
}
|
||||
|
||||
func (o CustomCodecCallOption) before(c *callInfo) error {
|
||||
c.codec = o.Codec
|
||||
return nil
|
||||
}
|
||||
func (o CustomCodecCallOption) after(c *callInfo) {}
|
||||
|
||||
// The format of the payload: compressed or not?
|
||||
type payloadFormat uint8
|
||||
|
||||
const (
|
||||
compressionNone payloadFormat = iota // no compression
|
||||
compressionMade
|
||||
compressionNone payloadFormat = 0 // no compression
|
||||
compressionMade payloadFormat = 1 // compressed
|
||||
)
|
||||
|
||||
// parser reads complete gRPC messages from the underlying reader.
|
||||
@@ -248,8 +430,8 @@ type parser struct {
|
||||
// error types.
|
||||
r io.Reader
|
||||
|
||||
// The header of a gRPC message. Find more detail
|
||||
// at https://grpc.io/docs/guides/wire.html.
|
||||
// The header of a gRPC message. Find more detail at
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
|
||||
header [5]byte
|
||||
}
|
||||
|
||||
@@ -277,8 +459,11 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
|
||||
if length == 0 {
|
||||
return pf, nil, nil
|
||||
}
|
||||
if length > uint32(maxReceiveMessageSize) {
|
||||
return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
|
||||
if int64(length) > int64(maxInt) {
|
||||
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
|
||||
}
|
||||
if int(length) > maxReceiveMessageSize {
|
||||
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
|
||||
}
|
||||
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
|
||||
// of making it for each message:
|
||||
@@ -292,67 +477,104 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
|
||||
return pf, msg, nil
|
||||
}
|
||||
|
||||
// encode serializes msg and returns a buffer of message header and a buffer of msg.
|
||||
// If msg is nil, it generates the message header and an empty msg buffer.
|
||||
func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) {
|
||||
var b []byte
|
||||
const (
|
||||
payloadLen = 1
|
||||
sizeLen = 4
|
||||
)
|
||||
|
||||
if msg != nil {
|
||||
var err error
|
||||
b, err = c.Marshal(msg)
|
||||
// encode serializes msg and returns a buffer containing the message, or an
|
||||
// error if it is too large to be transmitted by grpc. If msg is nil, it
|
||||
// generates an empty message.
|
||||
func encode(c baseCodec, msg interface{}) ([]byte, error) {
|
||||
if msg == nil { // NOTE: typed nils will not be caught by this check
|
||||
return nil, nil
|
||||
}
|
||||
b, err := c.Marshal(msg)
|
||||
if err != nil {
|
||||
return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
|
||||
return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
|
||||
}
|
||||
if outPayload != nil {
|
||||
outPayload.Payload = msg
|
||||
// TODO truncate large payload.
|
||||
outPayload.Data = b
|
||||
outPayload.Length = len(b)
|
||||
}
|
||||
if cp != nil {
|
||||
if err := cp.Do(cbuf, b); err != nil {
|
||||
return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
|
||||
}
|
||||
b = cbuf.Bytes()
|
||||
}
|
||||
}
|
||||
|
||||
if uint(len(b)) > math.MaxUint32 {
|
||||
return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
|
||||
return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
|
||||
}
|
||||
|
||||
bufHeader := make([]byte, payloadLen+sizeLen)
|
||||
if cp == nil {
|
||||
bufHeader[0] = byte(compressionNone)
|
||||
} else {
|
||||
bufHeader[0] = byte(compressionMade)
|
||||
}
|
||||
// Write length of b into buf
|
||||
binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b)))
|
||||
if outPayload != nil {
|
||||
outPayload.WireLength = payloadLen + sizeLen + len(b)
|
||||
}
|
||||
return bufHeader, b, nil
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
|
||||
// compress returns the input bytes compressed by compressor or cp. If both
|
||||
// compressors are nil, returns nil.
|
||||
//
|
||||
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
|
||||
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
|
||||
if compressor == nil && cp == nil {
|
||||
return nil, nil
|
||||
}
|
||||
wrapErr := func(err error) error {
|
||||
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
|
||||
}
|
||||
cbuf := &bytes.Buffer{}
|
||||
if compressor != nil {
|
||||
z, _ := compressor.Compress(cbuf)
|
||||
if _, err := z.Write(in); err != nil {
|
||||
return nil, wrapErr(err)
|
||||
}
|
||||
if err := z.Close(); err != nil {
|
||||
return nil, wrapErr(err)
|
||||
}
|
||||
} else {
|
||||
if err := cp.Do(cbuf, in); err != nil {
|
||||
return nil, wrapErr(err)
|
||||
}
|
||||
}
|
||||
return cbuf.Bytes(), nil
|
||||
}
|
||||
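compress is fed either by the legacy Compressor or by a named encoding.Compressor; clients opt into the latter per call (or per connection) with UseCompressor. A usage sketch, assuming the target address; importing encoding/gzip registers the "gzip" compressor in its init() and also provides the gzip.Name constant:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // registers the "gzip" encoding.Compressor
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		// Compress every request message sent on this connection with the registered gzip compressor.
		grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}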
|
||||
const (
|
||||
payloadLen = 1
|
||||
sizeLen = 4
|
||||
headerLen = payloadLen + sizeLen
|
||||
)
|
||||
|
||||
// msgHeader returns a 5-byte header for the message being transmitted and the
|
||||
// payload, which is compData if non-nil or data otherwise.
|
||||
func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
|
||||
hdr = make([]byte, headerLen)
|
||||
if compData != nil {
|
||||
hdr[0] = byte(compressionMade)
|
||||
data = compData
|
||||
} else {
|
||||
hdr[0] = byte(compressionNone)
|
||||
}
|
||||
|
||||
// Write length of payload into buf
|
||||
binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
|
||||
return hdr, data
|
||||
}
|
||||
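Every message on the wire is now framed by msgHeader: one compression-flag byte plus a 4-byte big-endian length, followed by the (possibly compressed) payload. A standalone sketch that builds and re-parses that 5-byte prefix; it mirrors the constants above but is not the internal API:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	payloadLen = 1
	sizeLen    = 4
	headerLen  = payloadLen + sizeLen
)

// frame prepends the gRPC message header to msg.
func frame(msg []byte, compressed bool) []byte {
	hdr := make([]byte, headerLen, headerLen+len(msg))
	if compressed {
		hdr[0] = 1 // compressionMade
	}
	binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(msg)))
	return append(hdr, msg...)
}

func main() {
	wire := frame([]byte("hello"), false)
	fmt.Printf("flag=%d length=%d payload=%q\n",
		wire[0], binary.BigEndian.Uint32(wire[payloadLen:headerLen]), wire[headerLen:])
	// prints: flag=0 length=5 payload="hello"
}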
|
||||
func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
|
||||
return &stats.OutPayload{
|
||||
Client: client,
|
||||
Payload: msg,
|
||||
Data: data,
|
||||
Length: len(data),
|
||||
WireLength: len(payload) + headerLen,
|
||||
SentTime: t,
|
||||
}
|
||||
}
|
||||
|
||||
func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
|
||||
switch pf {
|
||||
case compressionNone:
|
||||
case compressionMade:
|
||||
if dc == nil || recvCompress != dc.Type() {
|
||||
return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
|
||||
if recvCompress == "" || recvCompress == encoding.Identity {
|
||||
return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
|
||||
}
|
||||
if !haveCompressor {
|
||||
return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
|
||||
}
|
||||
default:
|
||||
return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf)
|
||||
return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error {
|
||||
// For the two compressor parameters, both should not be set, but if they are,
|
||||
// dc takes precedence over compressor.
|
||||
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
|
||||
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
|
||||
pf, d, err := p.recvMsg(maxReceiveMessageSize)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -360,22 +582,37 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
|
||||
if inPayload != nil {
|
||||
inPayload.WireLength = len(d)
|
||||
}
|
||||
if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
|
||||
return err
|
||||
|
||||
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
|
||||
return st.Err()
|
||||
}
|
||||
|
||||
if pf == compressionMade {
|
||||
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
|
||||
// use this decompressor as the default.
|
||||
if dc != nil {
|
||||
d, err = dc.Do(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
} else {
|
||||
dcReader, err := compressor.Decompress(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
d, err = ioutil.ReadAll(dcReader)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(d) > maxReceiveMessageSize {
|
||||
// TODO: Revisit the error code. Currently keep it consistent with java
|
||||
// implementation.
|
||||
return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
|
||||
return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
|
||||
}
|
||||
if err := c.Unmarshal(d, m); err != nil {
|
||||
return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||||
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||||
}
|
||||
if inPayload != nil {
|
||||
inPayload.RecvTime = time.Now()
|
||||
@@ -389,8 +626,6 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
|
||||
|
||||
type rpcInfo struct {
|
||||
failfast bool
|
||||
bytesSent bool
|
||||
bytesReceived bool
|
||||
}
|
||||
|
||||
type rpcInfoContextKey struct{}
|
||||
@@ -404,69 +639,10 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
|
||||
if ss, ok := rpcInfoFromContext(ctx); ok {
|
||||
ss.bytesReceived = s.bytesReceived
|
||||
ss.bytesSent = s.bytesSent
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// toRPCErr converts an error into an error from the status package.
|
||||
func toRPCErr(err error) error {
|
||||
if _, ok := status.FromError(err); ok {
|
||||
return err
|
||||
}
|
||||
switch e := err.(type) {
|
||||
case transport.StreamError:
|
||||
return status.Error(e.Code, e.Desc)
|
||||
case transport.ConnectionError:
|
||||
return status.Error(codes.Unavailable, e.Desc)
|
||||
default:
|
||||
switch err {
|
||||
case context.DeadlineExceeded, stdctx.DeadlineExceeded:
|
||||
return status.Error(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled, stdctx.Canceled:
|
||||
return status.Error(codes.Canceled, err.Error())
|
||||
case ErrClientConnClosing:
|
||||
return status.Error(codes.FailedPrecondition, err.Error())
|
||||
}
|
||||
}
|
||||
return status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
// convertCode converts a standard Go error into its canonical code. Note that
|
||||
// this is only used to translate the error returned by the server applications.
|
||||
func convertCode(err error) codes.Code {
|
||||
switch err {
|
||||
case nil:
|
||||
return codes.OK
|
||||
case io.EOF:
|
||||
return codes.OutOfRange
|
||||
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
|
||||
return codes.FailedPrecondition
|
||||
case os.ErrInvalid:
|
||||
return codes.InvalidArgument
|
||||
case context.Canceled, stdctx.Canceled:
|
||||
return codes.Canceled
|
||||
case context.DeadlineExceeded, stdctx.DeadlineExceeded:
|
||||
return codes.DeadlineExceeded
|
||||
}
|
||||
switch {
|
||||
case os.IsExist(err):
|
||||
return codes.AlreadyExists
|
||||
case os.IsNotExist(err):
|
||||
return codes.NotFound
|
||||
case os.IsPermission(err):
|
||||
return codes.PermissionDenied
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
||||
// Code returns the error code for err if it was produced by the rpc system.
|
||||
// Otherwise, it returns codes.Unknown.
|
||||
//
|
||||
// Deprecated; use status.FromError and Code method instead.
|
||||
// Deprecated: use status.FromError and Code method instead.
|
||||
func Code(err error) codes.Code {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Code()
|
||||
@@ -477,7 +653,7 @@ func Code(err error) codes.Code {
|
||||
// ErrorDesc returns the error description of err if it was produced by the rpc system.
|
||||
// Otherwise, it returns err.Error() or empty string when err is nil.
|
||||
//
|
||||
// Deprecated; use status.FromError and Message method instead.
|
||||
// Deprecated: use status.FromError and Message method instead.
|
||||
func ErrorDesc(err error) string {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Message()
|
||||
@@ -488,85 +664,78 @@ func ErrorDesc(err error) string {
|
||||
// Errorf returns an error containing an error code and a description;
|
||||
// Errorf returns nil if c is OK.
|
||||
//
|
||||
// Deprecated; use status.Errorf instead.
|
||||
// Deprecated: use status.Errorf instead.
|
||||
func Errorf(c codes.Code, format string, a ...interface{}) error {
|
||||
return status.Errorf(c, format, a...)
|
||||
}
|
||||
|
||||
// MethodConfig defines the configuration recommended by the service providers for a
|
||||
// particular method.
|
||||
// This is EXPERIMENTAL and subject to change.
|
||||
type MethodConfig struct {
|
||||
// WaitForReady indicates whether RPCs sent to this method should wait until
|
||||
// the connection is ready by default (!failfast). The value specified via the
|
||||
// gRPC client API will override the value set here.
|
||||
WaitForReady *bool
|
||||
// Timeout is the default timeout for RPCs sent to this method. The actual
|
||||
// deadline used will be the minimum of the value specified here and the value
|
||||
// set by the application via the gRPC client API. If either one is not set,
|
||||
// then the other will be used. If neither is set, then the RPC has no deadline.
|
||||
Timeout *time.Duration
|
||||
// MaxReqSize is the maximum allowed payload size for an individual request in a
|
||||
// stream (client->server) in bytes. The size which is measured is the serialized
|
||||
// payload after per-message compression (but before stream compression) in bytes.
|
||||
// The actual value used is the minimum of the value specified here and the value set
|
||||
// by the application via the gRPC client API. If either one is not set, then the other
|
||||
// will be used. If neither is set, then the built-in default is used.
|
||||
MaxReqSize *int
|
||||
// MaxRespSize is the maximum allowed payload size for an individual response in a
|
||||
// stream (server->client) in bytes.
|
||||
MaxRespSize *int
|
||||
// setCallInfoCodec should only be called after CallOptions have been applied.
|
||||
func setCallInfoCodec(c *callInfo) error {
|
||||
if c.codec != nil {
|
||||
// codec was already set by a CallOption; use it.
|
||||
return nil
|
||||
}
|
||||
|
||||
if c.contentSubtype == "" {
|
||||
// No codec specified in CallOptions; use proto by default.
|
||||
c.codec = encoding.GetCodec(proto.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// c.contentSubtype is already lowercased in CallContentSubtype
|
||||
c.codec = encoding.GetCodec(c.contentSubtype)
|
||||
if c.codec == nil {
|
||||
return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ServiceConfig is provided by the service provider and contains parameters for how
|
||||
// clients that connect to the service should behave.
|
||||
// This is EXPERIMENTAL and subject to change.
|
||||
type ServiceConfig struct {
|
||||
// LB is the load balancer the service providers recommends. The balancer specified
|
||||
// via grpc.WithBalancer will override this.
|
||||
LB Balancer
|
||||
// Methods contains a map for the methods in this service.
|
||||
// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
|
||||
// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
|
||||
// Otherwise, the method has no MethodConfig to use.
|
||||
Methods map[string]MethodConfig
|
||||
// parseDialTarget returns the network and address to pass to dialer
|
||||
func parseDialTarget(target string) (net string, addr string) {
|
||||
net = "tcp"
|
||||
|
||||
m1 := strings.Index(target, ":")
|
||||
m2 := strings.Index(target, ":/")
|
||||
|
||||
// handle unix:addr which will fail with url.Parse
|
||||
if m1 >= 0 && m2 < 0 {
|
||||
if n := target[0:m1]; n == "unix" {
|
||||
net = n
|
||||
addr = target[m1+1:]
|
||||
return net, addr
|
||||
}
|
||||
}
|
||||
if m2 >= 0 {
|
||||
t, err := url.Parse(target)
|
||||
if err != nil {
|
||||
return net, target
|
||||
}
|
||||
scheme := t.Scheme
|
||||
addr = t.Path
|
||||
if scheme == "unix" {
|
||||
net = scheme
|
||||
if addr == "" {
|
||||
addr = t.Host
|
||||
}
|
||||
return net, addr
|
||||
}
|
||||
}
|
||||
|
||||
return net, target
|
||||
}
|
||||
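parseDialTarget maps a dial target onto the (network, address) pair handed to the dialer, special-casing unix sockets. A small table of the behavior I would expect from the code above; the socket paths and host names are placeholders:

package main

import "fmt"

func main() {
	// Expected (network, address) pairs produced by parseDialTarget above.
	cases := []struct{ target, network, addr string }{
		{"unix:app.sock", "unix", "app.sock"},                       // "unix:addr" shortcut, handled before url.Parse
		{"unix:///var/run/app.sock", "unix", "/var/run/app.sock"},   // URL form, the path becomes the address
		{"dns:///example.com:443", "tcp", "dns:///example.com:443"}, // non-unix schemes pass through untouched
		{"example.com:443", "tcp", "example.com:443"},               // plain host:port defaults to tcp
	}
	for _, c := range cases {
		fmt.Printf("%-26s -> network=%q addr=%q\n", c.target, c.network, c.addr)
	}
}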
|
||||
func min(a, b *int) *int {
|
||||
if *a < *b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
|
||||
if mcMax == nil && doptMax == nil {
|
||||
return &defaultVal
|
||||
}
|
||||
if mcMax != nil && doptMax != nil {
|
||||
return min(mcMax, doptMax)
|
||||
}
|
||||
if mcMax != nil {
|
||||
return mcMax
|
||||
}
|
||||
return doptMax
|
||||
}
|
||||
|
||||
// SupportPackageIsVersion3 is referenced from generated protocol buffer files.
|
||||
// The latest support package version is 4.
|
||||
// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the
|
||||
// next support package version update.
|
||||
const SupportPackageIsVersion3 = true
|
||||
|
||||
// SupportPackageIsVersion4 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the grpc package.
|
||||
// The SupportPackageIsVersion variables are referenced from generated protocol
|
||||
// buffer files to ensure compatibility with the gRPC version used. The latest
|
||||
// support package version is 5.
|
||||
//
|
||||
// This constant may be renamed in the future if a change in the generated code
|
||||
// requires a synchronised update of grpc-go and protoc-gen-go. This constant
|
||||
// should not be referenced from any other code.
|
||||
const SupportPackageIsVersion4 = true
|
||||
|
||||
// Version is the current grpc version.
|
||||
const Version = "1.7.5"
|
||||
// Older versions are kept for compatibility. They may be removed if
|
||||
// compatibility cannot be maintained.
|
||||
//
|
||||
// These constants should not be referenced from any other code.
|
||||
const (
|
||||
SupportPackageIsVersion3 = true
|
||||
SupportPackageIsVersion4 = true
|
||||
SupportPackageIsVersion5 = true
|
||||
)
|
||||
|
||||
const grpcUA = "grpc-go/" + Version
|
||||
|
||||
538 vendor/google.golang.org/grpc/server.go generated vendored
@@ -32,13 +32,19 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"io/ioutil"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/trace"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/encoding"
|
||||
"google.golang.org/grpc/encoding/proto"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/stats"
|
||||
@@ -89,18 +95,28 @@ type Server struct {
|
||||
conns map[io.Closer]bool
|
||||
serve bool
|
||||
drain bool
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
// A CondVar to let GracefulStop() blocks until all the pending RPCs are finished
|
||||
// and all the transport goes away.
|
||||
cv *sync.Cond
|
||||
cv *sync.Cond // signaled when connections close for GracefulStop
|
||||
m map[string]*service // service name -> service info
|
||||
events trace.EventLog
|
||||
|
||||
quit chan struct{}
|
||||
done chan struct{}
|
||||
quitOnce sync.Once
|
||||
doneOnce sync.Once
|
||||
channelzRemoveOnce sync.Once
|
||||
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
|
||||
|
||||
channelzID int64 // channelz unique identification number
|
||||
czmu sync.RWMutex
|
||||
callsStarted int64
|
||||
callsFailed int64
|
||||
callsSucceeded int64
|
||||
lastCallStartedTime time.Time
|
||||
}
|
||||
|
||||
type options struct {
|
||||
creds credentials.TransportCredentials
|
||||
codec Codec
|
||||
codec baseCodec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
unaryInt UnaryServerInterceptor
|
||||
@@ -177,20 +193,32 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
|
||||
}
|
||||
|
||||
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
|
||||
//
|
||||
// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
|
||||
func CustomCodec(codec Codec) ServerOption {
|
||||
return func(o *options) {
|
||||
o.codec = codec
|
||||
}
|
||||
}
|
||||
|
||||
// RPCCompressor returns a ServerOption that sets a compressor for outbound messages.
|
||||
// RPCCompressor returns a ServerOption that sets a compressor for outbound
|
||||
// messages. For backward compatibility, all outbound messages will be sent
|
||||
// using this compressor, regardless of incoming message compression. By
|
||||
// default, server messages will be sent using the same compressor with which
|
||||
// request messages were sent.
|
||||
//
|
||||
// Deprecated: use encoding.RegisterCompressor instead.
|
||||
func RPCCompressor(cp Compressor) ServerOption {
|
||||
return func(o *options) {
|
||||
o.cp = cp
|
||||
}
|
||||
}
|
||||
|
||||
// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages.
|
||||
// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
|
||||
// messages. It has higher priority than decompressors registered via
|
||||
// encoding.RegisterCompressor.
|
||||
//
|
||||
// Deprecated: use encoding.RegisterCompressor instead.
|
||||
func RPCDecompressor(dc Decompressor) ServerOption {
|
||||
return func(o *options) {
|
||||
o.dc = dc
|
||||
@@ -198,7 +226,9 @@ func RPCDecompressor(dc Decompressor) ServerOption {
|
||||
}
|
||||
|
||||
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
|
||||
// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead.
|
||||
// If this is not set, gRPC uses the default limit.
|
||||
//
|
||||
// Deprecated: use MaxRecvMsgSize instead.
|
||||
func MaxMsgSize(m int) ServerOption {
|
||||
return MaxRecvMsgSize(m)
|
||||
}
|
||||
@@ -297,6 +327,8 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
|
||||
// connection establishment (up to and including HTTP/2 handshaking) for all
|
||||
// new connections. If this is not set, the default is 120 seconds. A zero or
|
||||
// negative value will result in an immediate timeout.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func ConnectionTimeout(d time.Duration) ServerOption {
|
||||
return func(o *options) {
|
||||
o.connectionTimeout = d
|
||||
@@ -310,22 +342,23 @@ func NewServer(opt ...ServerOption) *Server {
|
||||
for _, o := range opt {
|
||||
o(&opts)
|
||||
}
|
||||
if opts.codec == nil {
|
||||
// Set the default codec.
|
||||
opts.codec = protoCodec{}
|
||||
}
|
||||
s := &Server{
|
||||
lis: make(map[net.Listener]bool),
|
||||
opts: opts,
|
||||
conns: make(map[io.Closer]bool),
|
||||
m: make(map[string]*service),
|
||||
quit: make(chan struct{}),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
s.cv = sync.NewCond(&s.mu)
|
||||
s.ctx, s.cancel = context.WithCancel(context.Background())
|
||||
if EnableTracing {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
|
||||
}
|
||||
|
||||
if channelz.IsOn() {
|
||||
s.channelzID = channelz.RegisterServer(s, "")
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -430,11 +463,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo {
|
||||
return ret
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrServerStopped indicates that the operation is now illegal because of
|
||||
// the server being stopped.
|
||||
ErrServerStopped = errors.New("grpc: the server has been stopped")
|
||||
)
|
||||
// ErrServerStopped indicates that the operation is now illegal because of
|
||||
// the server being stopped.
|
||||
var ErrServerStopped = errors.New("grpc: the server has been stopped")
|
||||
|
||||
func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
|
||||
if s.opts.creds == nil {
|
||||
@@ -443,28 +474,66 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
|
||||
return s.opts.creds.ServerHandshake(rawConn)
|
||||
}
|
||||
|
||||
type listenSocket struct {
|
||||
net.Listener
|
||||
channelzID int64
|
||||
}
|
||||
|
||||
func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
|
||||
return &channelz.SocketInternalMetric{
|
||||
LocalAddr: l.Listener.Addr(),
|
||||
}
|
||||
}
|
||||
|
||||
func (l *listenSocket) Close() error {
|
||||
err := l.Listener.Close()
|
||||
if channelz.IsOn() {
|
||||
channelz.RemoveEntry(l.channelzID)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Serve accepts incoming connections on the listener lis, creating a new
|
||||
// ServerTransport and service goroutine for each. The service goroutines
|
||||
// read gRPC requests and then call the registered handlers to reply to them.
|
||||
// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
|
||||
// this method returns.
|
||||
// Serve always returns non-nil error.
|
||||
// Serve will return a non-nil error unless Stop or GracefulStop is called.
|
||||
func (s *Server) Serve(lis net.Listener) error {
|
||||
s.mu.Lock()
|
||||
s.printf("serving")
|
||||
s.serve = true
|
||||
if s.lis == nil {
|
||||
// Serve called after Stop or GracefulStop.
|
||||
s.mu.Unlock()
|
||||
lis.Close()
|
||||
return ErrServerStopped
|
||||
}
|
||||
s.lis[lis] = true
|
||||
|
||||
s.serveWG.Add(1)
|
||||
defer func() {
|
||||
s.serveWG.Done()
|
||||
select {
|
||||
// Stop or GracefulStop called; block until done and return nil.
|
||||
case <-s.quit:
|
||||
<-s.done
|
||||
default:
|
||||
}
|
||||
}()
|
||||
|
||||
ls := &listenSocket{Listener: lis}
|
||||
s.lis[ls] = true
|
||||
|
||||
if channelz.IsOn() {
|
||||
ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "")
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
defer func() {
|
||||
s.mu.Lock()
|
||||
if s.lis != nil && s.lis[lis] {
|
||||
lis.Close()
|
||||
delete(s.lis, lis)
|
||||
if s.lis != nil && s.lis[ls] {
|
||||
ls.Close()
|
||||
delete(s.lis, ls)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}()
|
||||
@@ -491,25 +560,39 @@ func (s *Server) Serve(lis net.Listener) error {
|
||||
timer := time.NewTimer(tempDelay)
|
||||
select {
|
||||
case <-timer.C:
|
||||
case <-s.ctx.Done():
|
||||
}
|
||||
case <-s.quit:
|
||||
timer.Stop()
|
||||
return nil
|
||||
}
|
||||
continue
|
||||
}
|
||||
s.mu.Lock()
|
||||
s.printf("done serving; Accept = %v", err)
|
||||
s.mu.Unlock()
|
||||
|
||||
select {
|
||||
case <-s.quit:
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
return err
|
||||
}
|
||||
tempDelay = 0
|
||||
// Start a new goroutine to deal with rawConn
|
||||
// so we don't stall this Accept loop goroutine.
|
||||
go s.handleRawConn(rawConn)
|
||||
// Start a new goroutine to deal with rawConn so we don't stall this Accept
|
||||
// loop goroutine.
|
||||
//
|
||||
// Make sure we account for the goroutine so GracefulStop doesn't nil out
|
||||
// s.conns before this conn can be added.
|
||||
s.serveWG.Add(1)
|
||||
go func() {
|
||||
s.handleRawConn(rawConn)
|
||||
s.serveWG.Done()
|
||||
}()
|
||||
}
|
||||
}
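For orientation, a minimal usage sketch of Serve and ErrServerStopped (the listen address and the absence of service registration are assumptions of this example, not part of this file):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // example address
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	// Register services on s here, before calling Serve.
	if err := s.Serve(lis); err != nil && err != grpc.ErrServerStopped {
		// ErrServerStopped means Serve was called after Stop/GracefulStop and
		// is usually benign; any other error is a real Accept failure.
		log.Fatalf("failed to serve: %v", err)
	}
}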
|
||||
|
||||
// handleRawConn is run in its own goroutine and handles a just-accepted
|
||||
// connection that has not had any I/O performed on it yet.
|
||||
// handleRawConn forks a goroutine to handle a just-accepted connection that
|
||||
// has not had any I/O performed on it yet.
|
||||
func (s *Server) handleRawConn(rawConn net.Conn) {
|
||||
rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
|
||||
conn, authInfo, err := s.useTransportAuthenticator(rawConn)
|
||||
@@ -534,17 +617,28 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
var serve func()
|
||||
c := conn.(io.Closer)
|
||||
if s.opts.useHandlerImpl {
|
||||
rawConn.SetDeadline(time.Time{})
|
||||
s.serveUsingHandler(conn)
|
||||
serve = func() { s.serveUsingHandler(conn) }
|
||||
} else {
|
||||
// Finish handshaking (HTTP2)
|
||||
st := s.newHTTP2Transport(conn, authInfo)
|
||||
if st == nil {
|
||||
return
|
||||
}
|
||||
rawConn.SetDeadline(time.Time{})
|
||||
s.serveStreams(st)
|
||||
c = st
|
||||
serve = func() { s.serveStreams(st) }
|
||||
}
|
||||
|
||||
rawConn.SetDeadline(time.Time{})
|
||||
if !s.addConn(c) {
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
serve()
|
||||
s.removeConn(c)
|
||||
}()
|
||||
}
|
||||
|
||||
// newHTTP2Transport sets up a http/2 transport (using the
|
||||
@@ -561,6 +655,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
|
||||
InitialConnWindowSize: s.opts.initialConnWindowSize,
|
||||
WriteBufferSize: s.opts.writeBufferSize,
|
||||
ReadBufferSize: s.opts.readBufferSize,
|
||||
ChannelzParentID: s.channelzID,
|
||||
}
|
||||
st, err := transport.NewServerTransport("http2", c, config)
|
||||
if err != nil {
|
||||
@@ -571,15 +666,11 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
|
||||
grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||
return nil
|
||||
}
|
||||
if !s.addConn(st) {
|
||||
st.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
return st
|
||||
}
|
||||
|
||||
func (s *Server) serveStreams(st transport.ServerTransport) {
|
||||
defer s.removeConn(st)
|
||||
defer st.Close()
|
||||
var wg sync.WaitGroup
|
||||
st.HandleStreams(func(stream *transport.Stream) {
|
||||
@@ -613,11 +704,6 @@ var _ http.Handler = (*Server)(nil)
|
||||
//
|
||||
// conn is the *tls.Conn that's already been authenticated.
|
||||
func (s *Server) serveUsingHandler(conn net.Conn) {
|
||||
if !s.addConn(conn) {
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
defer s.removeConn(conn)
|
||||
h2s := &http2.Server{
|
||||
MaxConcurrentStreams: s.opts.maxConcurrentStreams,
|
||||
}
|
||||
@@ -651,13 +737,12 @@ func (s *Server) serveUsingHandler(conn net.Conn) {
|
||||
// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
|
||||
// and subject to change.
|
||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
st, err := transport.NewServerHandlerTransport(w, r)
|
||||
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if !s.addConn(st) {
|
||||
st.Close()
|
||||
return
|
||||
}
|
||||
defer s.removeConn(st)
|
||||
@@ -687,9 +772,15 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
|
||||
func (s *Server) addConn(c io.Closer) bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.conns == nil || s.drain {
|
||||
if s.conns == nil {
|
||||
c.Close()
|
||||
return false
|
||||
}
|
||||
if s.drain {
|
||||
// Transport added after we drained our existing conns: drain it
|
||||
// immediately.
|
||||
c.(transport.ServerTransport).Drain()
|
||||
}
|
||||
s.conns[c] = true
|
||||
return true
|
||||
}
|
||||
@@ -703,42 +794,82 @@ func (s *Server) removeConn(c io.Closer) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
|
||||
var (
|
||||
cbuf *bytes.Buffer
|
||||
outPayload *stats.OutPayload
|
||||
)
|
||||
if cp != nil {
|
||||
cbuf = new(bytes.Buffer)
|
||||
// ChannelzMetric returns ServerInternalMetric of current server.
|
||||
// This is an EXPERIMENTAL API.
|
||||
func (s *Server) ChannelzMetric() *channelz.ServerInternalMetric {
|
||||
s.czmu.RLock()
|
||||
defer s.czmu.RUnlock()
|
||||
return &channelz.ServerInternalMetric{
|
||||
CallsStarted: s.callsStarted,
|
||||
CallsSucceeded: s.callsSucceeded,
|
||||
CallsFailed: s.callsFailed,
|
||||
LastCallStartedTimestamp: s.lastCallStartedTime,
|
||||
}
|
||||
if s.opts.statsHandler != nil {
|
||||
outPayload = &stats.OutPayload{}
|
||||
}
|
||||
hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
|
||||
}
|
||||
|
||||
func (s *Server) incrCallsStarted() {
|
||||
s.czmu.Lock()
|
||||
s.callsStarted++
|
||||
s.lastCallStartedTime = time.Now()
|
||||
s.czmu.Unlock()
|
||||
}
|
||||
|
||||
func (s *Server) incrCallsSucceeded() {
|
||||
s.czmu.Lock()
|
||||
s.callsSucceeded++
|
||||
s.czmu.Unlock()
|
||||
}
|
||||
|
||||
func (s *Server) incrCallsFailed() {
|
||||
s.czmu.Lock()
|
||||
s.callsFailed++
|
||||
s.czmu.Unlock()
|
||||
}
|
||||
|
||||
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
|
||||
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
|
||||
if err != nil {
|
||||
grpclog.Errorln("grpc: server failed to encode response: ", err)
|
||||
return err
|
||||
}
|
||||
if len(data) > s.opts.maxSendMessageSize {
|
||||
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), s.opts.maxSendMessageSize)
|
||||
compData, err := compress(data, cp, comp)
|
||||
if err != nil {
|
||||
grpclog.Errorln("grpc: server failed to compress response: ", err)
|
||||
return err
|
||||
}
|
||||
err = t.Write(stream, hdr, data, opts)
|
||||
if err == nil && outPayload != nil {
|
||||
outPayload.SentTime = time.Now()
|
||||
s.opts.statsHandler.HandleRPC(stream.Context(), outPayload)
|
||||
hdr, payload := msgHeader(data, compData)
|
||||
// TODO(dfawley): should we be checking len(data) instead?
|
||||
if len(payload) > s.opts.maxSendMessageSize {
|
||||
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
|
||||
}
|
||||
err = t.Write(stream, hdr, payload, opts)
|
||||
if err == nil && s.opts.statsHandler != nil {
|
||||
s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
|
||||
}
|
||||
return err
|
||||
}
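The len(payload) check above is bounded by s.opts.maxSendMessageSize (and the receive path by maxReceiveMessageSize); a hedged sketch of raising both with the existing server options, where the 16 MiB value is only an example:

package main

import "google.golang.org/grpc"

func newServerWithLargerLimits() *grpc.Server {
	const limit = 16 * 1024 * 1024 // example limit in bytes
	return grpc.NewServer(
		grpc.MaxRecvMsgSize(limit), // bounds len(req) checked in processUnaryRPC
		grpc.MaxSendMsgSize(limit), // bounds len(payload) checked in sendResponse
	)
}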
|
||||
|
||||
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
|
||||
if channelz.IsOn() {
|
||||
s.incrCallsStarted()
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
s.incrCallsFailed()
|
||||
} else {
|
||||
s.incrCallsSucceeded()
|
||||
}
|
||||
}()
|
||||
}
|
||||
sh := s.opts.statsHandler
|
||||
if sh != nil {
|
||||
beginTime := time.Now()
|
||||
begin := &stats.Begin{
|
||||
BeginTime: time.Now(),
|
||||
BeginTime: beginTime,
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), begin)
|
||||
defer func() {
|
||||
end := &stats.End{
|
||||
BeginTime: beginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
@@ -758,10 +889,43 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
}
|
||||
}()
|
||||
}
|
||||
if s.opts.cp != nil {
|
||||
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
|
||||
stream.SetSendCompress(s.opts.cp.Type())
|
||||
|
||||
// comp and cp are used for compression. decomp and dc are used for
|
||||
// decompression. If comp and decomp are both set, they are the same;
|
||||
// however they are kept separate to ensure that at most one of the
|
||||
// compressor/decompressor variable pairs are set for use later.
|
||||
var comp, decomp encoding.Compressor
|
||||
var cp Compressor
|
||||
var dc Decompressor
|
||||
|
||||
// If dc is set and matches the stream's compression, use it. Otherwise, try
|
||||
// to find a matching registered compressor for decomp.
|
||||
if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
|
||||
dc = s.opts.dc
|
||||
} else if rc != "" && rc != encoding.Identity {
|
||||
decomp = encoding.GetCompressor(rc)
|
||||
if decomp == nil {
|
||||
st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
|
||||
t.WriteStatus(stream, st)
|
||||
return st.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// If cp is set, use it. Otherwise, attempt to compress the response using
|
||||
// the incoming message compression method.
|
||||
//
|
||||
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
|
||||
if s.opts.cp != nil {
|
||||
cp = s.opts.cp
|
||||
stream.SetSendCompress(cp.Type())
|
||||
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
|
||||
// Legacy compressor not specified; attempt to respond with same encoding.
|
||||
comp = encoding.GetCompressor(rc)
|
||||
if comp != nil {
|
||||
stream.SetSendCompress(rc)
|
||||
}
|
||||
}
|
||||
|
||||
p := &parser{r: stream}
|
||||
pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
|
||||
if err == io.EOF {
|
||||
@@ -769,7 +933,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
return err
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
|
||||
err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
|
||||
}
|
||||
if err != nil {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
@@ -790,19 +954,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
if channelz.IsOn() {
|
||||
t.IncrMsgRecv()
|
||||
}
|
||||
if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
|
||||
if e := t.WriteStatus(stream, st); e != nil {
|
||||
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
|
||||
}
|
||||
return err
|
||||
}
|
||||
if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil {
|
||||
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
|
||||
}
|
||||
|
||||
// TODO checkRecvPayload always return RPC error. Add a return here if necessary.
|
||||
return st.Err()
|
||||
}
|
||||
var inPayload *stats.InPayload
|
||||
if sh != nil {
|
||||
@@ -816,9 +975,17 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
}
|
||||
if pf == compressionMade {
|
||||
var err error
|
||||
req, err = s.opts.dc.Do(bytes.NewReader(req))
|
||||
if dc != nil {
|
||||
req, err = dc.Do(bytes.NewReader(req))
|
||||
if err != nil {
|
||||
return Errorf(codes.Internal, err.Error())
|
||||
return status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
} else {
|
||||
tmp, _ := decomp.Decompress(bytes.NewReader(req))
|
||||
req, err = ioutil.ReadAll(tmp)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(req) > s.opts.maxReceiveMessageSize {
|
||||
@@ -826,7 +993,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
// java implementation.
|
||||
return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
|
||||
}
|
||||
if err := s.opts.codec.Unmarshal(req, v); err != nil {
|
||||
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
|
||||
}
|
||||
if inPayload != nil {
|
||||
@@ -840,12 +1007,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
}
|
||||
return nil
|
||||
}
|
||||
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
|
||||
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
|
||||
reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
|
||||
if appErr != nil {
|
||||
appStatus, ok := status.FromError(appErr)
|
||||
if !ok {
|
||||
// Convert appErr if it is not a grpc status error.
|
||||
appErr = status.Error(convertCode(appErr), appErr.Error())
|
||||
appErr = status.Error(codes.Unknown, appErr.Error())
|
||||
appStatus, _ = status.FromError(appErr)
|
||||
}
|
||||
if trInfo != nil {
|
||||
@@ -864,7 +1032,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
|
||||
|
||||
if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
|
||||
if err == io.EOF {
|
||||
// The entire stream is done (for unary RPC only).
|
||||
return err
|
||||
@@ -887,6 +1056,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
}
|
||||
return err
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
t.IncrMsgSent()
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
|
||||
}
|
||||
@@ -897,14 +1069,26 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
}
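A hedged client-side sketch of the registered-compressor path that processUnaryRPC selects above: importing the gzip encoding package registers an encoding.Compressor, and UseCompressor asks for it per call; req and reply stand in for the generated protobuf messages.

package example

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func invokeWithGzip(ctx context.Context, cc *grpc.ClientConn, method string, req, reply interface{}) error {
	// With no legacy WithCompressor/WithDecompressor configured, the server
	// above mirrors the request encoding via encoding.GetCompressor(rc).
	return cc.Invoke(ctx, method, req, reply, grpc.UseCompressor("gzip"))
}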
|
||||
|
||||
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
|
||||
if channelz.IsOn() {
|
||||
s.incrCallsStarted()
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
s.incrCallsFailed()
|
||||
} else {
|
||||
s.incrCallsSucceeded()
|
||||
}
|
||||
}()
|
||||
}
|
||||
sh := s.opts.statsHandler
|
||||
if sh != nil {
|
||||
beginTime := time.Now()
|
||||
begin := &stats.Begin{
|
||||
BeginTime: time.Now(),
|
||||
BeginTime: beginTime,
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), begin)
|
||||
defer func() {
|
||||
end := &stats.End{
|
||||
BeginTime: beginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
@@ -913,21 +1097,47 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
}()
|
||||
}
|
||||
if s.opts.cp != nil {
|
||||
stream.SetSendCompress(s.opts.cp.Type())
|
||||
}
|
||||
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
|
||||
ss := &serverStream{
|
||||
ctx: ctx,
|
||||
t: t,
|
||||
s: stream,
|
||||
p: &parser{r: stream},
|
||||
codec: s.opts.codec,
|
||||
cp: s.opts.cp,
|
||||
dc: s.opts.dc,
|
||||
codec: s.getCodec(stream.ContentSubtype()),
|
||||
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
|
||||
maxSendMessageSize: s.opts.maxSendMessageSize,
|
||||
trInfo: trInfo,
|
||||
statsHandler: sh,
|
||||
}
|
||||
|
||||
// If dc is set and matches the stream's compression, use it. Otherwise, try
|
||||
// to find a matching registered compressor for decomp.
|
||||
if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
|
||||
ss.dc = s.opts.dc
|
||||
} else if rc != "" && rc != encoding.Identity {
|
||||
ss.decomp = encoding.GetCompressor(rc)
|
||||
if ss.decomp == nil {
|
||||
st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
|
||||
t.WriteStatus(ss.s, st)
|
||||
return st.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// If cp is set, use it. Otherwise, attempt to compress the response using
|
||||
// the incoming message compression method.
|
||||
//
|
||||
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
|
||||
if s.opts.cp != nil {
|
||||
ss.cp = s.opts.cp
|
||||
stream.SetSendCompress(s.opts.cp.Type())
|
||||
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
|
||||
// Legacy compressor not specified; attempt to respond with same encoding.
|
||||
ss.comp = encoding.GetCompressor(rc)
|
||||
if ss.comp != nil {
|
||||
stream.SetSendCompress(rc)
|
||||
}
|
||||
}
|
||||
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
defer func() {
|
||||
@@ -963,7 +1173,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
||||
case transport.StreamError:
|
||||
appStatus = status.New(err.Code, err.Desc)
|
||||
default:
|
||||
appStatus = status.New(convertCode(appErr), appErr.Error())
|
||||
appStatus = status.New(codes.Unknown, appErr.Error())
|
||||
}
|
||||
appErr = appStatus.Err()
|
||||
}
|
||||
@@ -983,7 +1193,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
return t.WriteStatus(ss.s, status.New(codes.OK, ""))
|
||||
|
||||
}
|
||||
|
||||
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
|
||||
@@ -1065,12 +1274,65 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
|
||||
}
|
||||
}
|
||||
|
||||
// The key to save ServerTransportStream in the context.
|
||||
type streamKey struct{}
|
||||
|
||||
// NewContextWithServerTransportStream creates a new context from ctx and
|
||||
// attaches stream to it.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
|
||||
return context.WithValue(ctx, streamKey{}, stream)
|
||||
}
|
||||
|
||||
// ServerTransportStream is a minimal interface that a transport stream must
|
||||
// implement. This can be used to mock an actual transport stream for tests of
|
||||
// handler code that use, for example, grpc.SetHeader (which requires some
|
||||
// stream to be in context).
|
||||
//
|
||||
// See also NewContextWithServerTransportStream.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
type ServerTransportStream interface {
|
||||
Method() string
|
||||
SetHeader(md metadata.MD) error
|
||||
SendHeader(md metadata.MD) error
|
||||
SetTrailer(md metadata.MD) error
|
||||
}
|
||||
|
||||
// ServerTransportStreamFromContext returns the ServerTransportStream saved in
|
||||
// ctx. Returns nil if the given context has no stream associated with it
|
||||
// (which implies it is not an RPC invocation context).
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
|
||||
s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
|
||||
return s
|
||||
}
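As the interface comment above suggests, tests can satisfy ServerTransportStream with a small fake; a hedged sketch where fakeServerStream and the test itself are hypothetical:

package example

import (
	"context"
	"testing"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// fakeServerStream is a hypothetical test double that records metadata.
type fakeServerStream struct {
	header, trailer metadata.MD
}

func (f *fakeServerStream) Method() string { return "/example.Service/Method" }
func (f *fakeServerStream) SetHeader(md metadata.MD) error {
	f.header = metadata.Join(f.header, md)
	return nil
}
func (f *fakeServerStream) SendHeader(md metadata.MD) error { return f.SetHeader(md) }
func (f *fakeServerStream) SetTrailer(md metadata.MD) error {
	f.trailer = metadata.Join(f.trailer, md)
	return nil
}

func TestHandlerSetsHeader(t *testing.T) {
	fake := &fakeServerStream{}
	ctx := grpc.NewContextWithServerTransportStream(context.Background(), fake)
	// Handler code under test can now call grpc.SetHeader without a real transport.
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-test", "1")); err != nil {
		t.Fatalf("SetHeader: %v", err)
	}
}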
|
||||
|
||||
// Stop stops the gRPC server. It immediately closes all open
|
||||
// connections and listeners.
|
||||
// It cancels all active RPCs on the server side and the corresponding
|
||||
// pending RPCs on the client side will get notified by connection
|
||||
// errors.
|
||||
func (s *Server) Stop() {
|
||||
s.quitOnce.Do(func() {
|
||||
close(s.quit)
|
||||
})
|
||||
|
||||
defer func() {
|
||||
s.serveWG.Wait()
|
||||
s.doneOnce.Do(func() {
|
||||
close(s.done)
|
||||
})
|
||||
}()
|
||||
|
||||
s.channelzRemoveOnce.Do(func() {
|
||||
if channelz.IsOn() {
|
||||
channelz.RemoveEntry(s.channelzID)
|
||||
}
|
||||
})
|
||||
|
||||
s.mu.Lock()
|
||||
listeners := s.lis
|
||||
s.lis = nil
|
||||
@@ -1088,7 +1350,6 @@ func (s *Server) Stop() {
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.cancel()
|
||||
if s.events != nil {
|
||||
s.events.Finish()
|
||||
s.events = nil
|
||||
@@ -1100,22 +1361,44 @@ func (s *Server) Stop() {
|
||||
// accepting new connections and RPCs and blocks until all the pending RPCs are
|
||||
// finished.
|
||||
func (s *Server) GracefulStop() {
|
||||
s.quitOnce.Do(func() {
|
||||
close(s.quit)
|
||||
})
|
||||
|
||||
defer func() {
|
||||
s.doneOnce.Do(func() {
|
||||
close(s.done)
|
||||
})
|
||||
}()
|
||||
|
||||
s.channelzRemoveOnce.Do(func() {
|
||||
if channelz.IsOn() {
|
||||
channelz.RemoveEntry(s.channelzID)
|
||||
}
|
||||
})
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.conns == nil {
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
for lis := range s.lis {
|
||||
lis.Close()
|
||||
}
|
||||
s.lis = nil
|
||||
s.cancel()
|
||||
if !s.drain {
|
||||
for c := range s.conns {
|
||||
c.(transport.ServerTransport).Drain()
|
||||
}
|
||||
s.drain = true
|
||||
}
|
||||
|
||||
// Wait for serving threads to be ready to exit. Only then can we be sure no
|
||||
// new conns will be created.
|
||||
s.mu.Unlock()
|
||||
s.serveWG.Wait()
|
||||
s.mu.Lock()
|
||||
|
||||
for len(s.conns) != 0 {
|
||||
s.cv.Wait()
|
||||
}
|
||||
@@ -1124,26 +1407,29 @@ func (s *Server) GracefulStop() {
|
||||
s.events.Finish()
|
||||
s.events = nil
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}
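A hedged sketch of how Stop and GracefulStop are typically driven from a signal handler; the helper name and signal choice are assumptions:

package example

import (
	"os"
	"os/signal"
	"syscall"

	"google.golang.org/grpc"
)

// stopOnSignal blocks until SIGINT or SIGTERM, then drains the server.
func stopOnSignal(s *grpc.Server) {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	<-sigs
	// GracefulStop stops accepting new connections and RPCs and waits for
	// pending RPCs; s.Stop() would instead cancel them immediately.
	s.GracefulStop()
}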
|
||||
|
||||
func init() {
|
||||
internal.TestingCloseConns = func(arg interface{}) {
|
||||
arg.(*Server).testingCloseConns()
|
||||
}
|
||||
internal.TestingUseHandlerImpl = func(arg interface{}) {
|
||||
arg.(*Server).opts.useHandlerImpl = true
|
||||
}
|
||||
}
|
||||
|
||||
// testingCloseConns closes all existing transports but keeps s.lis
|
||||
// accepting new connections.
|
||||
func (s *Server) testingCloseConns() {
|
||||
s.mu.Lock()
|
||||
for c := range s.conns {
|
||||
c.Close()
|
||||
delete(s.conns, c)
|
||||
// contentSubtype must be lowercase
|
||||
// cannot return nil
|
||||
func (s *Server) getCodec(contentSubtype string) baseCodec {
|
||||
if s.opts.codec != nil {
|
||||
return s.opts.codec
|
||||
}
|
||||
s.mu.Unlock()
|
||||
if contentSubtype == "" {
|
||||
return encoding.GetCodec(proto.Name)
|
||||
}
|
||||
codec := encoding.GetCodec(contentSubtype)
|
||||
if codec == nil {
|
||||
return encoding.GetCodec(proto.Name)
|
||||
}
|
||||
return codec
|
||||
}
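getCodec resolves codecs by content-subtype; a hedged sketch of the matching registration and call option, where jsonCodec and the "json" name are illustrative only:

package example

import (
	"context"
	"encoding/json"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
)

// jsonCodec is a hypothetical codec; calls made with it carry
// content-type application/grpc+json, which getCodec resolves above.
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                               { return "json" }

func init() { encoding.RegisterCodec(jsonCodec{}) }

func invokeAsJSON(ctx context.Context, cc *grpc.ClientConn, method string, req, reply interface{}) error {
	return cc.Invoke(ctx, method, req, reply, grpc.CallContentSubtype("json"))
}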
|
||||
|
||||
// SetHeader sets the header metadata.
|
||||
@@ -1156,9 +1442,9 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
stream, ok := transport.StreamFromContext(ctx)
|
||||
if !ok {
|
||||
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||
stream := ServerTransportStreamFromContext(ctx)
|
||||
if stream == nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||
}
|
||||
return stream.SetHeader(md)
|
||||
}
|
||||
@@ -1166,15 +1452,11 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
|
||||
// SendHeader sends header metadata. It may be called at most once.
|
||||
// The provided md and headers set by SetHeader() will be sent.
|
||||
func SendHeader(ctx context.Context, md metadata.MD) error {
|
||||
stream, ok := transport.StreamFromContext(ctx)
|
||||
if !ok {
|
||||
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||
stream := ServerTransportStreamFromContext(ctx)
|
||||
if stream == nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||
}
|
||||
t := stream.ServerTransport()
|
||||
if t == nil {
|
||||
grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
|
||||
}
|
||||
if err := t.WriteHeader(stream, md); err != nil {
|
||||
if err := stream.SendHeader(md); err != nil {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
return nil
|
||||
@@ -1186,9 +1468,19 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
stream, ok := transport.StreamFromContext(ctx)
|
||||
if !ok {
|
||||
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||
stream := ServerTransportStreamFromContext(ctx)
|
||||
if stream == nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||
}
|
||||
return stream.SetTrailer(md)
|
||||
}
|
||||
|
||||
// Method returns the method string for the server context. The returned
|
||||
// string is in the format of "/service/method".
|
||||
func Method(ctx context.Context) (string, bool) {
|
||||
s := ServerTransportStreamFromContext(ctx)
|
||||
if s == nil {
|
||||
return "", false
|
||||
}
|
||||
return s.Method(), true
|
||||
}
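A hedged sketch of a unary handler using the helpers above; the metadata keys and the interface{} message types are placeholders:

package example

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

func handleEcho(ctx context.Context, req interface{}) (interface{}, error) {
	// Header metadata is buffered until the first response message (or an
	// explicit SendHeader); trailer metadata travels with the final status.
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "example")); err != nil {
		return nil, err
	}
	if err := grpc.SetTrailer(ctx, metadata.Pairs("x-units", "1")); err != nil {
		return nil, err
	}
	if name, ok := grpc.Method(ctx); ok {
		_ = name // full "/service/method" string, e.g. for logging
	}
	return req, nil
}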
|
||||
|
||||
233
vendor/google.golang.org/grpc/service_config.go
generated
vendored
Normal file
@@ -0,0 +1,233 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
const maxInt = int(^uint(0) >> 1)
|
||||
|
||||
// MethodConfig defines the configuration recommended by the service providers for a
|
||||
// particular method.
|
||||
//
|
||||
// Deprecated: Users should not use this struct. Service config should be received
|
||||
// through name resolver, as specified here
|
||||
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
||||
type MethodConfig struct {
|
||||
// WaitForReady indicates whether RPCs sent to this method should wait until
|
||||
// the connection is ready by default (!failfast). The value specified via the
|
||||
// gRPC client API will override the value set here.
|
||||
WaitForReady *bool
|
||||
// Timeout is the default timeout for RPCs sent to this method. The actual
|
||||
// deadline used will be the minimum of the value specified here and the value
|
||||
// set by the application via the gRPC client API. If either one is not set,
|
||||
// then the other will be used. If neither is set, then the RPC has no deadline.
|
||||
Timeout *time.Duration
|
||||
// MaxReqSize is the maximum allowed payload size for an individual request in a
|
||||
// stream (client->server) in bytes. The size which is measured is the serialized
|
||||
// payload after per-message compression (but before stream compression) in bytes.
|
||||
// The actual value used is the minimum of the value specified here and the value set
|
||||
// by the application via the gRPC client API. If either one is not set, then the other
|
||||
// will be used. If neither is set, then the built-in default is used.
|
||||
MaxReqSize *int
|
||||
// MaxRespSize is the maximum allowed payload size for an individual response in a
|
||||
// stream (server->client) in bytes.
|
||||
MaxRespSize *int
|
||||
}
|
||||
|
||||
// ServiceConfig is provided by the service provider and contains parameters for how
|
||||
// clients that connect to the service should behave.
|
||||
//
|
||||
// Deprecated: Users should not use this struct. Service config should be received
|
||||
// through name resolver, as specified here
|
||||
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
||||
type ServiceConfig struct {
|
||||
// LB is the load balancer the service providers recommends. The balancer specified
|
||||
// via grpc.WithBalancer will override this.
|
||||
LB *string
|
||||
// Methods contains a map for the methods in this service.
|
||||
// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
|
||||
// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
|
||||
// Otherwise, the method has no MethodConfig to use.
|
||||
Methods map[string]MethodConfig
|
||||
|
||||
stickinessMetadataKey *string
|
||||
}
|
||||
|
||||
func parseDuration(s *string) (*time.Duration, error) {
|
||||
if s == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if !strings.HasSuffix(*s, "s") {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
|
||||
if len(ss) > 2 {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
// hasDigits is set if either the whole or fractional part of the number is
|
||||
// present, since both are optional but one is required.
|
||||
hasDigits := false
|
||||
var d time.Duration
|
||||
if len(ss[0]) > 0 {
|
||||
i, err := strconv.ParseInt(ss[0], 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
||||
}
|
||||
d = time.Duration(i) * time.Second
|
||||
hasDigits = true
|
||||
}
|
||||
if len(ss) == 2 && len(ss[1]) > 0 {
|
||||
if len(ss[1]) > 9 {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
f, err := strconv.ParseInt(ss[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
||||
}
|
||||
for i := 9; i > len(ss[1]); i-- {
|
||||
f *= 10
|
||||
}
|
||||
d += time.Duration(f)
|
||||
hasDigits = true
|
||||
}
|
||||
if !hasDigits {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
|
||||
return &d, nil
|
||||
}
|
||||
|
||||
type jsonName struct {
|
||||
Service *string
|
||||
Method *string
|
||||
}
|
||||
|
||||
func (j jsonName) generatePath() (string, bool) {
|
||||
if j.Service == nil {
|
||||
return "", false
|
||||
}
|
||||
res := "/" + *j.Service + "/"
|
||||
if j.Method != nil {
|
||||
res += *j.Method
|
||||
}
|
||||
return res, true
|
||||
}
|
||||
|
||||
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
|
||||
type jsonMC struct {
|
||||
Name *[]jsonName
|
||||
WaitForReady *bool
|
||||
Timeout *string
|
||||
MaxRequestMessageBytes *int64
|
||||
MaxResponseMessageBytes *int64
|
||||
}
|
||||
|
||||
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
|
||||
type jsonSC struct {
|
||||
LoadBalancingPolicy *string
|
||||
StickinessMetadataKey *string
|
||||
MethodConfig *[]jsonMC
|
||||
}
|
||||
|
||||
func parseServiceConfig(js string) (ServiceConfig, error) {
|
||||
var rsc jsonSC
|
||||
err := json.Unmarshal([]byte(js), &rsc)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return ServiceConfig{}, err
|
||||
}
|
||||
sc := ServiceConfig{
|
||||
LB: rsc.LoadBalancingPolicy,
|
||||
Methods: make(map[string]MethodConfig),
|
||||
|
||||
stickinessMetadataKey: rsc.StickinessMetadataKey,
|
||||
}
|
||||
if rsc.MethodConfig == nil {
|
||||
return sc, nil
|
||||
}
|
||||
|
||||
for _, m := range *rsc.MethodConfig {
|
||||
if m.Name == nil {
|
||||
continue
|
||||
}
|
||||
d, err := parseDuration(m.Timeout)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return ServiceConfig{}, err
|
||||
}
|
||||
|
||||
mc := MethodConfig{
|
||||
WaitForReady: m.WaitForReady,
|
||||
Timeout: d,
|
||||
}
|
||||
if m.MaxRequestMessageBytes != nil {
|
||||
if *m.MaxRequestMessageBytes > int64(maxInt) {
|
||||
mc.MaxReqSize = newInt(maxInt)
|
||||
} else {
|
||||
mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
|
||||
}
|
||||
}
|
||||
if m.MaxResponseMessageBytes != nil {
|
||||
if *m.MaxResponseMessageBytes > int64(maxInt) {
|
||||
mc.MaxRespSize = newInt(maxInt)
|
||||
} else {
|
||||
mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
|
||||
}
|
||||
}
|
||||
for _, n := range *m.Name {
|
||||
if path, valid := n.generatePath(); valid {
|
||||
sc.Methods[path] = mc
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return sc, nil
|
||||
}
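For reference, an illustrative JSON document of the shape parseServiceConfig accepts, mirroring the jsonSC/jsonMC fields above; the service name, timeout, and sizes are made up, and in practice the config arrives via the name resolver:

package example

// exampleServiceConfig is an illustrative service config document.
const exampleServiceConfig = `{
  "loadBalancingPolicy": "round_robin",
  "methodConfig": [{
    "name": [{ "service": "example.Echo", "method": "Ping" }],
    "waitForReady": true,
    "timeout": "1.5s",
    "maxRequestMessageBytes": 1048576,
    "maxResponseMessageBytes": 1048576
  }]
}`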
|
||||
|
||||
func min(a, b *int) *int {
|
||||
if *a < *b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
|
||||
if mcMax == nil && doptMax == nil {
|
||||
return &defaultVal
|
||||
}
|
||||
if mcMax != nil && doptMax != nil {
|
||||
return min(mcMax, doptMax)
|
||||
}
|
||||
if mcMax != nil {
|
||||
return mcMax
|
||||
}
|
||||
return doptMax
|
||||
}
|
||||
|
||||
func newInt(b int) *int {
|
||||
return &b
|
||||
}
|
||||
2
vendor/google.golang.org/grpc/stats/stats.go
generated
vendored
@@ -169,6 +169,8 @@ func (s *OutTrailer) isRPCStats() {}
|
||||
type End struct {
|
||||
// Client is true if this End is from client side.
|
||||
Client bool
|
||||
// BeginTime is the time when the RPC began.
|
||||
BeginTime time.Time
|
||||
// EndTime is the time when the RPC ends.
|
||||
EndTime time.Time
|
||||
// Error is the error the RPC ended with. It is an error generated from
|
||||
|
||||
7
vendor/google.golang.org/grpc/status/BUILD
generated
vendored
@@ -2,13 +2,18 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["status.go"],
|
||||
srcs = [
|
||||
"go16.go",
|
||||
"go17.go",
|
||||
"status.go",
|
||||
],
|
||||
importmap = "k8s.io/kubernetes/vendor/google.golang.org/grpc/status",
|
||||
importpath = "google.golang.org/grpc/status",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/golang/protobuf/proto:go_default_library",
|
||||
"//vendor/github.com/golang/protobuf/ptypes:go_default_library",
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
"//vendor/google.golang.org/genproto/googleapis/rpc/status:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/codes:go_default_library",
|
||||
],
|
||||
|
||||
42
vendor/google.golang.org/grpc/status/go16.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// +build go1.6,!go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package status
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// FromContextError converts a context error into a Status. It returns a
|
||||
// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
|
||||
// non-nil and not a context error.
|
||||
func FromContextError(err error) *Status {
|
||||
switch err {
|
||||
case nil:
|
||||
return New(codes.OK, "")
|
||||
case context.DeadlineExceeded:
|
||||
return New(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled:
|
||||
return New(codes.Canceled, err.Error())
|
||||
default:
|
||||
return New(codes.Unknown, err.Error())
|
||||
}
|
||||
}
|
||||
44
vendor/google.golang.org/grpc/status/go17.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// +build go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package status
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
netctx "golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// FromContextError converts a context error into a Status. It returns a
|
||||
// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
|
||||
// non-nil and not a context error.
|
||||
func FromContextError(err error) *Status {
|
||||
switch err {
|
||||
case nil:
|
||||
return New(codes.OK, "")
|
||||
case context.DeadlineExceeded, netctx.DeadlineExceeded:
|
||||
return New(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled, netctx.Canceled:
|
||||
return New(codes.Canceled, err.Error())
|
||||
default:
|
||||
return New(codes.Unknown, err.Error())
|
||||
}
|
||||
}
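A hedged sketch of mapping a context failure to an RPC status with FromContextError; the work performed is a placeholder:

package example

import (
	"context"
	"time"

	"google.golang.org/grpc/status"
)

func waitForWork(ctx context.Context) error {
	select {
	case <-time.After(time.Second): // stand-in for real work
		return nil
	case <-ctx.Done():
		// DeadlineExceeded and Canceled map to the matching gRPC codes;
		// any other error becomes codes.Unknown.
		return status.FromContextError(ctx.Err()).Err()
	}
}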
|
||||
31
vendor/google.golang.org/grpc/status/status.go
generated
vendored
@@ -46,7 +46,7 @@ func (se *statusError) Error() string {
|
||||
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
|
||||
}
|
||||
|
||||
func (se *statusError) status() *Status {
|
||||
func (se *statusError) GRPCStatus() *Status {
|
||||
return &Status{s: (*spb.Status)(se)}
|
||||
}
|
||||
|
||||
@@ -120,15 +120,23 @@ func FromProto(s *spb.Status) *Status {
|
||||
}
|
||||
|
||||
// FromError returns a Status representing err if it was produced from this
|
||||
// package, otherwise it returns nil, false.
|
||||
// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
|
||||
// Status is returned with codes.Unknown and the original error message.
|
||||
func FromError(err error) (s *Status, ok bool) {
|
||||
if err == nil {
|
||||
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
|
||||
}
|
||||
if s, ok := err.(*statusError); ok {
|
||||
return s.status(), true
|
||||
if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
|
||||
return se.GRPCStatus(), true
|
||||
}
|
||||
return nil, false
|
||||
return New(codes.Unknown, err.Error()), false
|
||||
}
|
||||
|
||||
// Convert is a convenience function which removes the need to handle the
|
||||
// boolean return value from FromError.
|
||||
func Convert(err error) *Status {
|
||||
s, _ := FromError(err)
|
||||
return s
|
||||
}
|
||||
|
||||
// WithDetails returns a new status with the provided details messages appended to the status.
|
||||
@@ -166,3 +174,16 @@ func (s *Status) Details() []interface{} {
|
||||
}
|
||||
return details
|
||||
}
|
||||
|
||||
// Code returns the Code of the error if it is a Status error, codes.OK if err
|
||||
// is nil, or codes.Unknown otherwise.
|
||||
func Code(err error) codes.Code {
|
||||
// Don't use FromError to avoid allocation of OK status.
|
||||
if err == nil {
|
||||
return codes.OK
|
||||
}
|
||||
if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
|
||||
return se.GRPCStatus().Code()
|
||||
}
|
||||
return codes.Unknown
|
||||
}
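Because FromError now honors any error with a GRPCStatus() *Status method, domain errors can carry their own code; a hedged sketch with a hypothetical notFoundError:

package example

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// notFoundError is a hypothetical domain error exposing a gRPC status.
type notFoundError struct{ key string }

func (e *notFoundError) Error() string { return "not found: " + e.key }
func (e *notFoundError) GRPCStatus() *status.Status {
	return status.New(codes.NotFound, e.Error())
}

func isNotFound(err error) bool {
	// status.Code uses GRPCStatus when present and falls back to
	// codes.Unknown (or codes.OK for nil) otherwise.
	return status.Code(err) == codes.NotFound
}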
|
||||
|
||||
97
vendor/google.golang.org/grpc/stickiness_linkedmap.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
)
|
||||
|
||||
type linkedMapKVPair struct {
|
||||
key string
|
||||
value *stickyStoreEntry
|
||||
}
|
||||
|
||||
// linkedMap is an implementation of a map that supports removing the oldest
|
||||
// entry.
|
||||
//
|
||||
// linkedMap is NOT thread safe.
|
||||
//
|
||||
// It's for use of stickiness only!
|
||||
type linkedMap struct {
|
||||
m map[string]*list.Element
|
||||
l *list.List // Head of the list is the oldest element.
|
||||
}
|
||||
|
||||
// newLinkedMap returns a new LinkedMap.
|
||||
func newLinkedMap() *linkedMap {
|
||||
return &linkedMap{
|
||||
m: make(map[string]*list.Element),
|
||||
l: list.New(),
|
||||
}
|
||||
}
|
||||
|
||||
// put adds entry (key, value) to the map. Existing key will be overridden.
|
||||
func (m *linkedMap) put(key string, value *stickyStoreEntry) {
|
||||
if oldE, ok := m.m[key]; ok {
|
||||
// Remove existing entry.
|
||||
m.l.Remove(oldE)
|
||||
}
|
||||
e := m.l.PushBack(&linkedMapKVPair{key: key, value: value})
|
||||
m.m[key] = e
|
||||
}
|
||||
|
||||
// get returns the value of the given key.
|
||||
func (m *linkedMap) get(key string) (*stickyStoreEntry, bool) {
|
||||
e, ok := m.m[key]
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
m.l.MoveToBack(e)
|
||||
return e.Value.(*linkedMapKVPair).value, true
|
||||
}
|
||||
|
||||
// remove removes key from the map, and returns the value. The map is not
|
||||
// modified if key is not in the map.
|
||||
func (m *linkedMap) remove(key string) (*stickyStoreEntry, bool) {
|
||||
e, ok := m.m[key]
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
delete(m.m, key)
|
||||
m.l.Remove(e)
|
||||
return e.Value.(*linkedMapKVPair).value, true
|
||||
}
|
||||
|
||||
// len returns the len of the map.
|
||||
func (m *linkedMap) len() int {
|
||||
return len(m.m)
|
||||
}
|
||||
|
||||
// clear removes all elements from the map.
|
||||
func (m *linkedMap) clear() {
|
||||
m.m = make(map[string]*list.Element)
|
||||
m.l = list.New()
|
||||
}
|
||||
|
||||
// removeOldest removes the oldest key from the map.
|
||||
func (m *linkedMap) removeOldest() {
|
||||
e := m.l.Front()
|
||||
m.l.Remove(e)
|
||||
delete(m.m, e.Value.(*linkedMapKVPair).key)
|
||||
}
|
||||
644
vendor/google.golang.org/grpc/stream.go
generated
vendored
@@ -19,7 +19,6 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
@@ -29,15 +28,19 @@ import (
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/encoding"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// StreamHandler defines the handler called by gRPC server to complete the
|
||||
// execution of a streaming RPC.
|
||||
// execution of a streaming RPC. If a StreamHandler returns an error, it
|
||||
// should be produced by the status package, or else gRPC will use
|
||||
// codes.Unknown as the status code and err.Error() as the status message
|
||||
// of the RPC.
|
||||
type StreamHandler func(srv interface{}, stream ServerStream) error
|
||||
|
||||
// StreamDesc represents a streaming RPC service's method specification.
|
||||
@@ -51,6 +54,8 @@ type StreamDesc struct {
|
||||
}
|
||||
|
||||
// Stream defines the common interface a client or server stream has to satisfy.
|
||||
//
|
||||
// All errors returned from Stream are compatible with the status package.
|
||||
type Stream interface {
|
||||
// Context returns the context for this stream.
|
||||
Context() context.Context
|
||||
@@ -89,43 +94,78 @@ type ClientStream interface {
|
||||
// Stream.SendMsg() may return a non-nil error when something wrong happens sending
|
||||
// the request. The returned error indicates the status of this sending, not the final
|
||||
// status of the RPC.
|
||||
// Always call Stream.RecvMsg() to get the final status if you care about the status of
|
||||
// the RPC.
|
||||
//
|
||||
// Always call Stream.RecvMsg() to drain the stream and get the final
|
||||
// status, otherwise there could be leaked resources.
|
||||
Stream
|
||||
}
|
||||
|
||||
// NewClientStream creates a new Stream for the client side. This is called
|
||||
// by generated code.
|
||||
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
||||
// NewStream creates a new Stream for the client side. This is typically
|
||||
// called by generated code. ctx is used for the lifetime of the stream.
|
||||
//
|
||||
// To ensure resources are not leaked due to the stream returned, one of the following
|
||||
// actions must be performed:
|
||||
//
|
||||
// 1. Call Close on the ClientConn.
|
||||
// 2. Cancel the context provided.
|
||||
// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
|
||||
// client-streaming RPC, for instance, might use the helper function
|
||||
// CloseAndRecv (note that CloseSend does not Recv, therefore is not
|
||||
// guaranteed to release all resources).
|
||||
// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
|
||||
//
|
||||
// If none of the above happen, a goroutine and a context will be leaked, and grpc
|
||||
// will not call the optionally-configured stats handler with a stats.End message.
|
||||
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
||||
// allow interceptor to see all applicable call options, which means those
|
||||
// configured as defaults from dial option as well as per-call options
|
||||
opts = combine(cc.dopts.callOptions, opts)
|
||||
|
||||
if cc.dopts.streamInt != nil {
|
||||
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
|
||||
}
|
||||
return newClientStream(ctx, desc, cc, method, opts...)
|
||||
}
|
||||
|
||||
// NewClientStream is a wrapper for ClientConn.NewStream.
|
||||
//
|
||||
// DEPRECATED: Use ClientConn.NewStream instead.
|
||||
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
|
||||
return cc.NewStream(ctx, desc, method, opts...)
|
||||
}
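A hedged sketch of the cleanup rules listed above when calling NewStream directly (generated code normally hides this); the method name and the near-empty StreamDesc are placeholders:

package example

import (
	"context"
	"io"
	"time"

	"google.golang.org/grpc"
)

func watchOnce(cc *grpc.ClientConn, req, reply interface{}) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // rule 2: cancelling the context always releases the stream

	desc := &grpc.StreamDesc{ServerStreams: true} // placeholder descriptor
	cs, err := cc.NewStream(ctx, desc, "/example.Echo/Watch")
	if err != nil {
		return err
	}
	if err := cs.SendMsg(req); err != nil {
		return err // rule 4: a non-nil, non-io.EOF error from SendMsg also cleans up
	}
	if err := cs.CloseSend(); err != nil {
		return err
	}
	for {
		if err := cs.RecvMsg(reply); err != nil {
			if err == io.EOF {
				return nil // rule 3: stream drained to completion
			}
			return err
		}
	}
}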
|
||||
|
||||
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
||||
var (
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
done func(balancer.DoneInfo)
|
||||
cancel context.CancelFunc
|
||||
)
|
||||
if channelz.IsOn() {
|
||||
cc.incrCallsStarted()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cc.incrCallsFailed()
|
||||
}
|
||||
}()
|
||||
}
|
||||
c := defaultCallInfo()
|
||||
mc := cc.GetMethodConfig(method)
|
||||
if mc.WaitForReady != nil {
|
||||
c.failFast = !*mc.WaitForReady
|
||||
}
|
||||
|
||||
if mc.Timeout != nil {
|
||||
// Possible context leak:
|
||||
// The cancel function for the child context we create will only be called
|
||||
// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
|
||||
// an error is generated by SendMsg.
|
||||
// https://github.com/grpc/grpc-go/issues/1818.
|
||||
var cancel context.CancelFunc
|
||||
if mc.Timeout != nil && *mc.Timeout >= 0 {
|
||||
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
|
||||
} else {
|
||||
ctx, cancel = context.WithCancel(ctx)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cancel()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
opts = append(cc.dopts.callOptions, opts...)
|
||||
for _, o := range opts {
|
||||
if err := o.before(c); err != nil {
|
||||
return nil, toRPCErr(err)
|
||||
@@ -133,6 +173,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
||||
}
|
||||
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
|
||||
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
|
||||
if err := setCallInfoCodec(c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
@@ -142,9 +185,26 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
||||
// If it's client streaming, the user may never send a request or send it any
|
||||
// time soon, so we ask the transport to flush the header.
|
||||
Flush: desc.ClientStreams,
|
||||
ContentSubtype: c.contentSubtype,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
|
||||
// Set our outgoing compression according to the UseCompressor CallOption, if
|
||||
// set. In that case, also find the compressor from the encoding package.
|
||||
// Otherwise, use the compressor configured by the WithCompressor DialOption,
|
||||
// if set.
|
||||
var cp Compressor
|
||||
var comp encoding.Compressor
|
||||
if ct := c.compressorType; ct != "" {
|
||||
callHdr.SendCompress = ct
|
||||
if ct != encoding.Identity {
|
||||
comp = encoding.GetCompressor(ct)
|
||||
if comp == nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
|
||||
}
|
||||
}
|
||||
} else if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
cp = cc.dopts.cp
|
||||
}
|
||||
if c.creds != nil {
|
||||
callHdr.Creds = c.creds
|
||||
@@ -170,11 +230,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
||||
}
|
||||
ctx = newContextWithRPCInfo(ctx, c.failFast)
|
||||
sh := cc.dopts.copts.StatsHandler
|
||||
var beginTime time.Time
|
||||
if sh != nil {
|
||||
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
|
||||
beginTime = time.Now()
|
||||
begin := &stats.Begin{
|
||||
Client: true,
|
||||
BeginTime: time.Now(),
|
||||
BeginTime: beginTime,
|
||||
FailFast: c.failFast,
|
||||
}
|
||||
sh.HandleRPC(ctx, begin)
|
||||
@@ -184,94 +246,89 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
||||
end := &stats.End{
|
||||
Client: true,
|
||||
Error: err,
|
||||
BeginTime: beginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
sh.HandleRPC(ctx, end)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
var (
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
done func(balancer.DoneInfo)
|
||||
)
|
||||
for {
|
||||
// Check to make sure the context has expired. This will prevent us from
|
||||
// looping forever if an error occurs for wait-for-ready RPCs where no data
|
||||
// is sent on the wire.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, toRPCErr(ctx.Err())
|
||||
default:
|
||||
}
|
||||
|
||||
t, done, err = cc.getTransport(ctx, c.failFast)
|
||||
if err != nil {
|
||||
// TODO(zhaoq): Probably revisit the error handling.
|
||||
if _, ok := status.FromError(err); ok {
|
||||
return nil, err
|
||||
}
|
||||
if err == errConnClosing || err == errConnUnavailable {
|
||||
if c.failFast {
|
||||
return nil, Errorf(codes.Unavailable, "%v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// All the other errors are treated as Internal errors.
|
||||
return nil, Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
s, err = t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); ok && done != nil {
|
||||
// If error is connection error, transport was sending data on wire,
|
||||
// and we are not sure if anything has been sent on wire.
|
||||
// If error is not connection error, we are sure nothing has been sent.
|
||||
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
|
||||
}
|
||||
if done != nil {
|
||||
done(balancer.DoneInfo{Err: err})
|
||||
done = nil
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
|
||||
// In the event of any error from NewStream, we never attempted to write
|
||||
// anything to the wire, so we can retry indefinitely for non-fail-fast
|
||||
// RPCs.
|
||||
if !c.failFast {
|
||||
continue
|
||||
}
|
||||
return nil, toRPCErr(err)
|
||||
}
|
||||
break
|
||||
}
|
||||
// Set callInfo.peer object from stream's context.
|
||||
if peer, ok := peer.FromContext(s.Context()); ok {
|
||||
c.peer = peer
|
||||
}
|
||||
|
||||
cs := &clientStream{
|
||||
opts: opts,
|
||||
c: c,
|
||||
cc: cc,
|
||||
desc: desc,
|
||||
codec: cc.dopts.codec,
|
||||
cp: cc.dopts.cp,
|
||||
dc: cc.dopts.dc,
|
||||
codec: c.codec,
|
||||
cp: cp,
|
||||
comp: comp,
|
||||
cancel: cancel,
|
||||
|
||||
done: done,
|
||||
attempt: &csAttempt{
|
||||
t: t,
|
||||
s: s,
|
||||
p: &parser{r: s},
|
||||
|
||||
tracing: EnableTracing,
|
||||
done: done,
|
||||
dc: cc.dopts.dc,
|
||||
ctx: ctx,
|
||||
trInfo: trInfo,
|
||||
|
||||
statsCtx: ctx,
|
||||
statsHandler: cc.dopts.copts.StatsHandler,
|
||||
statsHandler: sh,
|
||||
beginTime: beginTime,
|
||||
},
|
||||
}
|
||||
// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
|
||||
// when there is no pending I/O operations on this stream.
|
||||
cs.c.stream = cs
|
||||
cs.attempt.cs = cs
|
||||
if desc != unaryStreamDesc {
|
||||
// Listen on cc and stream contexts to cleanup when the user closes the
|
||||
// ClientConn or cancels the stream context. In all other cases, an error
|
||||
// should already be injected into the recv buffer by the transport, which
|
||||
// the client will eventually receive, and then we will cancel the stream's
|
||||
// context in clientStream.finish.
|
||||
go func() {
|
||||
select {
|
||||
case <-t.Error():
|
||||
// Incur transport error, simply exit.
|
||||
case <-cc.ctx.Done():
|
||||
cs.finish(ErrClientConnClosing)
|
||||
cs.closeTransportStream(ErrClientConnClosing)
|
||||
case <-s.Done():
|
||||
// TODO: The trace of the RPC is terminated here when there is no pending
|
||||
// I/O, which is probably not the optimal solution.
|
||||
cs.finish(s.Status().Err())
|
||||
cs.closeTransportStream(nil)
|
||||
case <-s.GoAway():
|
||||
cs.finish(errConnDrain)
|
||||
cs.closeTransportStream(errConnDrain)
|
||||
case <-s.Context().Done():
|
||||
err := s.Context().Err()
|
||||
cs.finish(err)
|
||||
cs.closeTransportStream(transport.ContextErr(err))
|
||||
case <-ctx.Done():
|
||||
cs.finish(toRPCErr(ctx.Err()))
|
||||
}
|
||||
}()
|
||||
}
|
||||
return cs, nil
|
||||
}
|
||||
|
||||
@@ -279,244 +336,292 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
||||
type clientStream struct {
|
||||
opts []CallOption
|
||||
c *callInfo
|
||||
cc *ClientConn
|
||||
desc *StreamDesc
|
||||
|
||||
codec baseCodec
|
||||
cp Compressor
|
||||
comp encoding.Compressor
|
||||
|
||||
cancel context.CancelFunc // cancels all attempts
|
||||
|
||||
sentLast bool // sent an end stream
|
||||
|
||||
mu sync.Mutex // guards finished
|
||||
finished bool // TODO: replace with atomic cmpxchg or sync.Once?
|
||||
|
||||
attempt *csAttempt // the active client stream attempt
|
||||
// TODO(hedging): hedging will have multiple attempts simultaneously.
|
||||
}
|
||||
|
||||
// csAttempt implements a single transport stream attempt within a
|
||||
// clientStream.
|
||||
type csAttempt struct {
|
||||
cs *clientStream
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
p *parser
|
||||
desc *StreamDesc
|
||||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
cancel context.CancelFunc
|
||||
|
||||
tracing bool // set to EnableTracing when the clientStream is created.
|
||||
|
||||
mu sync.Mutex
|
||||
done func(balancer.DoneInfo)
|
||||
closed bool
|
||||
finished bool
|
||||
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
|
||||
// and is set to nil when the clientStream's finish method is called.
|
||||
|
||||
dc Decompressor
|
||||
decomp encoding.Compressor
|
||||
decompSet bool
|
||||
|
||||
ctx context.Context // the application's context, wrapped by stats/tracing
|
||||
|
||||
mu sync.Mutex // guards trInfo.tr
|
||||
// trInfo.tr is set when created (if EnableTracing is true),
|
||||
// and cleared when the finish method is called.
|
||||
trInfo traceInfo
|
||||
|
||||
// statsCtx keeps the user context for stats handling.
|
||||
// All stats collection should use the statsCtx (instead of the stream context)
|
||||
// so that all the generated stats for a particular RPC can be associated in the processing phase.
|
||||
statsCtx context.Context
|
||||
statsHandler stats.Handler
|
||||
beginTime time.Time
|
||||
}
|
||||
|
||||
func (cs *clientStream) Context() context.Context {
|
||||
return cs.s.Context()
|
||||
// TODO(retry): commit the current attempt (the context has peer-aware data).
|
||||
return cs.attempt.context()
|
||||
}
|
||||
|
||||
func (cs *clientStream) Header() (metadata.MD, error) {
|
||||
m, err := cs.s.Header()
|
||||
m, err := cs.attempt.header()
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
// TODO(retry): maybe retry on error or commit attempt on success.
|
||||
err = toRPCErr(err)
|
||||
cs.finish(err)
|
||||
}
|
||||
return m, err
|
||||
}
|
||||
|
||||
func (cs *clientStream) Trailer() metadata.MD {
|
||||
return cs.s.Trailer()
|
||||
// TODO(retry): on error, maybe retry (trailers-only).
|
||||
return cs.attempt.trailer()
|
||||
}
|
||||
|
||||
func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
||||
if cs.tracing {
|
||||
cs.mu.Lock()
|
||||
if cs.trInfo.tr != nil {
|
||||
cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
||||
}
|
||||
cs.mu.Unlock()
|
||||
}
|
||||
// TODO Investigate how to signal the stats handling party.
|
||||
// generate error stats if err != nil && err != io.EOF?
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
}
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
if err == io.EOF {
|
||||
// Specialize the process for server streaming. SendMsg is only called
|
||||
// once when creating the stream object. io.EOF needs to be skipped when
|
||||
// the rpc is early finished (before the stream object is created.).
|
||||
// TODO: It is probably better to move this into the generated code.
|
||||
if !cs.desc.ClientStreams && cs.desc.ServerStreams {
|
||||
err = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
err = toRPCErr(err)
|
||||
}()
|
||||
var outPayload *stats.OutPayload
|
||||
if cs.statsHandler != nil {
|
||||
outPayload = &stats.OutPayload{
|
||||
Client: true,
|
||||
}
|
||||
}
|
||||
hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cs.c.maxSendMessageSize == nil {
|
||||
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
|
||||
}
|
||||
if len(data) > *cs.c.maxSendMessageSize {
|
||||
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
|
||||
}
|
||||
err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false})
|
||||
if err == nil && outPayload != nil {
|
||||
outPayload.SentTime = time.Now()
|
||||
cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
|
||||
}
|
||||
return err
|
||||
// TODO(retry): buffer message for replaying if not committed.
|
||||
return cs.attempt.sendMsg(m)
|
||||
}
|
||||
|
||||
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
||||
// TODO(retry): maybe retry on error or commit attempt on success.
|
||||
return cs.attempt.recvMsg(m)
|
||||
}
|
||||
|
||||
func (cs *clientStream) CloseSend() error {
|
||||
cs.attempt.closeSend()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cs *clientStream) finish(err error) {
|
||||
if err == io.EOF {
|
||||
// Ending a stream with EOF indicates a success.
|
||||
err = nil
|
||||
}
|
||||
cs.mu.Lock()
|
||||
if cs.finished {
|
||||
cs.mu.Unlock()
|
||||
return
|
||||
}
|
||||
cs.finished = true
|
||||
cs.mu.Unlock()
|
||||
if channelz.IsOn() {
|
||||
if err != nil {
|
||||
cs.cc.incrCallsFailed()
|
||||
} else {
|
||||
cs.cc.incrCallsSucceeded()
|
||||
}
|
||||
}
|
||||
// TODO(retry): commit current attempt if necessary.
|
||||
cs.attempt.finish(err)
|
||||
for _, o := range cs.opts {
|
||||
o.after(cs.c)
|
||||
}
|
||||
cs.cancel()
|
||||
}
|
||||
|
||||
func (a *csAttempt) context() context.Context {
|
||||
return a.s.Context()
|
||||
}
|
||||
|
||||
func (a *csAttempt) header() (metadata.MD, error) {
|
||||
return a.s.Header()
|
||||
}
|
||||
|
||||
func (a *csAttempt) trailer() metadata.MD {
|
||||
return a.s.Trailer()
|
||||
}
|
||||
|
||||
func (a *csAttempt) sendMsg(m interface{}) (err error) {
|
||||
// TODO Investigate how to signal the stats handling party.
|
||||
// generate error stats if err != nil && err != io.EOF?
|
||||
cs := a.cs
|
||||
defer func() {
|
||||
// For non-client-streaming RPCs, we return nil instead of EOF on success
|
||||
// because the generated code requires it. finish is not called; RecvMsg()
|
||||
// will call it with the stream's status independently.
|
||||
if err == io.EOF && !cs.desc.ClientStreams {
|
||||
err = nil
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
// Call finish on the client stream for errors generated by this SendMsg
|
||||
// call, as these indicate problems created by this client. (Transport
|
||||
// errors are converted to an io.EOF error below; the real error will be
|
||||
// returned from RecvMsg eventually in that case, or be retried.)
|
||||
cs.finish(err)
|
||||
}
|
||||
}()
|
||||
// TODO: Check cs.sentLast and error if we already ended the stream.
|
||||
if EnableTracing {
|
||||
a.mu.Lock()
|
||||
if a.trInfo.tr != nil {
|
||||
a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
||||
}
|
||||
a.mu.Unlock()
|
||||
}
|
||||
data, err := encode(cs.codec, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
compData, err := compress(data, cs.cp, cs.comp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr, payload := msgHeader(data, compData)
|
||||
// TODO(dfawley): should we be checking len(data) instead?
|
||||
if len(payload) > *cs.c.maxSendMessageSize {
|
||||
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.c.maxSendMessageSize)
|
||||
}
|
||||
|
||||
if !cs.desc.ClientStreams {
|
||||
cs.sentLast = true
|
||||
}
|
||||
err = a.t.Write(a.s, hdr, payload, &transport.Options{Last: !cs.desc.ClientStreams})
|
||||
if err == nil {
|
||||
if a.statsHandler != nil {
|
||||
a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payload, time.Now()))
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
a.t.IncrMsgSent()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
func (a *csAttempt) recvMsg(m interface{}) (err error) {
|
||||
cs := a.cs
|
||||
defer func() {
|
||||
if err != nil || !cs.desc.ServerStreams {
|
||||
// err != nil or non-server-streaming indicates end of stream.
|
||||
cs.finish(err)
|
||||
}
|
||||
}()
|
||||
var inPayload *stats.InPayload
|
||||
if cs.statsHandler != nil {
|
||||
if a.statsHandler != nil {
|
||||
inPayload = &stats.InPayload{
|
||||
Client: true,
|
||||
}
|
||||
}
|
||||
if cs.c.maxReceiveMessageSize == nil {
|
||||
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
|
||||
if !a.decompSet {
|
||||
// Block until we receive headers containing received message encoding.
|
||||
if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
|
||||
if a.dc == nil || a.dc.Type() != ct {
|
||||
// No configured decompressor, or it does not match the incoming
|
||||
// message encoding; attempt to find a registered compressor that does.
|
||||
a.dc = nil
|
||||
a.decomp = encoding.GetCompressor(ct)
|
||||
}
|
||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
|
||||
defer func() {
|
||||
// err != nil indicates the termination of the stream.
|
||||
} else {
|
||||
// No compression is used; disable our decompressor.
|
||||
a.dc = nil
|
||||
}
|
||||
// Only initialize this state once per stream.
|
||||
a.decompSet = true
|
||||
}
|
||||
err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp)
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
if err == io.EOF {
|
||||
if statusErr := a.s.Status().Err(); statusErr != nil {
|
||||
return statusErr
|
||||
}
|
||||
}()
|
||||
if err == nil {
|
||||
if cs.tracing {
|
||||
cs.mu.Lock()
|
||||
if cs.trInfo.tr != nil {
|
||||
cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||
return io.EOF // indicates successful end of stream.
|
||||
}
|
||||
cs.mu.Unlock()
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if EnableTracing {
|
||||
a.mu.Lock()
|
||||
if a.trInfo.tr != nil {
|
||||
a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||
}
|
||||
a.mu.Unlock()
|
||||
}
|
||||
if inPayload != nil {
|
||||
cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
|
||||
a.statsHandler.HandleRPC(a.ctx, inPayload)
|
||||
}
|
||||
if !cs.desc.ClientStreams || cs.desc.ServerStreams {
|
||||
return
|
||||
if channelz.IsOn() {
|
||||
a.t.IncrMsgRecv()
|
||||
}
|
||||
// Special handling for client streaming rpc.
|
||||
if cs.desc.ServerStreams {
|
||||
// Subsequent messages should be received by subsequent RecvMsg calls.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Special handling for non-server-stream rpcs.
|
||||
// This recv expects EOF or errors, so we don't collect inPayload.
|
||||
if cs.c.maxReceiveMessageSize == nil {
|
||||
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
|
||||
}
|
||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
|
||||
cs.closeTransportStream(err)
|
||||
err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp)
|
||||
if err == nil {
|
||||
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
|
||||
}
|
||||
if err == io.EOF {
|
||||
if se := cs.s.Status().Err(); se != nil {
|
||||
return se
|
||||
}
|
||||
cs.finish(err)
|
||||
return nil
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
if err == io.EOF {
|
||||
if statusErr := cs.s.Status().Err(); statusErr != nil {
|
||||
return statusErr
|
||||
}
|
||||
// Returns io.EOF to indicate the end of the stream.
|
||||
return
|
||||
return a.s.Status().Err() // non-server streaming Recv returns nil on success
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
|
||||
func (cs *clientStream) CloseSend() (err error) {
|
||||
err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true})
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
}
|
||||
}()
|
||||
if err == nil || err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
err = toRPCErr(err)
|
||||
func (a *csAttempt) closeSend() {
|
||||
cs := a.cs
|
||||
if cs.sentLast {
|
||||
return
|
||||
}
|
||||
cs.sentLast = true
|
||||
cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true})
|
||||
// We ignore errors from Write. Any error it would return would also be
|
||||
// returned by a subsequent RecvMsg call, and the user is supposed to always
|
||||
// finish the stream by calling RecvMsg until it returns err != nil.
|
||||
}
|
||||
|
||||
func (cs *clientStream) closeTransportStream(err error) {
|
||||
cs.mu.Lock()
|
||||
if cs.closed {
|
||||
cs.mu.Unlock()
|
||||
return
|
||||
}
|
||||
cs.closed = true
|
||||
cs.mu.Unlock()
|
||||
cs.t.CloseStream(cs.s, err)
|
||||
}
|
||||
func (a *csAttempt) finish(err error) {
|
||||
a.mu.Lock()
|
||||
a.t.CloseStream(a.s, err)
|
||||
|
||||
func (cs *clientStream) finish(err error) {
|
||||
cs.mu.Lock()
|
||||
defer cs.mu.Unlock()
|
||||
if cs.finished {
|
||||
return
|
||||
}
|
||||
cs.finished = true
|
||||
defer func() {
|
||||
if cs.cancel != nil {
|
||||
cs.cancel()
|
||||
}
|
||||
}()
|
||||
for _, o := range cs.opts {
|
||||
o.after(cs.c)
|
||||
}
|
||||
if cs.done != nil {
|
||||
updateRPCInfoInContext(cs.s.Context(), rpcInfo{
|
||||
bytesSent: cs.s.BytesSent(),
|
||||
bytesReceived: cs.s.BytesReceived(),
|
||||
if a.done != nil {
|
||||
a.done(balancer.DoneInfo{
|
||||
Err: err,
|
||||
BytesSent: true,
|
||||
BytesReceived: a.s.BytesReceived(),
|
||||
})
|
||||
cs.done(balancer.DoneInfo{Err: err})
|
||||
cs.done = nil
|
||||
}
|
||||
if cs.statsHandler != nil {
|
||||
if a.statsHandler != nil {
|
||||
end := &stats.End{
|
||||
Client: true,
|
||||
BeginTime: a.beginTime,
|
||||
EndTime: time.Now(),
|
||||
Error: err,
|
||||
}
|
||||
if err != io.EOF {
|
||||
// end.Error is nil if the RPC finished successfully.
|
||||
end.Error = toRPCErr(err)
|
||||
a.statsHandler.HandleRPC(a.ctx, end)
|
||||
}
|
||||
cs.statsHandler.HandleRPC(cs.statsCtx, end)
|
||||
}
|
||||
if !cs.tracing {
|
||||
return
|
||||
}
|
||||
if cs.trInfo.tr != nil {
|
||||
if err == nil || err == io.EOF {
|
||||
cs.trInfo.tr.LazyPrintf("RPC: [OK]")
|
||||
if a.trInfo.tr != nil {
|
||||
if err == nil {
|
||||
a.trInfo.tr.LazyPrintf("RPC: [OK]")
|
||||
} else {
|
||||
cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
|
||||
cs.trInfo.tr.SetError()
|
||||
a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
|
||||
a.trInfo.tr.SetError()
|
||||
}
|
||||
cs.trInfo.tr.Finish()
|
||||
cs.trInfo.tr = nil
|
||||
a.trInfo.tr.Finish()
|
||||
a.trInfo.tr = nil
|
||||
}
|
||||
a.mu.Unlock()
|
||||
}
|
||||
|
||||
// ServerStream defines the interface a server stream has to satisfy.
|
||||
@@ -540,12 +645,17 @@ type ServerStream interface {
|
||||
|
||||
// serverStream implements a server side Stream.
|
||||
type serverStream struct {
|
||||
ctx context.Context
|
||||
t transport.ServerTransport
|
||||
s *transport.Stream
|
||||
p *parser
|
||||
codec Codec
|
||||
codec baseCodec
|
||||
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
comp encoding.Compressor
|
||||
decomp encoding.Compressor
|
||||
|
||||
maxReceiveMessageSize int
|
||||
maxSendMessageSize int
|
||||
trInfo *traceInfo
|
||||
@@ -556,7 +666,7 @@ type serverStream struct {
|
||||
}
|
||||
|
||||
func (ss *serverStream) Context() context.Context {
|
||||
return ss.s.Context()
|
||||
return ss.ctx
|
||||
}
|
||||
|
||||
func (ss *serverStream) SetHeader(md metadata.MD) error {
|
||||
@@ -575,7 +685,6 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
|
||||
return
|
||||
}
|
||||
ss.s.SetTrailer(md)
|
||||
return
|
||||
}
|
||||
|
||||
func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
||||
@@ -596,24 +705,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
||||
st, _ := status.FromError(toRPCErr(err))
|
||||
ss.t.WriteStatus(ss.s, st)
|
||||
}
|
||||
}()
|
||||
var outPayload *stats.OutPayload
|
||||
if ss.statsHandler != nil {
|
||||
outPayload = &stats.OutPayload{}
|
||||
if channelz.IsOn() && err == nil {
|
||||
ss.t.IncrMsgSent()
|
||||
}
|
||||
hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload)
|
||||
}()
|
||||
data, err := encode(ss.codec, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(data) > ss.maxSendMessageSize {
|
||||
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
|
||||
compData, err := compress(data, ss.cp, ss.comp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
|
||||
hdr, payload := msgHeader(data, compData)
|
||||
// TODO(dfawley): should we be checking len(data) instead?
|
||||
if len(payload) > ss.maxSendMessageSize {
|
||||
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
|
||||
}
|
||||
if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if outPayload != nil {
|
||||
outPayload.SentTime = time.Now()
|
||||
ss.statsHandler.HandleRPC(ss.s.Context(), outPayload)
|
||||
if ss.statsHandler != nil {
|
||||
ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -636,17 +749,20 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
||||
st, _ := status.FromError(toRPCErr(err))
|
||||
ss.t.WriteStatus(ss.s, st)
|
||||
}
|
||||
if channelz.IsOn() && err == nil {
|
||||
ss.t.IncrMsgRecv()
|
||||
}
|
||||
}()
|
||||
var inPayload *stats.InPayload
|
||||
if ss.statsHandler != nil {
|
||||
inPayload = &stats.InPayload{}
|
||||
}
|
||||
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
|
||||
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil {
|
||||
if err == io.EOF {
|
||||
return err
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
|
||||
err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
@@ -655,3 +771,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// MethodFromServerStream returns the method string for the input stream.
// The returned string is in the format of "/service/method".
func MethodFromServerStream(stream ServerStream) (string, bool) {
	return Method(stream.Context())
}
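MethodFromServerStream is a newly exported helper in this release. A hedged usage sketch from a server-side stream interceptor (editor's illustration, not part of the vendored diff; the interceptor signature is the grpc package's StreamServerInterceptor):
// Editor's illustrative sketch; assumes imports "log" and "google.golang.org/grpc".
func loggingStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	if m, ok := grpc.MethodFromServerStream(ss); ok {
		log.Printf("handling stream %s", m) // e.g. "/helloworld.Greeter/SayHello"
	}
	return handler(srv, ss)
}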
|
||||
|
||||
7
vendor/google.golang.org/grpc/transport/BUILD
generated
vendored
@@ -4,7 +4,10 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"bdp_estimator.go",
|
||||
"control.go",
|
||||
"controlbuf.go",
|
||||
"flowcontrol.go",
|
||||
"go16.go",
|
||||
"go17.go",
|
||||
"handler_server.go",
|
||||
"http2_client.go",
|
||||
"http2_server.go",
|
||||
@@ -24,6 +27,8 @@ go_library(
|
||||
"//vendor/google.golang.org/grpc/codes:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/credentials:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/internal/channelz:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/internal/grpcrand:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/keepalive:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/metadata:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/peer:go_default_library",
|
||||
|
||||
9
vendor/google.golang.org/grpc/transport/bdp_estimator.go
generated
vendored
@@ -41,12 +41,9 @@ const (
|
||||
gamma = 2
|
||||
)
|
||||
|
||||
var (
|
||||
// Adding arbitrary data to ping so that its ack can be
|
||||
// identified.
|
||||
// Easter-egg: what does the ping message say?
|
||||
bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
|
||||
)
|
||||
// Adding arbitrary data to ping so that its ack can be identified.
|
||||
// Easter-egg: what does the ping message say?
|
||||
var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
|
||||
|
||||
type bdpEstimator struct {
|
||||
// sentAt is the time when the ping was sent.
|
||||
|
||||
796
vendor/google.golang.org/grpc/transport/controlbuf.go
generated
vendored
Normal file
@@ -0,0 +1,796 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
|
||||
e.SetMaxDynamicTableSizeLimit(v)
|
||||
}
|
||||
|
||||
type itemNode struct {
|
||||
it interface{}
|
||||
next *itemNode
|
||||
}
|
||||
|
||||
type itemList struct {
|
||||
head *itemNode
|
||||
tail *itemNode
|
||||
}
|
||||
|
||||
func (il *itemList) enqueue(i interface{}) {
|
||||
n := &itemNode{it: i}
|
||||
if il.tail == nil {
|
||||
il.head, il.tail = n, n
|
||||
return
|
||||
}
|
||||
il.tail.next = n
|
||||
il.tail = n
|
||||
}
|
||||
|
||||
// peek returns the first item in the list without removing it from the
|
||||
// list.
|
||||
func (il *itemList) peek() interface{} {
|
||||
return il.head.it
|
||||
}
|
||||
|
||||
func (il *itemList) dequeue() interface{} {
|
||||
if il.head == nil {
|
||||
return nil
|
||||
}
|
||||
i := il.head.it
|
||||
il.head = il.head.next
|
||||
if il.head == nil {
|
||||
il.tail = nil
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func (il *itemList) dequeueAll() *itemNode {
|
||||
h := il.head
|
||||
il.head, il.tail = nil, nil
|
||||
return h
|
||||
}
|
||||
|
||||
func (il *itemList) isEmpty() bool {
|
||||
return il.head == nil
|
||||
}
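The itemList above is a minimal FIFO used by the control buffer later in this file; a short sketch of how it behaves (editor's illustration within package transport, not part of the vendored diff):
// Editor's illustrative sketch.
func exampleItemList() {
	il := &itemList{}
	il.enqueue(&ping{data: [8]byte{1}})
	il.enqueue(&dataFrame{streamID: 1})
	for !il.isEmpty() {
		switch it := il.dequeue().(type) {
		case *ping:
			_ = it.data // items come back in FIFO order
		case *dataFrame:
			_ = it.streamID
		}
	}
}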
|
||||
|
||||
// The following defines various control items which could flow through
|
||||
// the control buffer of transport. They represent different aspects of
|
||||
// control tasks, e.g., flow control, settings, streaming resetting, etc.
|
||||
|
||||
// registerStream is used to register an incoming stream with loopy writer.
|
||||
type registerStream struct {
|
||||
streamID uint32
|
||||
wq *writeQuota
|
||||
}
|
||||
|
||||
// headerFrame is also used to register stream on the client-side.
|
||||
type headerFrame struct {
|
||||
streamID uint32
|
||||
hf []hpack.HeaderField
|
||||
endStream bool // Valid on server side.
|
||||
initStream func(uint32) (bool, error) // Used only on the client side.
|
||||
onWrite func()
|
||||
wq *writeQuota // write quota for the stream created.
|
||||
cleanup *cleanupStream // Valid on the server side.
|
||||
onOrphaned func(error) // Valid on client-side
|
||||
}
|
||||
|
||||
type cleanupStream struct {
|
||||
streamID uint32
|
||||
idPtr *uint32
|
||||
rst bool
|
||||
rstCode http2.ErrCode
|
||||
onWrite func()
|
||||
}
|
||||
|
||||
type dataFrame struct {
|
||||
streamID uint32
|
||||
endStream bool
|
||||
h []byte
|
||||
d []byte
|
||||
// onEachWrite is called every time
|
||||
// a part of d is written out.
|
||||
onEachWrite func()
|
||||
}
|
||||
|
||||
type incomingWindowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
}
|
||||
|
||||
type outgoingWindowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
}
|
||||
|
||||
type incomingSettings struct {
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
type outgoingSettings struct {
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
type settingsAck struct {
|
||||
}
|
||||
|
||||
type incomingGoAway struct {
|
||||
}
|
||||
|
||||
type goAway struct {
|
||||
code http2.ErrCode
|
||||
debugData []byte
|
||||
headsUp bool
|
||||
closeConn bool
|
||||
}
|
||||
|
||||
type ping struct {
|
||||
ack bool
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
type outFlowControlSizeRequest struct {
|
||||
resp chan uint32
|
||||
}
|
||||
|
||||
type outStreamState int
|
||||
|
||||
const (
|
||||
active outStreamState = iota
|
||||
empty
|
||||
waitingOnStreamQuota
|
||||
)
|
||||
|
||||
type outStream struct {
|
||||
id uint32
|
||||
state outStreamState
|
||||
itl *itemList
|
||||
bytesOutStanding int
|
||||
wq *writeQuota
|
||||
|
||||
next *outStream
|
||||
prev *outStream
|
||||
}
|
||||
|
||||
func (s *outStream) deleteSelf() {
|
||||
if s.prev != nil {
|
||||
s.prev.next = s.next
|
||||
}
|
||||
if s.next != nil {
|
||||
s.next.prev = s.prev
|
||||
}
|
||||
s.next, s.prev = nil, nil
|
||||
}
|
||||
|
||||
type outStreamList struct {
|
||||
// Following are sentinel objects that mark the
|
||||
// beginning and end of the list. They do not
|
||||
// contain any item lists. All valid objects are
|
||||
// inserted in between them.
|
||||
// This is needed so that an outStream object can
|
||||
// deleteSelf() in O(1) time without knowing which
|
||||
// list it belongs to.
|
||||
head *outStream
|
||||
tail *outStream
|
||||
}
|
||||
|
||||
func newOutStreamList() *outStreamList {
|
||||
head, tail := new(outStream), new(outStream)
|
||||
head.next = tail
|
||||
tail.prev = head
|
||||
return &outStreamList{
|
||||
head: head,
|
||||
tail: tail,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *outStreamList) enqueue(s *outStream) {
|
||||
e := l.tail.prev
|
||||
e.next = s
|
||||
s.prev = e
|
||||
s.next = l.tail
|
||||
l.tail.prev = s
|
||||
}
|
||||
|
||||
// remove from the beginning of the list.
|
||||
func (l *outStreamList) dequeue() *outStream {
|
||||
b := l.head.next
|
||||
if b == l.tail {
|
||||
return nil
|
||||
}
|
||||
b.deleteSelf()
|
||||
return b
|
||||
}
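The sentinel head and tail nodes are what let deleteSelf run in O(1) without knowing the owning list; a hedged sketch of the round-robin pattern that processData builds on (editor's illustration, not part of the vendored diff):
// Editor's illustrative sketch.
func exampleOutStreamList() {
	l := newOutStreamList()
	l.enqueue(&outStream{id: 1, state: active, itl: &itemList{}})
	l.enqueue(&outStream{id: 3, state: active, itl: &itemList{}})
	for str := l.dequeue(); str != nil; str = l.dequeue() {
		// A real writer would emit one frame for str here and re-enqueue it
		// at the tail if it still has data and quota.
		_ = str.id
	}
}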
|
||||
|
||||
type controlBuffer struct {
|
||||
ch chan struct{}
|
||||
done <-chan struct{}
|
||||
mu sync.Mutex
|
||||
consumerWaiting bool
|
||||
list *itemList
|
||||
err error
|
||||
}
|
||||
|
||||
func newControlBuffer(done <-chan struct{}) *controlBuffer {
|
||||
return &controlBuffer{
|
||||
ch: make(chan struct{}, 1),
|
||||
list: &itemList{},
|
||||
done: done,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controlBuffer) put(it interface{}) error {
|
||||
_, err := c.executeAndPut(nil, it)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
|
||||
var wakeUp bool
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return false, c.err
|
||||
}
|
||||
if f != nil {
|
||||
if !f(it) { // f wasn't successful
|
||||
c.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
if c.consumerWaiting {
|
||||
wakeUp = true
|
||||
c.consumerWaiting = false
|
||||
}
|
||||
c.list.enqueue(it)
|
||||
c.mu.Unlock()
|
||||
if wakeUp {
|
||||
select {
|
||||
case c.ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||
for {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return nil, c.err
|
||||
}
|
||||
if !c.list.isEmpty() {
|
||||
h := c.list.dequeue()
|
||||
c.mu.Unlock()
|
||||
return h, nil
|
||||
}
|
||||
if !block {
|
||||
c.mu.Unlock()
|
||||
return nil, nil
|
||||
}
|
||||
c.consumerWaiting = true
|
||||
c.mu.Unlock()
|
||||
select {
|
||||
case <-c.ch:
|
||||
case <-c.done:
|
||||
c.finish()
|
||||
return nil, ErrConnClosing
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controlBuffer) finish() {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
c.err = ErrConnClosing
|
||||
// There may be headers for streams in the control buffer.
|
||||
// These streams need to be cleaned out since the transport
|
||||
// is still not aware of these yet.
|
||||
for head := c.list.dequeueAll(); head != nil; head = head.next {
|
||||
hdr, ok := head.it.(*headerFrame)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if hdr.onOrphaned != nil { // It will be nil on the server-side.
|
||||
hdr.onOrphaned(ErrConnClosing)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
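A minimal producer/consumer sketch of the new controlBuffer (editor's illustration, not part of the vendored diff; the real producers are the stream code paths and the only consumer is loopyWriter.run):
// Editor's illustrative sketch.
func exampleControlBuffer(done <-chan struct{}) error {
	cbuf := newControlBuffer(done)
	// Producer side: any goroutine may enqueue a control item.
	if err := cbuf.put(&outgoingWindowUpdate{streamID: 0, increment: 1024}); err != nil {
		return err
	}
	// Consumer side: get(true) blocks until an item arrives or the buffer is finished.
	it, err := cbuf.get(true)
	if err != nil {
		return err
	}
	if wu, ok := it.(*outgoingWindowUpdate); ok {
		_ = wu.increment
	}
	return nil
}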
|
||||
|
||||
type side int
|
||||
|
||||
const (
|
||||
clientSide side = iota
|
||||
serverSide
|
||||
)
|
||||
|
||||
type loopyWriter struct {
|
||||
side side
|
||||
cbuf *controlBuffer
|
||||
sendQuota uint32
|
||||
oiws uint32 // outbound initial window size.
|
||||
estdStreams map[uint32]*outStream // Established streams.
|
||||
activeStreams *outStreamList // Streams that are sending data.
|
||||
framer *framer
|
||||
hBuf *bytes.Buffer // The buffer for HPACK encoding.
|
||||
hEnc *hpack.Encoder // HPACK encoder.
|
||||
bdpEst *bdpEstimator
|
||||
draining bool
|
||||
|
||||
// Side-specific handlers
|
||||
ssGoAwayHandler func(*goAway) (bool, error)
|
||||
}
|
||||
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
|
||||
var buf bytes.Buffer
|
||||
l := &loopyWriter{
|
||||
side: s,
|
||||
cbuf: cbuf,
|
||||
sendQuota: defaultWindowSize,
|
||||
oiws: defaultWindowSize,
|
||||
estdStreams: make(map[uint32]*outStream),
|
||||
activeStreams: newOutStreamList(),
|
||||
framer: fr,
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
bdpEst: bdpEst,
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
const minBatchSize = 1000
|
||||
|
||||
// run should be run in a separate goroutine.
|
||||
func (l *loopyWriter) run() (err error) {
|
||||
defer func() {
|
||||
if err == ErrConnClosing {
|
||||
// Don't log ErrConnClosing as error since it happens
|
||||
// 1. When the connection is closed by some other known issue.
|
||||
// 2. User closed the connection.
|
||||
// 3. A graceful close of connection.
|
||||
infof("transport: loopyWriter.run returning. %v", err)
|
||||
err = nil
|
||||
}
|
||||
}()
|
||||
for {
|
||||
it, err := l.cbuf.get(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = l.handle(it); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = l.processData(); err != nil {
|
||||
return err
|
||||
}
|
||||
gosched := true
|
||||
hasdata:
|
||||
for {
|
||||
it, err := l.cbuf.get(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if it != nil {
|
||||
if err = l.handle(it); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = l.processData(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue hasdata
|
||||
}
|
||||
isEmpty, err := l.processData()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isEmpty {
|
||||
continue hasdata
|
||||
}
|
||||
if gosched {
|
||||
gosched = false
|
||||
if l.framer.writer.offset < minBatchSize {
|
||||
runtime.Gosched()
|
||||
continue hasdata
|
||||
}
|
||||
}
|
||||
l.framer.writer.Flush()
|
||||
break hasdata
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
|
||||
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
|
||||
// Otherwise update the quota.
|
||||
if w.streamID == 0 {
|
||||
l.sendQuota += w.increment
|
||||
return nil
|
||||
}
|
||||
// Find the stream and update it.
|
||||
if str, ok := l.estdStreams[w.streamID]; ok {
|
||||
str.bytesOutStanding -= int(w.increment)
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
||||
return l.framer.fr.WriteSettings(s.ss...)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
|
||||
if err := l.applySettings(s.ss); err != nil {
|
||||
return err
|
||||
}
|
||||
return l.framer.fr.WriteSettingsAck()
|
||||
}
|
||||
|
||||
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
itl: &itemList{},
|
||||
wq: h.wq,
|
||||
}
|
||||
l.estdStreams[h.streamID] = str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||
if l.side == serverSide {
|
||||
str, ok := l.estdStreams[h.streamID]
|
||||
if !ok {
|
||||
warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
|
||||
return nil
|
||||
}
|
||||
// Case 1.A: Server is responding back with headers.
|
||||
if !h.endStream {
|
||||
return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
|
||||
}
|
||||
// else: Case 1.B: Server wants to close stream.
|
||||
|
||||
if str.state != empty { // either active or waiting on stream quota.
|
||||
// add it str's list of items.
|
||||
str.itl.enqueue(h)
|
||||
return nil
|
||||
}
|
||||
if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
return l.cleanupStreamHandler(h.cleanup)
|
||||
}
|
||||
// Case 2: Client wants to originate stream.
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
itl: &itemList{},
|
||||
wq: h.wq,
|
||||
}
|
||||
str.itl.enqueue(h)
|
||||
return l.originateStream(str)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) originateStream(str *outStream) error {
|
||||
hdr := str.itl.dequeue().(*headerFrame)
|
||||
sendPing, err := hdr.initStream(str.id)
|
||||
if err != nil {
|
||||
if err == ErrConnClosing {
|
||||
return err
|
||||
}
|
||||
// Other errors(errStreamDrain) need not close transport.
|
||||
return nil
|
||||
}
|
||||
if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
l.estdStreams[str.id] = str
|
||||
if sendPing {
|
||||
return l.pingHandler(&ping{data: [8]byte{}})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
|
||||
if onWrite != nil {
|
||||
onWrite()
|
||||
}
|
||||
l.hBuf.Reset()
|
||||
for _, f := range hf {
|
||||
if err := l.hEnc.WriteField(f); err != nil {
|
||||
warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
|
||||
}
|
||||
}
|
||||
var (
|
||||
err error
|
||||
endHeaders, first bool
|
||||
)
|
||||
first = true
|
||||
for !endHeaders {
|
||||
size := l.hBuf.Len()
|
||||
if size > http2MaxFrameLen {
|
||||
size = http2MaxFrameLen
|
||||
} else {
|
||||
endHeaders = true
|
||||
}
|
||||
if first {
|
||||
first = false
|
||||
err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
|
||||
StreamID: streamID,
|
||||
BlockFragment: l.hBuf.Next(size),
|
||||
EndStream: endStream,
|
||||
EndHeaders: endHeaders,
|
||||
})
|
||||
} else {
|
||||
err = l.framer.fr.WriteContinuation(
|
||||
streamID,
|
||||
endHeaders,
|
||||
l.hBuf.Next(size),
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
||||
str, ok := l.estdStreams[df.streamID]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
// If we got data for a stream it means that
|
||||
// stream was originated and the headers were sent out.
|
||||
str.itl.enqueue(df)
|
||||
if str.state == empty {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) pingHandler(p *ping) error {
|
||||
if !p.ack {
|
||||
l.bdpEst.timesnap(p.data)
|
||||
}
|
||||
return l.framer.fr.WritePing(p.ack, p.data)
|
||||
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
|
||||
o.resp <- l.sendQuota
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||
c.onWrite()
|
||||
if str, ok := l.estdStreams[c.streamID]; ok {
|
||||
// On the server side it could be a trailers-only response or
|
||||
// a RST_STREAM before stream initialization thus the stream might
|
||||
// not be established yet.
|
||||
delete(l.estdStreams, c.streamID)
|
||||
str.deleteSelf()
|
||||
}
|
||||
if c.rst { // If RST_STREAM needs to be sent.
|
||||
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
|
||||
return ErrConnClosing
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
||||
if l.side == clientSide {
|
||||
l.draining = true
|
||||
if len(l.estdStreams) == 0 {
|
||||
return ErrConnClosing
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
||||
// Handling of outgoing GoAway is very specific to side.
|
||||
if l.ssGoAwayHandler != nil {
|
||||
draining, err := l.ssGoAwayHandler(g)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.draining = draining
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) handle(i interface{}) error {
|
||||
switch i := i.(type) {
|
||||
case *incomingWindowUpdate:
|
||||
return l.incomingWindowUpdateHandler(i)
|
||||
case *outgoingWindowUpdate:
|
||||
return l.outgoingWindowUpdateHandler(i)
|
||||
case *incomingSettings:
|
||||
return l.incomingSettingsHandler(i)
|
||||
case *outgoingSettings:
|
||||
return l.outgoingSettingsHandler(i)
|
||||
case *headerFrame:
|
||||
return l.headerHandler(i)
|
||||
case *registerStream:
|
||||
return l.registerStreamHandler(i)
|
||||
case *cleanupStream:
|
||||
return l.cleanupStreamHandler(i)
|
||||
case *incomingGoAway:
|
||||
return l.incomingGoAwayHandler(i)
|
||||
case *dataFrame:
|
||||
return l.preprocessData(i)
|
||||
case *ping:
|
||||
return l.pingHandler(i)
|
||||
case *goAway:
|
||||
return l.goAwayHandler(i)
|
||||
case *outFlowControlSizeRequest:
|
||||
return l.outFlowControlSizeRequestHandler(i)
|
||||
default:
|
||||
return fmt.Errorf("transport: unknown control message type %T", i)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
||||
for _, s := range ss {
|
||||
switch s.ID {
|
||||
case http2.SettingInitialWindowSize:
|
||||
o := l.oiws
|
||||
l.oiws = s.Val
|
||||
if o < l.oiws {
|
||||
// If the new limit is greater make all depleted streams active.
|
||||
for _, stream := range l.estdStreams {
|
||||
if stream.state == waitingOnStreamQuota {
|
||||
stream.state = active
|
||||
l.activeStreams.enqueue(stream)
|
||||
}
|
||||
}
|
||||
}
|
||||
case http2.SettingHeaderTableSize:
|
||||
updateHeaderTblSize(l.hEnc, s.Val)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) processData() (bool, error) {
|
||||
if l.sendQuota == 0 {
|
||||
return true, nil
|
||||
}
|
||||
str := l.activeStreams.dequeue()
|
||||
if str == nil {
|
||||
return true, nil
|
||||
}
|
||||
dataItem := str.itl.peek().(*dataFrame)
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 {
|
||||
// Client sends out empty data frame with endStream = true
|
||||
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
|
||||
return false, err
|
||||
}
|
||||
str.itl.dequeue()
|
||||
if str.itl.isEmpty() {
|
||||
str.state = empty
|
||||
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
|
||||
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
} else {
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
var (
|
||||
idx int
|
||||
buf []byte
|
||||
)
|
||||
if len(dataItem.h) != 0 { // data header has not been written out yet.
|
||||
buf = dataItem.h
|
||||
} else {
|
||||
idx = 1
|
||||
buf = dataItem.d
|
||||
}
|
||||
size := http2MaxFrameLen
|
||||
if len(buf) < size {
|
||||
size = len(buf)
|
||||
}
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 {
|
||||
str.state = waitingOnStreamQuota
|
||||
return false, nil
|
||||
} else if strQuota < size {
|
||||
size = strQuota
|
||||
}
|
||||
|
||||
if l.sendQuota < uint32(size) {
|
||||
size = int(l.sendQuota)
|
||||
}
|
||||
// Now that outgoing flow controls are checked we can replenish str's write quota
|
||||
str.wq.replenish(size)
|
||||
var endStream bool
|
||||
// This last data message on this stream and all
|
||||
// of it can be written in this go.
|
||||
if dataItem.endStream && size == len(buf) {
|
||||
// buf contains either data or it contains header but data is empty.
|
||||
if idx == 1 || len(dataItem.d) == 0 {
|
||||
endStream = true
|
||||
}
|
||||
}
|
||||
if dataItem.onEachWrite != nil {
|
||||
dataItem.onEachWrite()
|
||||
}
|
||||
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
|
||||
return false, err
|
||||
}
|
||||
buf = buf[size:]
|
||||
str.bytesOutStanding += size
|
||||
l.sendQuota -= uint32(size)
|
||||
if idx == 0 {
|
||||
dataItem.h = buf
|
||||
} else {
|
||||
dataItem.d = buf
|
||||
}
|
||||
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
|
||||
str.itl.dequeue()
|
||||
}
|
||||
if str.itl.isEmpty() {
|
||||
str.state = empty
|
||||
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
|
||||
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
|
||||
str.state = waitingOnStreamQuota
|
||||
} else { // Otherwise add it back to the list of active streams.
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
@@ -24,9 +24,6 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -36,179 +33,115 @@ const (
|
||||
initialWindowSize = defaultWindowSize // for an RPC
|
||||
infinity = time.Duration(math.MaxInt64)
|
||||
defaultClientKeepaliveTime = infinity
|
||||
defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
|
||||
defaultClientKeepaliveTimeout = 20 * time.Second
|
||||
defaultMaxStreamsClient = 100
|
||||
defaultMaxConnectionIdle = infinity
|
||||
defaultMaxConnectionAge = infinity
|
||||
defaultMaxConnectionAgeGrace = infinity
|
||||
defaultServerKeepaliveTime = time.Duration(2 * time.Hour)
|
||||
defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
|
||||
defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
|
||||
defaultServerKeepaliveTime = 2 * time.Hour
|
||||
defaultServerKeepaliveTimeout = 20 * time.Second
|
||||
defaultKeepalivePolicyMinTime = 5 * time.Minute
|
||||
// max window limit set by HTTP2 Specs.
|
||||
maxWindowSize = math.MaxInt32
|
||||
// defaultLocalSendQuota sets is default value for number of data
|
||||
// defaultWriteQuota is the default value for number of data
|
||||
// bytes that each stream can schedule before some of it being
|
||||
// flushed out.
|
||||
defaultLocalSendQuota = 64 * 1024
|
||||
defaultWriteQuota = 64 * 1024
|
||||
)
|
||||
|
||||
// The following defines various control items which could flow through
|
||||
// the control buffer of transport. They represent different aspects of
|
||||
// control tasks, e.g., flow control, settings, streaming resetting, etc.
|
||||
|
||||
type headerFrame struct {
|
||||
streamID uint32
|
||||
hf []hpack.HeaderField
|
||||
endStream bool
|
||||
// writeQuota is a soft limit on the amount of data a stream can
|
||||
// schedule before some of it is written out.
|
||||
type writeQuota struct {
|
||||
quota int32
|
||||
// get waits on read from when quota goes less than or equal to zero.
|
||||
// replenish writes on it when quota goes positive again.
|
||||
ch chan struct{}
|
||||
// done is triggered in error case.
|
||||
done <-chan struct{}
|
||||
// replenish is called by loopyWriter to give quota back to.
|
||||
// It is implemented as a field so that it can be updated
|
||||
// by tests.
|
||||
replenish func(n int)
|
||||
}
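writeQuota replaces the per-stream quotaPool; a hedged sketch of the get/replenish handshake, using newWriteQuota defined further down (editor's illustration, not part of the vendored diff):
// Editor's illustrative sketch.
func exampleWriteQuota(done <-chan struct{}) error {
	wq := newWriteQuota(defaultWriteQuota, done)
	// The stream's send path charges the quota before handing bytes to loopy...
	if err := wq.get(1024); err != nil {
		return err // done closed: the stream is finished
	}
	// ...and loopyWriter gives it back once those bytes are scheduled on the wire.
	wq.replenish(1024)
	return nil
}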
|
||||
|
||||
func (*headerFrame) item() {}
|
||||
|
||||
type continuationFrame struct {
|
||||
streamID uint32
|
||||
endHeaders bool
|
||||
headerBlockFragment []byte
|
||||
}
|
||||
|
||||
type dataFrame struct {
|
||||
streamID uint32
|
||||
endStream bool
|
||||
d []byte
|
||||
f func()
|
||||
}
|
||||
|
||||
func (*dataFrame) item() {}
|
||||
|
||||
func (*continuationFrame) item() {}
|
||||
|
||||
type windowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
}
|
||||
|
||||
func (*windowUpdate) item() {}
|
||||
|
||||
type settings struct {
|
||||
ack bool
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
func (*settings) item() {}
|
||||
|
||||
type resetStream struct {
|
||||
streamID uint32
|
||||
code http2.ErrCode
|
||||
}
|
||||
|
||||
func (*resetStream) item() {}
|
||||
|
||||
type goAway struct {
|
||||
code http2.ErrCode
|
||||
debugData []byte
|
||||
headsUp bool
|
||||
closeConn bool
|
||||
}
|
||||
|
||||
func (*goAway) item() {}
|
||||
|
||||
type flushIO struct {
|
||||
}
|
||||
|
||||
func (*flushIO) item() {}
|
||||
|
||||
type ping struct {
|
||||
ack bool
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
func (*ping) item() {}
|
||||
|
||||
// quotaPool is a pool which accumulates the quota and sends it to acquire()
|
||||
// when it is available.
|
||||
type quotaPool struct {
|
||||
c chan int
|
||||
|
||||
mu sync.Mutex
|
||||
version uint32
|
||||
quota int
|
||||
}
|
||||
|
||||
// newQuotaPool creates a quotaPool which has quota q available to consume.
|
||||
func newQuotaPool(q int) *quotaPool {
|
||||
qb := "aPool{
|
||||
c: make(chan int, 1),
|
||||
func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
|
||||
w := &writeQuota{
|
||||
quota: sz,
|
||||
ch: make(chan struct{}, 1),
|
||||
done: done,
|
||||
}
|
||||
if q > 0 {
|
||||
qb.c <- q
|
||||
} else {
|
||||
qb.quota = q
|
||||
w.replenish = w.realReplenish
|
||||
return w
|
||||
}
|
||||
|
||||
func (w *writeQuota) get(sz int32) error {
|
||||
for {
|
||||
if atomic.LoadInt32(&w.quota) > 0 {
|
||||
atomic.AddInt32(&w.quota, -sz)
|
||||
return nil
|
||||
}
|
||||
return qb
|
||||
}
|
||||
|
||||
// add cancels the pending quota sent on acquired, incremented by v and sends
|
||||
// it back on acquire.
|
||||
func (qb *quotaPool) add(v int) {
|
||||
qb.mu.Lock()
|
||||
defer qb.mu.Unlock()
|
||||
qb.lockedAdd(v)
|
||||
}
|
||||
|
||||
func (qb *quotaPool) lockedAdd(v int) {
|
||||
select {
|
||||
case n := <-qb.c:
|
||||
qb.quota += n
|
||||
case <-w.ch:
|
||||
continue
|
||||
case <-w.done:
|
||||
return errStreamDone
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *writeQuota) realReplenish(n int) {
|
||||
sz := int32(n)
|
||||
a := atomic.AddInt32(&w.quota, sz)
|
||||
b := a - sz
|
||||
if b <= 0 && a > 0 {
|
||||
select {
|
||||
case w.ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
qb.quota += v
|
||||
if qb.quota <= 0 {
|
||||
return
|
||||
}
|
||||
// After the pool has been created, this is the only place that sends on
|
||||
// the channel. Since mu is held at this point and any quota that was sent
|
||||
// on the channel has been retrieved, we know that this code will always
|
||||
// place any positive quota value on the channel.
|
||||
select {
|
||||
case qb.c <- qb.quota:
|
||||
qb.quota = 0
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (qb *quotaPool) addAndUpdate(v int) {
|
||||
qb.mu.Lock()
|
||||
defer qb.mu.Unlock()
|
||||
qb.lockedAdd(v)
|
||||
// Update the version only after having added to the quota
|
||||
// so that if acquireWithVesrion sees the new vesrion it is
|
||||
// guaranteed to have seen the updated quota.
|
||||
// Also, still keep this inside of the lock, so that when
|
||||
// compareAndExecute is processing, this function doesn't
|
||||
// get executed partially (quota gets updated but the version
|
||||
// doesn't).
|
||||
atomic.AddUint32(&(qb.version), 1)
|
||||
type trInFlow struct {
|
||||
limit uint32
|
||||
unacked uint32
|
||||
effectiveWindowSize uint32
|
||||
}
|
||||
|
||||
func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) {
|
||||
return qb.c, atomic.LoadUint32(&(qb.version))
|
||||
func (f *trInFlow) newLimit(n uint32) uint32 {
|
||||
d := n - f.limit
|
||||
f.limit = n
|
||||
f.updateEffectiveWindowSize()
|
||||
return d
|
||||
}
|
||||
|
||||
func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
|
||||
qb.mu.Lock()
|
||||
defer qb.mu.Unlock()
|
||||
if version == atomic.LoadUint32(&(qb.version)) {
|
||||
success()
|
||||
return true
|
||||
func (f *trInFlow) onData(n uint32) uint32 {
|
||||
f.unacked += n
|
||||
if f.unacked >= f.limit/4 {
|
||||
w := f.unacked
|
||||
f.unacked = 0
|
||||
f.updateEffectiveWindowSize()
|
||||
return w
|
||||
}
|
||||
failure()
|
||||
return false
|
||||
f.updateEffectiveWindowSize()
|
||||
return 0
|
||||
}
|
||||
|
||||
// acquire returns the channel on which available quota amounts are sent.
|
||||
func (qb *quotaPool) acquire() <-chan int {
|
||||
return qb.c
|
||||
func (f *trInFlow) reset() uint32 {
|
||||
w := f.unacked
|
||||
f.unacked = 0
|
||||
f.updateEffectiveWindowSize()
|
||||
return w
|
||||
}
|
||||
|
||||
func (f *trInFlow) updateEffectiveWindowSize() {
|
||||
atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
|
||||
}
|
||||
|
||||
func (f *trInFlow) getSize() uint32 {
|
||||
return atomic.LoadUint32(&f.effectiveWindowSize)
|
||||
}
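trInFlow replaces the quotaPool-based transport-level inbound accounting; a small sketch of the expected call pattern (editor's illustration, window sizes arbitrary, not part of the vendored diff):
// Editor's illustrative sketch.
func exampleTrInFlow() {
	f := &trInFlow{limit: 65535}
	// Raising the limit returns the delta to announce in a transport WINDOW_UPDATE.
	_ = f.newLimit(1 << 20)
	// Incoming DATA accumulates; a non-zero return means "ack this many bytes now"
	// (triggered once unacked bytes reach a quarter of the limit).
	if w := f.onData(300000); w > 0 {
		_ = w
	}
}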
|
||||
|
||||
// TODO(mmukhi): Simplify this code.
|
||||
// inFlow deals with inbound flow control
|
||||
type inFlow struct {
|
||||
mu sync.Mutex
|
||||
@@ -229,9 +162,9 @@ type inFlow struct {
|
||||
// It assumes that n is always greater than the old limit.
|
||||
func (f *inFlow) newLimit(n uint32) uint32 {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
d := n - f.limit
|
||||
f.limit = n
|
||||
f.mu.Unlock()
|
||||
return d
|
||||
}
|
||||
|
||||
@@ -240,7 +173,6 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
||||
n = uint32(math.MaxInt32)
|
||||
}
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
// estSenderQuota is the receiver's view of the maximum number of bytes the sender
|
||||
// can send without a window update.
|
||||
estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
|
||||
@@ -252,7 +184,7 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
||||
// for this message. Therefore we must send an update over the limit since there's an active read
|
||||
// request from the application.
|
||||
if estUntransmittedData > estSenderQuota {
|
||||
// Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec.
|
||||
// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
|
||||
if f.limit+n > maxWindowSize {
|
||||
f.delta = maxWindowSize - f.limit
|
||||
} else {
|
||||
@@ -261,19 +193,24 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
||||
// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
|
||||
f.delta = n
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return f.delta
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
|
||||
|
||||
// onData is invoked when some data frame is received. It updates pendingData.
|
||||
func (f *inFlow) onData(n uint32) error {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
f.pendingData += n
|
||||
if f.pendingData+f.pendingUpdate > f.limit+f.delta {
|
||||
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
|
||||
limit := f.limit
|
||||
rcvd := f.pendingData + f.pendingUpdate
|
||||
f.mu.Unlock()
|
||||
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -281,8 +218,8 @@ func (f *inFlow) onData(n uint32) error {
|
||||
// to be sent to the peer.
|
||||
func (f *inFlow) onRead(n uint32) uint32 {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
if f.pendingData == 0 {
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
|
||||
f.pendingData -= n
|
||||
@@ -297,15 +234,9 @@ func (f *inFlow) onRead(n uint32) uint32 {
|
||||
if f.pendingUpdate >= f.limit/4 {
|
||||
wu := f.pendingUpdate
|
||||
f.pendingUpdate = 0
|
||||
f.mu.Unlock()
|
||||
return wu
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *inFlow) resetPendingUpdate() uint32 {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
n := f.pendingUpdate
|
||||
f.pendingUpdate = 0
|
||||
return n
|
||||
}
|
||||
51
vendor/google.golang.org/grpc/transport/go16.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
// +build go1.6,!go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
|
||||
}
|
||||
|
||||
// ContextErr converts the error from context package into a StreamError.
|
||||
func ContextErr(err error) StreamError {
|
||||
switch err {
|
||||
case context.DeadlineExceeded:
|
||||
return streamErrorf(codes.DeadlineExceeded, "%v", err)
|
||||
case context.Canceled:
|
||||
return streamErrorf(codes.Canceled, "%v", err)
|
||||
}
|
||||
return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
|
||||
}
|
||||
|
||||
// contextFromRequest returns a background context.
|
||||
func contextFromRequest(r *http.Request) context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
52
vendor/google.golang.org/grpc/transport/go17.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
// +build go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
|
||||
netctx "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return (&net.Dialer{}).DialContext(ctx, network, address)
|
||||
}
|
||||
|
||||
// ContextErr converts the error from context package into a StreamError.
|
||||
func ContextErr(err error) StreamError {
|
||||
switch err {
|
||||
case context.DeadlineExceeded, netctx.DeadlineExceeded:
|
||||
return streamErrorf(codes.DeadlineExceeded, "%v", err)
|
||||
case context.Canceled, netctx.Canceled:
|
||||
return streamErrorf(codes.Canceled, "%v", err)
|
||||
}
|
||||
return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
|
||||
}
|
||||
|
||||
// contextFromRequest returns a context from the HTTP Request.
|
||||
func contextFromRequest(r *http.Request) context.Context {
|
||||
return r.Context()
|
||||
}
|
||||
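Taken together, go16.go and go17.go pick a context-aware dial at build time: before Go 1.7 the dial is cancelled through net.Dialer.Cancel, from Go 1.7 on it goes through DialContext. A minimal standalone sketch (not part of the vendored diff; the address is a documentation-only placeholder) of the behaviour the go1.7 path relies on:

// Hypothetical illustration (not from the vendored code): with Go 1.7+,
// net.Dialer.DialContext abandons the connection attempt as soon as the
// context is cancelled or times out, which is what go17.go's dialContext uses.
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// 192.0.2.1 is a documentation-only address, used here so the dial hangs
	// until the context deadline fires.
	conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", "192.0.2.1:443")
	if err != nil {
		fmt.Println("dial aborted:", err) // typically "context deadline exceeded"
		return
	}
	conn.Close()
}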
64 vendor/google.golang.org/grpc/transport/handler_server.go generated vendored
@@ -40,20 +40,24 @@ import (
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/status"
)

// NewServerHandlerTransport returns a ServerTransport handling gRPC
// from inside an http.Handler. It requires that the http Server
// supports HTTP/2.
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
	if r.ProtoMajor != 2 {
		return nil, errors.New("gRPC requires HTTP/2")
	}
	if r.Method != "POST" {
		return nil, errors.New("invalid gRPC request method")
	}
	if !validContentType(r.Header.Get("Content-Type")) {
	contentType := r.Header.Get("Content-Type")
	// TODO: do we assume contentType is lowercase? we did before
	contentSubtype, validContentType := contentSubtype(contentType)
	if !validContentType {
		return nil, errors.New("invalid gRPC request content-type")
	}
	if _, ok := w.(http.Flusher); !ok {
@@ -68,6 +72,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
		req:            r,
		closedCh:       make(chan struct{}),
		writes:         make(chan func()),
		contentType:    contentType,
		contentSubtype: contentSubtype,
		stats:          stats,
	}

	if v := r.Header.Get("grpc-timeout"); v != "" {
@@ -79,19 +86,19 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
		st.timeout = to
	}

	var metakv []string
	metakv := []string{"content-type", contentType}
	if r.Host != "" {
		metakv = append(metakv, ":authority", r.Host)
	}
	for k, vv := range r.Header {
		k = strings.ToLower(k)
		if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) {
		if isReservedHeader(k) && !isWhitelistedHeader(k) {
			continue
		}
		for _, v := range vv {
			v, err := decodeMetadataHeader(k, v)
			if err != nil {
				return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err)
				return nil, streamErrorf(codes.Internal, "malformed binary metadata: %v", err)
			}
			metakv = append(metakv, k, v)
		}
@@ -126,6 +133,14 @@ type serverHandlerTransport struct {
	// block concurrent WriteStatus calls
	// e.g. grpc/(*serverStream).SendMsg/RecvMsg
	writeStatusMu sync.Mutex

	// we just mirror the request content-type
	contentType string
	// we store both contentType and contentSubtype so we don't keep recreating them
	// TODO make sure this is consistent across handler_server and http2_server
	contentSubtype string

	stats stats.Handler
}

func (ht *serverHandlerTransport) Close() error {
@@ -219,6 +234,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
	})

	if err == nil { // transport has not been closed
		if ht.stats != nil {
			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
		}
		ht.Close()
		close(ht.writes)
	}
@@ -235,7 +253,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {

	h := ht.rw.Header()
	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
	h.Set("Content-Type", "application/grpc")
	h.Set("Content-Type", ht.contentType)

	// Predeclare trailers we'll set later in WriteStatus (after the body).
	// This is a SHOULD in the HTTP RFC, and the way you add (known)
@@ -263,7 +281,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts
}

func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
	return ht.do(func() {
	err := ht.do(func() {
		ht.writeCommonHeaders(s)
		h := ht.rw.Header()
		for k, vv := range md {
@@ -279,17 +297,24 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
		ht.rw.WriteHeader(200)
		ht.rw.(http.Flusher).Flush()
	})

	if err == nil {
		if ht.stats != nil {
			ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
		}
	}
	return err
}

func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
	// With this transport type there will be exactly 1 stream: this HTTP request.

	var ctx context.Context
	ctx := contextFromRequest(ht.req)
	var cancel context.CancelFunc
	if ht.timeoutSet {
		ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
		ctx, cancel = context.WithTimeout(ctx, ht.timeout)
	} else {
		ctx, cancel = context.WithCancel(context.Background())
		ctx, cancel = context.WithCancel(ctx)
	}

	// requestOver is closed when either the request's context is done
@@ -320,6 +345,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
		st:             ht,
		method:         req.URL.Path,
		recvCompress:   req.Header.Get("grpc-encoding"),
		contentSubtype: ht.contentSubtype,
	}
	pr := &peer.Peer{
		Addr: ht.RemoteAddr(),
@@ -328,10 +354,18 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
	}
	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
	ctx = peer.NewContext(ctx, pr)
	s.ctx = newContextWithStream(ctx, s)
	s.ctx = peer.NewContext(ctx, pr)
	if ht.stats != nil {
		s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
		inHeader := &stats.InHeader{
			FullMethod:  s.method,
			RemoteAddr:  ht.RemoteAddr(),
			Compression: s.recvCompress,
		}
		ht.stats.HandleRPC(s.ctx, inHeader)
	}
	s.trReader = &transportReader{
		reader:        &recvBufferReader{ctx: s.ctx, recv: s.buf},
		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
		windowHandler: func(int) {},
	}

@@ -386,6 +420,10 @@ func (ht *serverHandlerTransport) runStream() {
	}
}

func (ht *serverHandlerTransport) IncrMsgSent() {}

func (ht *serverHandlerTransport) IncrMsgRecv() {}

func (ht *serverHandlerTransport) Drain() {
	panic("Drain() is not implemented")
}
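NewServerHandlerTransport now also receives a stats.Handler and records the request's content-subtype. For orientation, a minimal sketch of how this transport is usually reached from user code: *grpc.Server implements http.Handler via ServeHTTP, so it can be mounted on an existing HTTP/2 (TLS) server. Certificate paths and the commented-out service registration below are placeholders.

// Sketch only: mounting *grpc.Server on a TLS-enabled net/http server so that
// gRPC requests flow through the handler-based transport. Certificate paths
// and the commented-out service registration are placeholders.
package main

import (
	"log"
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// pb.RegisterGreeterServer(gs, &greeterImpl{}) // hypothetical service registration

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// gRPC traffic is HTTP/2 with an "application/grpc[+subtype]" content-type;
		// everything else falls through to the normal handler.
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			gs.ServeHTTP(w, r) // ends up in NewServerHandlerTransport
			return
		}
		http.NotFound(w, r)
	})

	// ListenAndServeTLS negotiates HTTP/2, which this transport requires
	// (requests with ProtoMajor != 2 are rejected).
	log.Fatal(http.ListenAndServeTLS(":8443", "server.crt", "server.key", handler))
}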
1050 vendor/google.golang.org/grpc/transport/http2_client.go generated vendored (file diff suppressed because it is too large)
772 vendor/google.golang.org/grpc/transport/http2_server.go generated vendored (file diff suppressed because it is too large)
185 vendor/google.golang.org/grpc/transport/http_util.go generated vendored
@@ -23,12 +23,12 @@ import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
@@ -46,6 +46,12 @@ const (
|
||||
// http2IOBufSize specifies the buffer size for sending frames.
|
||||
defaultWriteBufSize = 32 * 1024
|
||||
defaultReadBufSize = 32 * 1024
|
||||
// baseContentType is the base content-type for gRPC. This is a valid
|
||||
// content-type on its own, but can also include a content-subtype such as
|
||||
// "proto" as a suffix after "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
// for more details.
|
||||
baseContentType = "application/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -64,7 +70,7 @@ var (
|
||||
http2.ErrCodeConnect: codes.Internal,
|
||||
http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
|
||||
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
|
||||
http2.ErrCodeHTTP11Required: codes.FailedPrecondition,
|
||||
http2.ErrCodeHTTP11Required: codes.Internal,
|
||||
}
|
||||
statusCodeConvTab = map[codes.Code]http2.ErrCode{
|
||||
codes.Internal: http2.ErrCodeInternal,
|
||||
@@ -114,6 +120,7 @@ type decodeState struct {
|
||||
mdata map[string][]string
|
||||
statsTags []byte
|
||||
statsTrace []byte
|
||||
contentSubtype string
|
||||
}
|
||||
|
||||
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
||||
@@ -125,6 +132,7 @@ func isReservedHeader(hdr string) bool {
|
||||
}
|
||||
switch hdr {
|
||||
case "content-type",
|
||||
"user-agent",
|
||||
"grpc-message-type",
|
||||
"grpc-encoding",
|
||||
"grpc-message",
|
||||
@@ -138,28 +146,55 @@ func isReservedHeader(hdr string) bool {
|
||||
}
|
||||
}
|
||||
|
||||
// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders
|
||||
// that should be propagated into metadata visible to users.
|
||||
func isWhitelistedPseudoHeader(hdr string) bool {
|
||||
// isWhitelistedHeader checks whether hdr should be propagated
|
||||
// into metadata visible to users.
|
||||
func isWhitelistedHeader(hdr string) bool {
|
||||
switch hdr {
|
||||
case ":authority":
|
||||
case ":authority", "user-agent":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func validContentType(t string) bool {
|
||||
e := "application/grpc"
|
||||
if !strings.HasPrefix(t, e) {
|
||||
return false
|
||||
// contentSubtype returns the content-subtype for the given content-type. The
|
||||
// given content-type must be a valid content-type that starts with
|
||||
// "application/grpc". A content-subtype will follow "application/grpc" after a
|
||||
// "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
//
|
||||
// If contentType is not a valid content-type for gRPC, the boolean
|
||||
// will be false, otherwise true. If content-type == "application/grpc",
|
||||
// "application/grpc+", or "application/grpc;", the boolean will be true,
|
||||
// but no content-subtype will be returned.
|
||||
//
|
||||
// contentType is assumed to be lowercase already.
|
||||
func contentSubtype(contentType string) (string, bool) {
|
||||
if contentType == baseContentType {
|
||||
return "", true
|
||||
}
|
||||
// Support variations on the content-type
|
||||
// (e.g. "application/grpc+blah", "application/grpc;blah").
|
||||
if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' {
|
||||
return false
|
||||
if !strings.HasPrefix(contentType, baseContentType) {
|
||||
return "", false
|
||||
}
|
||||
return true
|
||||
// guaranteed since != baseContentType and has baseContentType prefix
|
||||
switch contentType[len(baseContentType)] {
|
||||
case '+', ';':
|
||||
// this will return true for "application/grpc+" or "application/grpc;"
|
||||
// which the previous validContentType function tested to be valid, so we
|
||||
// just say that no content-subtype is specified in this case
|
||||
return contentType[len(baseContentType)+1:], true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
// contentSubtype is assumed to be lowercase
|
||||
func contentType(contentSubtype string) string {
|
||||
if contentSubtype == "" {
|
||||
return baseContentType
|
||||
}
|
||||
return baseContentType + "+" + contentSubtype
|
||||
}
|
||||
|
||||
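validContentType is replaced by contentSubtype, which reports both whether the content-type is gRPC at all and which subtype (if any) follows the "+" or ";" separator. A hypothetical, package-internal illustration of the documented behaviour (not part of the diff; contentSubtype is unexported, so this only compiles inside the transport package):

// Hypothetical, package-internal illustration of contentSubtype's documented behaviour.
func exampleContentSubtype() {
	sub, ok := contentSubtype("application/grpc")      // sub == "",      ok == true
	sub, ok = contentSubtype("application/grpc+proto") // sub == "proto", ok == true
	sub, ok = contentSubtype("application/grpc;json")  // sub == "json",  ok == true
	sub, ok = contentSubtype("application/grpc+")      // sub == "",      ok == true (empty subtype)
	sub, ok = contentSubtype("application/json")       // sub == "",      ok == false (not gRPC)
	sub, ok = contentSubtype("application/grpc-web")   // sub == "",      ok == false ('-' is not '+' or ';')
	_, _ = sub, ok
}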
func (d *decodeState) status() *status.Status {
|
||||
@@ -228,9 +263,9 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error
|
||||
// gRPC status doesn't exist and http status is OK.
|
||||
// Set rawStatusCode to be unknown and return nil error.
|
||||
// So that, if the stream has ended this Unknown status
|
||||
// will be propogated to the user.
|
||||
// will be propagated to the user.
|
||||
// Otherwise, it will be ignored. In which case, status from
|
||||
// a later trailer, that has StreamEnded flag set, is propogated.
|
||||
// a later trailer, that has StreamEnded flag set, is propagated.
|
||||
code := int(codes.Unknown)
|
||||
d.rawStatusCode = &code
|
||||
return nil
|
||||
@@ -247,9 +282,16 @@ func (d *decodeState) addMetadata(k, v string) {
|
||||
func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
||||
switch f.Name {
|
||||
case "content-type":
|
||||
if !validContentType(f.Value) {
|
||||
return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)
|
||||
contentSubtype, validContentType := contentSubtype(f.Value)
|
||||
if !validContentType {
|
||||
return streamErrorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
|
||||
}
|
||||
d.contentSubtype = contentSubtype
|
||||
// TODO: do we want to propagate the whole content-type in the metadata,
|
||||
// or come up with a way to just propagate the content-subtype if it was set?
|
||||
// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
|
||||
// in the metadata?
|
||||
d.addMetadata(f.Name, f.Value)
|
||||
case "grpc-encoding":
|
||||
d.encoding = f.Value
|
||||
case "grpc-status":
|
||||
@@ -299,7 +341,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
||||
d.statsTrace = v
|
||||
d.addMetadata(f.Name, string(v))
|
||||
default:
|
||||
if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) {
|
||||
if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
|
||||
break
|
||||
}
|
||||
v, err := decodeMetadataHeader(f.Name, f.Value)
|
||||
@@ -307,7 +349,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
||||
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
|
||||
return nil
|
||||
}
|
||||
d.addMetadata(f.Name, string(v))
|
||||
d.addMetadata(f.Name, v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -396,16 +438,17 @@ func decodeTimeout(s string) (time.Duration, error) {
|
||||
|
||||
const (
|
||||
spaceByte = ' '
|
||||
tildaByte = '~'
|
||||
tildeByte = '~'
|
||||
percentByte = '%'
|
||||
)
|
||||
|
||||
// encodeGrpcMessage is used to encode status code in header field
|
||||
// "grpc-message".
|
||||
// It checks to see if each individual byte in msg is an
|
||||
// allowable byte, and then either percent encoding or passing it through.
|
||||
// When percent encoding, the byte is converted into hexadecimal notation
|
||||
// with a '%' prepended.
|
||||
// "grpc-message". It does percent encoding and also replaces invalid utf-8
|
||||
// characters with Unicode replacement character.
|
||||
//
|
||||
// It checks to see if each individual byte in msg is an allowable byte, and
|
||||
// then either percent encoding or passing it through. When percent encoding,
|
||||
// the byte is converted into hexadecimal notation with a '%' prepended.
|
||||
func encodeGrpcMessage(msg string) string {
|
||||
if msg == "" {
|
||||
return ""
|
||||
@@ -413,7 +456,7 @@ func encodeGrpcMessage(msg string) string {
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if !(c >= spaceByte && c < tildaByte && c != percentByte) {
|
||||
if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
|
||||
return encodeGrpcMessageUnchecked(msg)
|
||||
}
|
||||
}
|
||||
@@ -422,14 +465,26 @@ func encodeGrpcMessage(msg string) string {
|
||||
|
||||
func encodeGrpcMessageUnchecked(msg string) string {
|
||||
var buf bytes.Buffer
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if c >= spaceByte && c < tildaByte && c != percentByte {
|
||||
buf.WriteByte(c)
|
||||
} else {
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", c))
|
||||
for len(msg) > 0 {
|
||||
r, size := utf8.DecodeRuneInString(msg)
|
||||
for _, b := range []byte(string(r)) {
|
||||
if size > 1 {
|
||||
// If size > 1, r is not ascii. Always do percent encoding.
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", b))
|
||||
continue
|
||||
}
|
||||
|
||||
// The for loop is necessary even if size == 1. r could be
|
||||
// utf8.RuneError.
|
||||
//
|
||||
// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
|
||||
if b >= spaceByte && b <= tildeByte && b != percentByte {
|
||||
buf.WriteByte(b)
|
||||
} else {
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", b))
|
||||
}
|
||||
}
|
||||
msg = msg[size:]
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
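The new encodeGrpcMessageUnchecked walks the message rune by rune, so invalid UTF-8 is first collapsed to the replacement character and then percent-encoded. Illustrative expectations, hand-derived from the rules above rather than taken from the diff:

// Illustrative expectations for encodeGrpcMessage (a package-internal helper):
//
//	encodeGrpcMessage("ok")       == "ok"           // printable ASCII passes through
//	encodeGrpcMessage("50% done") == "50%25 done"   // '%' itself must be escaped
//	encodeGrpcMessage("naïve")    == "na%C3%AFve"   // non-ASCII runes are escaped byte by byte
//	encodeGrpcMessage("a\x80b")   == "a%EF%BF%BDb"  // invalid UTF-8 becomes U+FFFD, then escaped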
@@ -468,19 +523,67 @@ func decodeGrpcMessageUnchecked(msg string) string {
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
type bufWriter struct {
|
||||
buf []byte
|
||||
offset int
|
||||
batchSize int
|
||||
conn net.Conn
|
||||
err error
|
||||
|
||||
onFlush func()
|
||||
}
|
||||
|
||||
func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
|
||||
return &bufWriter{
|
||||
buf: make([]byte, batchSize*2),
|
||||
batchSize: batchSize,
|
||||
conn: conn,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
for len(b) > 0 {
|
||||
nn := copy(w.buf[w.offset:], b)
|
||||
b = b[nn:]
|
||||
w.offset += nn
|
||||
n += nn
|
||||
if w.offset >= w.batchSize {
|
||||
err = w.Flush()
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *bufWriter) Flush() error {
|
||||
if w.err != nil {
|
||||
return w.err
|
||||
}
|
||||
if w.offset == 0 {
|
||||
return nil
|
||||
}
|
||||
if w.onFlush != nil {
|
||||
w.onFlush()
|
||||
}
|
||||
_, w.err = w.conn.Write(w.buf[:w.offset])
|
||||
w.offset = 0
|
||||
return w.err
|
||||
}
|
||||
|
||||
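bufWriter replaces the framer's bufio.Writer: writes accumulate in a fixed buffer and are pushed to the connection only once batchSize bytes are pending, so a single syscall can carry several frames. A rough standalone sketch of the same batching idea, with bytes.Buffer standing in for the net.Conn (illustrative, not the vendored type):

// Standalone sketch of the batching idea behind bufWriter; bytes.Buffer stands
// in for the net.Conn the real type writes to.
package main

import (
	"bytes"
	"fmt"
)

type batchWriter struct {
	buf       []byte
	offset    int
	batchSize int
	out       *bytes.Buffer
}

func (w *batchWriter) Write(p []byte) (int, error) {
	n := 0
	for len(p) > 0 {
		c := copy(w.buf[w.offset:], p)
		p = p[c:]
		w.offset += c
		n += c
		if w.offset >= w.batchSize { // flush only once a full batch is pending
			w.out.Write(w.buf[:w.offset])
			w.offset = 0
		}
	}
	return n, nil
}

func main() {
	w := &batchWriter{buf: make([]byte, 8), batchSize: 4, out: &bytes.Buffer{}}
	w.Write([]byte("ab"))
	fmt.Println(w.out.Len()) // 0: still buffered, nothing hit the "connection"
	w.Write([]byte("cdef"))
	fmt.Println(w.out.Len()) // 6: "abcdef" went out in a single write
}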
type framer struct {
|
||||
numWriters int32
|
||||
reader io.Reader
|
||||
writer *bufio.Writer
|
||||
writer *bufWriter
|
||||
fr *http2.Framer
|
||||
}
|
||||
|
||||
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
|
||||
r := bufio.NewReaderSize(conn, readBufferSize)
|
||||
w := newBufWriter(conn, writeBufferSize)
|
||||
f := &framer{
|
||||
reader: bufio.NewReaderSize(conn, readBufferSize),
|
||||
writer: bufio.NewWriterSize(conn, writeBufferSize),
|
||||
writer: w,
|
||||
fr: http2.NewFramer(w, r),
|
||||
}
|
||||
f.fr = http2.NewFramer(f.writer, f.reader)
|
||||
// Opt-in to Frame reuse API on framer to reduce garbage.
|
||||
// Frames aren't safe to read from after a subsequent call to ReadFrame.
|
||||
f.fr.SetReuseFrames()
|
||||
|
||||
417 vendor/google.golang.org/grpc/transport/transport.go generated vendored
@@ -17,19 +17,19 @@
|
||||
*/
|
||||
|
||||
// Package transport defines and implements message oriented communication
|
||||
// channel to complete various transactions (e.g., an RPC).
|
||||
package transport
|
||||
// channel to complete various transactions (e.g., an RPC). It is meant for
|
||||
// grpc-internal usage and is not intended to be imported directly by users.
|
||||
package transport // externally used as import "google.golang.org/grpc/transport"
|
||||
|
||||
import (
|
||||
stdctx "context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
@@ -58,6 +58,7 @@ type recvBuffer struct {
|
||||
c chan recvMsg
|
||||
mu sync.Mutex
|
||||
backlog []recvMsg
|
||||
err error
|
||||
}
|
||||
|
||||
func newRecvBuffer() *recvBuffer {
|
||||
@@ -69,6 +70,13 @@ func newRecvBuffer() *recvBuffer {
|
||||
|
||||
func (b *recvBuffer) put(r recvMsg) {
|
||||
b.mu.Lock()
|
||||
if b.err != nil {
|
||||
b.mu.Unlock()
|
||||
// An error had occurred earlier, don't accept more
|
||||
// data or errors.
|
||||
return
|
||||
}
|
||||
b.err = r.err
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- r:
|
||||
@@ -102,11 +110,12 @@ func (b *recvBuffer) get() <-chan recvMsg {
|
||||
return b.c
|
||||
}
|
||||
|
||||
//
|
||||
// recvBufferReader implements io.Reader interface to read the data from
|
||||
// recvBuffer.
|
||||
type recvBufferReader struct {
|
||||
ctx context.Context
|
||||
goAway chan struct{}
|
||||
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
|
||||
recv *recvBuffer
|
||||
last []byte // Stores the remaining data in the previous calls.
|
||||
err error
|
||||
@@ -131,10 +140,8 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
|
||||
return copied, nil
|
||||
}
|
||||
select {
|
||||
case <-r.ctx.Done():
|
||||
case <-r.ctxDone:
|
||||
return 0, ContextErr(r.ctx.Err())
|
||||
case <-r.goAway:
|
||||
return 0, ErrStreamDrain
|
||||
case m := <-r.recv.get():
|
||||
r.recv.load()
|
||||
if m.err != nil {
|
||||
@@ -146,61 +153,7 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// All items in an out of a controlBuffer should be the same type.
|
||||
type item interface {
|
||||
item()
|
||||
}
|
||||
|
||||
// controlBuffer is an unbounded channel of item.
|
||||
type controlBuffer struct {
|
||||
c chan item
|
||||
mu sync.Mutex
|
||||
backlog []item
|
||||
}
|
||||
|
||||
func newControlBuffer() *controlBuffer {
|
||||
b := &controlBuffer{
|
||||
c: make(chan item, 1),
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *controlBuffer) put(r item) {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- r:
|
||||
b.mu.Unlock()
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, r)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
func (b *controlBuffer) load() {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = nil
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// get returns the channel that receives an item in the buffer.
|
||||
//
|
||||
// Upon receipt of an item, the caller should call load to send another
|
||||
// item onto the channel if there is any.
|
||||
func (b *controlBuffer) get() <-chan item {
|
||||
return b.c
|
||||
}
|
||||
|
||||
type streamState uint8
|
||||
type streamState uint32
|
||||
|
||||
const (
|
||||
streamActive streamState = iota
|
||||
@@ -212,65 +165,92 @@ const (
|
||||
// Stream represents an RPC in the transport layer.
|
||||
type Stream struct {
|
||||
id uint32
|
||||
// nil for client side Stream.
|
||||
st ServerTransport
|
||||
// ctx is the associated context of the stream.
|
||||
ctx context.Context
|
||||
// cancel is always nil for client side Stream.
|
||||
cancel context.CancelFunc
|
||||
// done is closed when the final status arrives.
|
||||
done chan struct{}
|
||||
// goAway is closed when the server sent GoAways signal before this stream was initiated.
|
||||
goAway chan struct{}
|
||||
// method records the associated RPC method of the stream.
|
||||
method string
|
||||
st ServerTransport // nil for client side Stream
|
||||
ctx context.Context // the associated context of the stream
|
||||
cancel context.CancelFunc // always nil for client side Stream
|
||||
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
|
||||
ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
|
||||
method string // the associated RPC method of the stream
|
||||
recvCompress string
|
||||
sendCompress string
|
||||
buf *recvBuffer
|
||||
trReader io.Reader
|
||||
fc *inFlow
|
||||
recvQuota uint32
|
||||
|
||||
// TODO: Remove this unused variable.
|
||||
// The accumulated inbound quota pending for window update.
|
||||
updateQuota uint32
|
||||
wq *writeQuota
|
||||
|
||||
// Callback to state application's intentions to read data. This
|
||||
// is used to adjust flow control, if need be.
|
||||
// is used to adjust flow control, if needed.
|
||||
requestRead func(int)
|
||||
|
||||
sendQuotaPool *quotaPool
|
||||
localSendQuota *quotaPool
|
||||
// Close headerChan to indicate the end of reception of header metadata.
|
||||
headerChan chan struct{}
|
||||
// header caches the received header metadata.
|
||||
header metadata.MD
|
||||
// The key-value map of trailer metadata.
|
||||
trailer metadata.MD
|
||||
headerChan chan struct{} // closed to indicate the end of header metadata.
|
||||
headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
|
||||
|
||||
// hdrMu protects header and trailer metadata on the server-side.
|
||||
hdrMu sync.Mutex
|
||||
header metadata.MD // the received header metadata.
|
||||
trailer metadata.MD // the key-value map of trailer metadata.
|
||||
|
||||
// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
|
||||
headerSent uint32
|
||||
|
||||
mu sync.RWMutex // guard the following
|
||||
// headerOK becomes true from the first header is about to send.
|
||||
headerOk bool
|
||||
state streamState
|
||||
// true iff headerChan is closed. Used to avoid closing headerChan
|
||||
// multiple times.
|
||||
headerDone bool
|
||||
// the status error received from the server.
|
||||
|
||||
// On client-side it is the status error received from the server.
|
||||
// On server-side it is unused.
|
||||
status *status.Status
|
||||
// rstStream indicates whether a RST_STREAM frame needs to be sent
|
||||
// to the server to signify that this stream is closing.
|
||||
rstStream bool
|
||||
// rstError is the error that needs to be sent along with the RST_STREAM frame.
|
||||
rstError http2.ErrCode
|
||||
// bytesSent and bytesReceived indicates whether any bytes have been sent or
|
||||
// received on this stream.
|
||||
bytesSent bool
|
||||
bytesReceived bool
|
||||
|
||||
bytesReceived uint32 // indicates whether any bytes have been received on this stream
|
||||
unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
|
||||
|
||||
// contentSubtype is the content-subtype for requests.
|
||||
// this must be lowercase or the behavior is undefined.
|
||||
contentSubtype string
|
||||
}
|
||||
|
||||
// isHeaderSent is only valid on the server-side.
|
||||
func (s *Stream) isHeaderSent() bool {
|
||||
return atomic.LoadUint32(&s.headerSent) == 1
|
||||
}
|
||||
|
||||
// updateHeaderSent updates headerSent and returns true
|
||||
// if it was already set. It is valid only on server-side.
|
||||
func (s *Stream) updateHeaderSent() bool {
|
||||
return atomic.SwapUint32(&s.headerSent, 1) == 1
|
||||
}
|
||||
|
||||
func (s *Stream) swapState(st streamState) streamState {
|
||||
return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
|
||||
}
|
||||
|
||||
func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
|
||||
return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
|
||||
}
|
||||
|
||||
func (s *Stream) getState() streamState {
|
||||
return streamState(atomic.LoadUint32((*uint32)(&s.state)))
|
||||
}
|
||||
|
||||
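streamState changes from uint8 to uint32 precisely so these helpers can use sync/atomic instead of taking s.mu on every state check. A small standalone sketch of that pattern (names here are illustrative, not the vendored ones):

// Standalone sketch of the lock-free state transition the Stream helpers use.
package main

import (
	"fmt"
	"sync/atomic"
)

type streamState uint32

const (
	stateActive streamState = iota
	stateDone
)

type stream struct{ state streamState }

func (s *stream) compareAndSwapState(old, next streamState) bool {
	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(old), uint32(next))
}

func main() {
	s := &stream{}
	fmt.Println(s.compareAndSwapState(stateActive, stateDone)) // true: the first transition wins
	fmt.Println(s.compareAndSwapState(stateActive, stateDone)) // false: already done
}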
func (s *Stream) waitOnHeader() error {
|
||||
if s.headerChan == nil {
|
||||
// On the server headerChan is always nil since a stream originates
|
||||
// only after having received headers.
|
||||
return nil
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return ContextErr(s.ctx.Err())
|
||||
case <-s.headerChan:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RecvCompress returns the compression algorithm applied to the inbound
|
||||
// message. It is empty string if there is no compression applied.
|
||||
func (s *Stream) RecvCompress() string {
|
||||
if err := s.waitOnHeader(); err != nil {
|
||||
return ""
|
||||
}
|
||||
return s.recvCompress
|
||||
}
|
||||
|
||||
@@ -285,28 +265,17 @@ func (s *Stream) Done() <-chan struct{} {
|
||||
return s.done
|
||||
}
|
||||
|
||||
// GoAway returns a channel which is closed when the server sent GoAways signal
|
||||
// before this stream was initiated.
|
||||
func (s *Stream) GoAway() <-chan struct{} {
|
||||
return s.goAway
|
||||
}
|
||||
|
||||
// Header acquires the key-value pairs of header metadata once it
|
||||
// is available. It blocks until i) the metadata is ready or ii) there is no
|
||||
// header metadata or iii) the stream is canceled/expired.
|
||||
func (s *Stream) Header() (metadata.MD, error) {
|
||||
var err error
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
err = ContextErr(s.ctx.Err())
|
||||
case <-s.goAway:
|
||||
err = ErrStreamDrain
|
||||
case <-s.headerChan:
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
err := s.waitOnHeader()
|
||||
// Even if the stream is closed, header is returned if available.
|
||||
select {
|
||||
case <-s.headerChan:
|
||||
if s.header == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return s.header.Copy(), nil
|
||||
default:
|
||||
}
|
||||
@@ -316,10 +285,10 @@ func (s *Stream) Header() (metadata.MD, error) {
|
||||
// Trailer returns the cached trailer metadata. Note that if it is not called
|
||||
// after the entire stream is done, it could return an empty MD. Client
|
||||
// side only.
|
||||
// It can be safely read only after stream has ended that is either read
|
||||
// or write have returned io.EOF.
|
||||
func (s *Stream) Trailer() metadata.MD {
|
||||
s.mu.RLock()
|
||||
c := s.trailer.Copy()
|
||||
s.mu.RUnlock()
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -329,6 +298,15 @@ func (s *Stream) ServerTransport() ServerTransport {
|
||||
return s.st
|
||||
}
|
||||
|
||||
// ContentSubtype returns the content-subtype for a request. For example, a
|
||||
// content-subtype of "proto" will result in a content-type of
|
||||
// "application/grpc+proto". This will always be lowercase. See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
func (s *Stream) ContentSubtype() string {
|
||||
return s.contentSubtype
|
||||
}
|
||||
|
||||
// Context returns the context of the stream.
|
||||
func (s *Stream) Context() context.Context {
|
||||
return s.ctx
|
||||
@@ -340,36 +318,49 @@ func (s *Stream) Method() string {
|
||||
}
|
||||
|
||||
// Status returns the status received from the server.
|
||||
// Status can be read safely only after the stream has ended,
|
||||
// that is, read or write has returned io.EOF.
|
||||
func (s *Stream) Status() *status.Status {
|
||||
return s.status
|
||||
}
|
||||
|
||||
// SetHeader sets the header metadata. This can be called multiple times.
|
||||
// Server side only.
|
||||
// This should not be called in parallel to other data writes.
|
||||
func (s *Stream) SetHeader(md metadata.MD) error {
|
||||
s.mu.Lock()
|
||||
if s.headerOk || s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
if s.isHeaderSent() || s.getState() == streamDone {
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
if md.Len() == 0 {
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
s.hdrMu.Lock()
|
||||
s.header = metadata.Join(s.header, md)
|
||||
s.mu.Unlock()
|
||||
s.hdrMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendHeader sends the given header metadata. The given metadata is
|
||||
// combined with any metadata set by previous calls to SetHeader and
|
||||
// then written to the transport stream.
|
||||
func (s *Stream) SendHeader(md metadata.MD) error {
|
||||
t := s.ServerTransport()
|
||||
return t.WriteHeader(s, md)
|
||||
}
|
||||
|
||||
// SetTrailer sets the trailer metadata which will be sent with the RPC status
|
||||
// by the server. This can be called multiple times. Server side only.
|
||||
// This should not be called parallel to other data writes.
|
||||
func (s *Stream) SetTrailer(md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
s.mu.Lock()
|
||||
if s.getState() == streamDone {
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
s.hdrMu.Lock()
|
||||
s.trailer = metadata.Join(s.trailer, md)
|
||||
s.mu.Unlock()
|
||||
s.hdrMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -409,28 +400,15 @@ func (t *transportReader) Read(p []byte) (n int, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// finish sets the stream's state and status, and closes the done channel.
|
||||
// s.mu must be held by the caller. st must always be non-nil.
|
||||
func (s *Stream) finish(st *status.Status) {
|
||||
s.status = st
|
||||
s.state = streamDone
|
||||
close(s.done)
|
||||
}
|
||||
|
||||
// BytesSent indicates whether any bytes have been sent on this stream.
|
||||
func (s *Stream) BytesSent() bool {
|
||||
s.mu.Lock()
|
||||
bs := s.bytesSent
|
||||
s.mu.Unlock()
|
||||
return bs
|
||||
}
|
||||
|
||||
// BytesReceived indicates whether any bytes have been received on this stream.
|
||||
func (s *Stream) BytesReceived() bool {
|
||||
s.mu.Lock()
|
||||
br := s.bytesReceived
|
||||
s.mu.Unlock()
|
||||
return br
|
||||
return atomic.LoadUint32(&s.bytesReceived) == 1
|
||||
}
|
||||
|
||||
// Unprocessed indicates whether the server did not process this stream --
|
||||
// i.e. it sent a refused stream or GOAWAY including this stream ID.
|
||||
func (s *Stream) Unprocessed() bool {
|
||||
return atomic.LoadUint32(&s.unprocessed) == 1
|
||||
}
|
||||
|
||||
// GoString is implemented by Stream so context.String() won't
|
||||
@@ -439,21 +417,6 @@ func (s *Stream) GoString() string {
|
||||
return fmt.Sprintf("<stream: %p, %v>", s, s.method)
|
||||
}
|
||||
|
||||
// The key to save transport.Stream in the context.
|
||||
type streamKey struct{}
|
||||
|
||||
// newContextWithStream creates a new context from ctx and attaches stream
|
||||
// to it.
|
||||
func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
|
||||
return context.WithValue(ctx, streamKey{}, stream)
|
||||
}
|
||||
|
||||
// StreamFromContext returns the stream saved in ctx.
|
||||
func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
|
||||
s, ok = ctx.Value(streamKey{}).(*Stream)
|
||||
return
|
||||
}
|
||||
|
||||
// state of transport
|
||||
type transportState int
|
||||
|
||||
@@ -475,6 +438,7 @@ type ServerConfig struct {
|
||||
InitialConnWindowSize int32
|
||||
WriteBufferSize int
|
||||
ReadBufferSize int
|
||||
ChannelzParentID int64
|
||||
}
|
||||
|
||||
// NewServerTransport creates a ServerTransport with conn or non-nil error
|
||||
@@ -510,18 +474,21 @@ type ConnectOptions struct {
|
||||
WriteBufferSize int
|
||||
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
|
||||
ReadBufferSize int
|
||||
// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
|
||||
ChannelzParentID int64
|
||||
}
|
||||
|
||||
// TargetInfo contains the information of the target such as network address and metadata.
|
||||
type TargetInfo struct {
|
||||
Addr string
|
||||
Metadata interface{}
|
||||
Authority string
|
||||
}
|
||||
|
||||
// NewClientTransport establishes the transport with the required ConnectOptions
|
||||
// and returns it to the caller.
|
||||
func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) {
|
||||
return newHTTP2Client(ctx, target, opts, timeout)
|
||||
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
|
||||
return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
|
||||
}
|
||||
|
||||
// Options provides additional hints and information for message
|
||||
@@ -545,10 +512,6 @@ type CallHdr struct {
|
||||
// Method specifies the operation to perform.
|
||||
Method string
|
||||
|
||||
// RecvCompress specifies the compression algorithm applied on
|
||||
// inbound messages.
|
||||
RecvCompress string
|
||||
|
||||
// SendCompress specifies the compression algorithm applied on
|
||||
// outbound message.
|
||||
SendCompress string
|
||||
@@ -563,6 +526,14 @@ type CallHdr struct {
|
||||
// for performance purposes.
|
||||
// If it's false, new stream will never be flushed.
|
||||
Flush bool
|
||||
|
||||
// ContentSubtype specifies the content-subtype for a request. For example, a
|
||||
// content-subtype of "proto" will result in a content-type of
|
||||
// "application/grpc+proto". The value of ContentSubtype must be all
|
||||
// lowercase, otherwise the behavior is undefined. See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
// for more details.
|
||||
ContentSubtype string
|
||||
}
|
||||
|
||||
// ClientTransport is the common interface for all gRPC client-side transport
|
||||
@@ -604,6 +575,12 @@ type ClientTransport interface {
|
||||
|
||||
// GetGoAwayReason returns the reason why GoAway frame was received.
|
||||
GetGoAwayReason() GoAwayReason
|
||||
|
||||
// IncrMsgSent increments the number of message sent through this transport.
|
||||
IncrMsgSent()
|
||||
|
||||
// IncrMsgRecv increments the number of message received through this transport.
|
||||
IncrMsgRecv()
|
||||
}
|
||||
|
||||
// ServerTransport is the common interface for all gRPC server-side transport
|
||||
@@ -637,6 +614,12 @@ type ServerTransport interface {
|
||||
|
||||
// Drain notifies the client this ServerTransport stops accepting new RPCs.
|
||||
Drain()
|
||||
|
||||
// IncrMsgSent increments the number of message sent through this transport.
|
||||
IncrMsgSent()
|
||||
|
||||
// IncrMsgRecv increments the number of message received through this transport.
|
||||
IncrMsgRecv()
|
||||
}
|
||||
|
||||
// streamErrorf creates an StreamError with the specified error code and description.
|
||||
@@ -686,9 +669,16 @@ func (e ConnectionError) Origin() error {
|
||||
var (
|
||||
// ErrConnClosing indicates that the transport is closing.
|
||||
ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
|
||||
// ErrStreamDrain indicates that the stream is rejected by the server because
|
||||
// the server stops accepting new RPCs.
|
||||
ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
|
||||
// errStreamDrain indicates that the stream is rejected because the
|
||||
// connection is draining. This could be caused by goaway or balancer
|
||||
// removing the address.
|
||||
errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining")
|
||||
// errStreamDone is returned from write at the client side to indicate application
|
||||
// layer of an error.
|
||||
errStreamDone = errors.New("the stream is done")
|
||||
// StatusGoAway indicates that the server sent a GOAWAY that included this
|
||||
// stream's ID in unprocessed RPCs.
|
||||
statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
|
||||
)
|
||||
|
||||
// TODO: See if we can replace StreamError with status package errors.
|
||||
@@ -703,75 +693,16 @@ func (e StreamError) Error() string {
|
||||
return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
|
||||
}
|
||||
|
||||
// wait blocks until it can receive from one of the provided contexts or channels
|
||||
func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ContextErr(ctx.Err())
|
||||
case <-done:
|
||||
return 0, io.EOF
|
||||
case <-goAway:
|
||||
return 0, ErrStreamDrain
|
||||
case <-tctx.Done():
|
||||
return 0, ErrConnClosing
|
||||
case i := <-proceed:
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ContextErr converts the error from context package into a StreamError.
|
||||
func ContextErr(err error) StreamError {
|
||||
switch err {
|
||||
case context.DeadlineExceeded, stdctx.DeadlineExceeded:
|
||||
return streamErrorf(codes.DeadlineExceeded, "%v", err)
|
||||
case context.Canceled, stdctx.Canceled:
|
||||
return streamErrorf(codes.Canceled, "%v", err)
|
||||
}
|
||||
return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
|
||||
}
|
||||
|
||||
// GoAwayReason contains the reason for the GoAway frame received.
|
||||
type GoAwayReason uint8
|
||||
|
||||
const (
|
||||
// Invalid indicates that no GoAway frame is received.
|
||||
Invalid GoAwayReason = 0
|
||||
// NoReason is the default value when GoAway frame is received.
|
||||
NoReason GoAwayReason = 1
|
||||
// TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm
|
||||
// was received and that the debug data said "too_many_pings".
|
||||
TooManyPings GoAwayReason = 2
|
||||
// GoAwayInvalid indicates that no GoAway frame is received.
|
||||
GoAwayInvalid GoAwayReason = 0
|
||||
// GoAwayNoReason is the default value when GoAway frame is received.
|
||||
GoAwayNoReason GoAwayReason = 1
|
||||
// GoAwayTooManyPings indicates that a GoAway frame with
|
||||
// ErrCodeEnhanceYourCalm was received and that the debug data said
|
||||
// "too_many_pings".
|
||||
GoAwayTooManyPings GoAwayReason = 2
|
||||
)
|
||||
|
||||
// loopyWriter is run in a separate go routine. It is the single code path that will
|
||||
// write data on wire.
|
||||
func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) {
|
||||
for {
|
||||
select {
|
||||
case i := <-cbuf.get():
|
||||
cbuf.load()
|
||||
if err := handler(i); err != nil {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
hasData:
|
||||
for {
|
||||
select {
|
||||
case i := <-cbuf.get():
|
||||
cbuf.load()
|
||||
if err := handler(i); err != nil {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
if err := handler(&flushIO{}); err != nil {
|
||||
return
|
||||
}
|
||||
break hasData
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
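loopyWriter drains the control buffer while items keep arriving and only issues a flush once the channel goes momentarily quiet, so many control frames share one flush. A condensed, illustrative sketch of that drain-then-flush select pattern (not the vendored implementation):

// Illustrative drain-then-flush loop in the spirit of loopyWriter: handle
// control items while any are ready, and flush only when the buffer is quiet.
package main

import (
	"fmt"
	"time"
)

func writeLoop(items <-chan string, quit <-chan struct{}) {
	for {
		select {
		case it := <-items:
			fmt.Println("handle", it)
		case <-quit:
			return
		}
	hasData:
		for {
			select {
			case it := <-items:
				fmt.Println("handle", it)
			case <-quit:
				return
			default:
				fmt.Println("flush") // nothing pending: flush the batched frames
				break hasData
			}
		}
	}
}

func main() {
	items := make(chan string, 3)
	quit := make(chan struct{})
	items <- "headers"
	items <- "data"
	items <- "window update"
	go func() { time.Sleep(100 * time.Millisecond); close(quit) }() // demo shutdown only
	writeLoop(items, quit)
}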
22 vendor/google.golang.org/grpc/version.go generated vendored Normal file
@@ -0,0 +1,22 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

// Version is the current grpc version.
const Version = "1.13.0"
38 vendor/google.golang.org/grpc/vet.sh generated vendored
@@ -1,5 +1,10 @@
#!/bin/bash

if [[ `uname -a` = *"Darwin"* ]]; then
  echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047"
  exit 1
fi

set -ex  # Exit on error; debugging enabled.
set -o pipefail  # Fail a pipe if any sub-command fails.

@@ -8,12 +13,6 @@ die() {
  exit 1
}

# TODO: Remove this check and the mangling below once "context" is imported
# directly.
if git status --porcelain | read; then
  die "Uncommitted or untracked files found; commit changes first"
fi

PATH="$GOPATH/bin:$GOROOT/bin:$PATH"

# Check proto in manual runs or cron runs.
@@ -28,8 +27,8 @@ if [ "$1" = "-install" ]; then
    github.com/golang/lint/golint \
    golang.org/x/tools/cmd/goimports \
    honnef.co/go/tools/cmd/staticcheck \
    github.com/golang/protobuf/protoc-gen-go \
    golang.org/x/tools/cmd/stringer
    github.com/client9/misspell/cmd/misspell \
    github.com/golang/protobuf/protoc-gen-go
  if [[ "$check_proto" = "true" ]]; then
    if [[ "$TRAVIS" = "true" ]]; then
      PROTOBUF_VERSION=3.3.0
@@ -48,10 +47,18 @@ elif [[ "$#" -ne 0 ]]; then
  die "Unknown argument(s): $*"
fi

# TODO: Remove this check and the mangling below once "context" is imported
# directly.
if git status --porcelain | read; then
  die "Uncommitted or untracked files found; commit changes first"
fi

git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read)
git ls-files "*.go" | xargs grep -l '"unsafe"' 2>&1 | (! grep -v '_test.go') | tee /dev/stderr | (! read)
git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') | tee /dev/stderr | (! read)
gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read)
goimports -l . 2>&1 | tee /dev/stderr | (! read)
golint ./... 2>&1 | (grep -vE "(_mock|_string|grpc_lb_v1/doc|\.pb)\.go:" || true) | tee /dev/stderr | (! read)
golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read)

# Undo any edits made by this script.
cleanup() {
@@ -64,7 +71,7 @@ trap cleanup EXIT
git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":'
set +o pipefail
# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed.
go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read)
go tool vet -all . 2>&1 | grep -vE '(clientconn|transport\/transport_test).go:.*cancel (function|var)' | grep -vF '.pb.go:' | tee /dev/stderr | (! read)
set -o pipefail
git reset --hard HEAD

@@ -75,4 +82,13 @@ if [[ "$check_proto" = "true" ]]; then
fi

# TODO(menghanl): fix errors in transport_test.
staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./...
staticcheck -ignore '
google.golang.org/grpc/transport/transport_test.go:SA2002
google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
google.golang.org/grpc/stats/stats_test.go:SA1019
google.golang.org/grpc/test/end2end_test.go:SA1019
google.golang.org/grpc/balancer_test.go:SA1019
google.golang.org/grpc/balancer.go:SA1019
google.golang.org/grpc/clientconn_test.go:SA1019
' ./...
misspell -error .