update OpenTelemetry dependencies and grpc
This update drops the otelgrpc → cloud.google.com/go/compute dependency, among others. That dependency dropped out because genproto cleaned up its dependencies on Google Cloud libraries and OpenTelemetry updated accordingly; details in #113366.
Signed-off-by: Davanum Srinivas <davanum@gmail.com>
Co-Authored-By: David Ashpole <dashpole@google.com>
vendor/google.golang.org/grpc/CONTRIBUTING.md | 2 (generated, vendored)
@@ -66,7 +66,7 @@ How to get your contributions merged smoothly and quickly.
- **All tests need to be passing** before your change can be merged. We
recommend you **run tests locally** before creating your PR to catch breakages
early on.
- `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
- `./scripts/vet.sh` to catch vet errors
- `go test -cpu 1,4 -timeout 7m ./...` to run the tests
- `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
vendor/google.golang.org/grpc/MAINTAINERS.md | 1 (generated, vendored)
@@ -9,6 +9,7 @@ for general contribution guidelines.

## Maintainers (in alphabetical order)

- [atollena](https://github.com/atollena), Datadog, Inc.
- [cesarghali](https://github.com/cesarghali), Google LLC
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
vendor/google.golang.org/grpc/Makefile | 7 (generated, vendored)
@@ -30,17 +30,20 @@ testdeps:
GO111MODULE=on go get -d -v -t google.golang.org/grpc/...

vet: vetdeps
./vet.sh
./scripts/vet.sh

vetdeps:
./vet.sh -install
./scripts/vet.sh -install

.PHONY: \
all \
build \
clean \
deps \
proto \
test \
testsubmodule \
testrace \
testdeps \
vet \
vetdeps
vendor/google.golang.org/grpc/README.md | 2 (generated, vendored)
@@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the

## Prerequisites

- **[Go][]**: any one of the **three latest major** [releases][go-releases].
- **[Go][]**: any one of the **two latest major** [releases][go-releases].

## Installation
vendor/google.golang.org/grpc/balancer/balancer.go | 9 (generated, vendored)
@@ -54,13 +54,14 @@ var (
// an init() function), and is not thread-safe. If multiple Balancers are
// registered with the same name, the one registered last will take effect.
func Register(b Builder) {
if strings.ToLower(b.Name()) != b.Name() {
name := strings.ToLower(b.Name())
if name != b.Name() {
// TODO: Skip the use of strings.ToLower() to index the map after v1.59
// is released to switch to case sensitive balancer registry. Also,
// remove this warning and update the docstrings for Register and Get.
logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name())
}
m[strings.ToLower(b.Name())] = b
m[name] = b
}

// unregisterForTesting deletes the balancer with the given name from the
@@ -232,8 +233,8 @@ type BuildOptions struct {
// implementations which do not communicate with a remote load balancer
// server can ignore this field.
Authority string
// ChannelzParentID is the parent ClientConn's channelz ID.
ChannelzParentID *channelz.Identifier
// ChannelzParent is the parent ClientConn's channelz channel.
ChannelzParent channelz.Identifier
// CustomUserAgent is the custom user agent set on the parent ClientConn.
// The balancer should set the same custom user agent if it creates a
// ClientConn.
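The Register change above still indexes the registry case-insensitively, but the TODO makes clear that a case-sensitive registry is planned. A third-party policy registers through this same API, usually from an init() function and with an all-lowercase name so the planned switch is a no-op. Below is a minimal, hedged sketch using the public balancer/base helpers; the policy name and picker logic are illustrative, not part of this diff:

```go
package randompick

import (
	"math/rand"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// Name is all lowercase so a future case-sensitive registry lookup still
// matches the name used in service configs.
const Name = "random_pick_example"

func init() {
	balancer.Register(base.NewBalancerBuilder(Name, &pickerBuilder{}, base.Config{HealthCheck: true}))
}

type pickerBuilder struct{}

func (*pickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
	for sc := range info.ReadySCs {
		scs = append(scs, sc)
	}
	return &picker{subConns: scs}
}

type picker struct {
	subConns []balancer.SubConn
}

func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	// Pick a ready SubConn uniformly at random.
	return balancer.PickResult{SubConn: p.subConns[rand.Intn(len(p.subConns))]}, nil
}
```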
@@ -16,68 +16,60 @@
*
*/

package grpc
// Package pickfirst contains the pick_first load balancing policy.
package pickfirst

import (
"encoding/json"
"errors"
"fmt"
"math/rand"

"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)

const (
// PickFirstBalancerName is the name of the pick_first balancer.
PickFirstBalancerName = "pick_first"
logPrefix = "[pick-first-lb %p] "
)

func newPickfirstBuilder() balancer.Builder {
return &pickfirstBuilder{}
func init() {
balancer.Register(pickfirstBuilder{})
internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
}

var logger = grpclog.Component("pick-first-lb")

const (
// Name is the name of the pick_first balancer.
Name = "pick_first"
logPrefix = "[pick-first-lb %p] "
)

type pickfirstBuilder struct{}

func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
b := &pickfirstBalancer{cc: cc}
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
}

func (*pickfirstBuilder) Name() string {
return PickFirstBalancerName
func (pickfirstBuilder) Name() string {
return Name
}

type pfConfig struct {
serviceconfig.LoadBalancingConfig `json:"-"`

// If set to true, instructs the LB policy to shuffle the order of the list
// of addresses received from the name resolver before attempting to
// of endpoints received from the name resolver before attempting to
// connect to them.
ShuffleAddressList bool `json:"shuffleAddressList"`
}

func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
if !envconfig.PickFirstLBConfig {
// Prior to supporting loadbalancing configuration, the pick_first LB
// policy did not implement the balancer.ConfigParser interface. This
// meant that if a non-empty configuration was passed to it, the service
// config unmarshaling code would throw a warning log, but would
// continue using the pick_first LB policy. The code below ensures the
// same behavior is retained if the env var is not set.
if string(js) != "{}" {
logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js))
}
return nil, nil
}

func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
var cfg pfConfig
if err := json.Unmarshal(js, &cfg); err != nil {
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
@@ -111,9 +103,14 @@ func (b *pickfirstBalancer) ResolverError(err error) {
})
}

type Shuffler interface {
ShuffleAddressListForTesting(n int, swap func(i, j int))
}

func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }

func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
addrs := state.ResolverState.Addresses
if len(addrs) == 0 {
if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
// The resolver reported an empty address list. Treat it like an error by
// calling b.ResolverError.
if b.subConn != nil {
@@ -125,22 +122,49 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
b.ResolverError(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}

// We don't have to guard this block with the env var because ParseConfig
// already does so.
cfg, ok := state.BalancerConfig.(pfConfig)
if state.BalancerConfig != nil && !ok {
return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
}
if cfg.ShuffleAddressList {
addrs = append([]resolver.Address{}, addrs...)
grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
}

if b.logger.V(2) {
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
}

var addrs []resolver.Address
if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
// Perform the optional shuffling described in gRFC A62. The shuffling will
// change the order of endpoints but not touch the order of the addresses
// within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}

// "Flatten the list by concatenating the ordered list of addresses for each
// of the endpoints, in order." - A61
for _, endpoint := range endpoints {
// "In the flattened list, interleave addresses from the two address
// families, as per RFC-8304 section 4." - A61
// TODO: support the above language.
addrs = append(addrs, endpoint.Addresses...)
}
} else {
// Endpoints not set, process addresses until we migrate resolver
// emissions fully to Endpoints. The top channel does wrap emitted
// addresses with endpoints, however some balancers such as weighted
// target do not forwarrd the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
addrs = state.ResolverState.Addresses
if cfg.ShuffleAddressList {
addrs = append([]resolver.Address{}, addrs...)
rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
}
}

if b.subConn != nil {
b.cc.UpdateAddresses(b.subConn, addrs)
return nil
@@ -257,7 +281,3 @@ func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
i.subConn.Connect()
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}

func init() {
balancer.Register(newPickfirstBuilder())
}
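The pfConfig change above means pick_first now accepts a shuffleAddressList knob and, per gRFC A62, shuffles endpoints rather than raw addresses when the resolver emits endpoints. From the application's point of view this is driven entirely by service config. A hedged sketch of enabling it through the default service config; the target and option values are illustrative only:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Enable endpoint shuffling in pick_first via service config.
	// The JSON shape mirrors the pfConfig struct shown in the diff.
	conn, err := grpc.NewClient(
		"dns:///example.internal:50051", // illustrative target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{
			"loadBalancingConfig": [
				{"pick_first": {"shuffleAddressList": true}}
			]
		}`),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}
```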
vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go | 4 (generated, vendored)
@@ -22,12 +22,12 @@
package roundrobin

import (
"math/rand"
"sync/atomic"

"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/grpcrand"
)

// Name is the name of round_robin balancer.
@@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
// Start at a random index, as the same RR balancer rebuilds a new
// picker when SubConn states change, and we don't want to apply excess
// load to the first server in the list.
next: uint32(grpcrand.Intn(len(scs))),
next: uint32(rand.Intn(len(scs))),
}
}
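The switch from the internal grpcrand package to math/rand here works because Go 1.20+ seeds the global math/rand source automatically, so no rand.Seed call is needed. A tiny illustrative sketch of the same pattern (function name is hypothetical):

```go
package example

import "math/rand"

// pickStart mirrors the diff's use of the auto-seeded global source in
// Go 1.20+: no rand.Seed call is needed before Intn or Shuffle.
func pickStart(n int) int {
	if n <= 0 {
		return 0
	}
	return rand.Intn(n)
}
```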
vendor/google.golang.org/grpc/balancer_conn_wrappers.go | 454 (generated, vendored)
@@ -1,454 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package grpc

import (
"context"
"fmt"
"strings"
"sync"

"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
)

type ccbMode int

const (
ccbModeActive = iota
ccbModeIdle
ccbModeClosed
ccbModeExitingIdle
)

// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
// ccBalancerWrapper implements methods corresponding to the ones on the
// balancer.Balancer interface. The ClientConn is free to call these methods
// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
// to the Balancer happen synchronously and in order.
//
// ccBalancerWrapper also implements the balancer.ClientConn interface and is
// passed to the Balancer implementations. It invokes unexported methods on the
// ClientConn to handle these calls from the Balancer.
//
// It uses the gracefulswitch.Balancer internally to ensure that balancer
// switches happen in a graceful manner.
type ccBalancerWrapper struct {
// The following fields are initialized when the wrapper is created and are
// read-only afterwards, and therefore can be accessed without a mutex.
cc *ClientConn
opts balancer.BuildOptions

// Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
// mutually exclusive manner as they are scheduled in the serializer. Fields
// accessed *only* in these serializer callbacks, can therefore be accessed
// without a mutex.
balancer *gracefulswitch.Balancer
curBalancerName string

// mu guards access to the below fields. Access to the serializer and its
// cancel function needs to be mutex protected because they are overwritten
// when the wrapper exits idle mode.
mu sync.Mutex
serializer *grpcsync.CallbackSerializer // To serialize all outoing calls.
serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time.
mode ccbMode // Tracks the current mode of the wrapper.
}

// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
// is not created until the switchTo() method is invoked.
func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
ctx, cancel := context.WithCancel(context.Background())
ccb := &ccBalancerWrapper{
cc: cc,
opts: bopts,
serializer: grpcsync.NewCallbackSerializer(ctx),
serializerCancel: cancel,
}
ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
return ccb
}

// updateClientConnState is invoked by grpc to push a ClientConnState update to
// the underlying balancer.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
ccb.mu.Lock()
errCh := make(chan error, 1)
// Here and everywhere else where Schedule() is called, it is done with the
// lock held. But the lock guards only the scheduling part. The actual
// callback is called asynchronously without the lock being held.
ok := ccb.serializer.Schedule(func(_ context.Context) {
errCh <- ccb.balancer.UpdateClientConnState(*ccs)
})
if !ok {
// If we are unable to schedule a function with the serializer, it
// indicates that it has been closed. A serializer is only closed when
// the wrapper is closed or is in idle.
ccb.mu.Unlock()
return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer")
}
ccb.mu.Unlock()

// We get here only if the above call to Schedule succeeds, in which case it
// is guaranteed that the scheduled function will run. Therefore it is safe
// to block on this channel.
err := <-errCh
if logger.V(2) && err != nil {
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
}
return err
}

// updateSubConnState is invoked by grpc to push a subConn state update to the
// underlying balancer.
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
ccb.mu.Lock()
ccb.serializer.Schedule(func(_ context.Context) {
// Even though it is optional for balancers, gracefulswitch ensures
// opts.StateListener is set, so this cannot ever be nil.
sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
})
ccb.mu.Unlock()
}

func (ccb *ccBalancerWrapper) resolverError(err error) {
ccb.mu.Lock()
ccb.serializer.Schedule(func(_ context.Context) {
ccb.balancer.ResolverError(err)
})
ccb.mu.Unlock()
}

// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
// LB policy identified by name.
//
// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
// first good update from the name resolver, it determines the LB policy to use
// and invokes the switchTo() method. Upon receipt of every subsequent update
// from the name resolver, it invokes this method.
//
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
// the graceful balancer switching process if the name does not change.
func (ccb *ccBalancerWrapper) switchTo(name string) {
ccb.mu.Lock()
ccb.serializer.Schedule(func(_ context.Context) {
// TODO: Other languages use case-sensitive balancer registries. We should
// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
if strings.EqualFold(ccb.curBalancerName, name) {
return
}
ccb.buildLoadBalancingPolicy(name)
})
ccb.mu.Unlock()
}

// buildLoadBalancingPolicy performs the following:
//  - retrieve a balancer builder for the given name. Use the default LB
//    policy, pick_first, if no LB policy with name is found in the registry.
//  - instruct the gracefulswitch balancer to switch to the above builder. This
//    will actually build the new balancer.
//  - update the `curBalancerName` field
//
// Must be called from a serializer callback.
func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
builder := balancer.Get(name)
if builder == nil {
channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
builder = newPickfirstBuilder()
} else {
channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
}

if err := ccb.balancer.SwitchTo(builder); err != nil {
channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
return
}
ccb.curBalancerName = builder.Name()
}

func (ccb *ccBalancerWrapper) close() {
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
ccb.closeBalancer(ccbModeClosed)
}

// enterIdleMode is invoked by grpc when the channel enters idle mode upon
// expiry of idle_timeout. This call blocks until the balancer is closed.
func (ccb *ccBalancerWrapper) enterIdleMode() {
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
ccb.closeBalancer(ccbModeIdle)
}

// closeBalancer is invoked when the channel is being closed or when it enters
// idle mode upon expiry of idle_timeout.
func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
ccb.mu.Lock()
if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
ccb.mu.Unlock()
return
}

ccb.mode = m
done := ccb.serializer.Done()
b := ccb.balancer
ok := ccb.serializer.Schedule(func(_ context.Context) {
// Close the serializer to ensure that no more calls from gRPC are sent
// to the balancer.
ccb.serializerCancel()
// Empty the current balancer name because we don't have a balancer
// anymore and also so that we act on the next call to switchTo by
// creating a new balancer specified by the new resolver.
ccb.curBalancerName = ""
})
if !ok {
ccb.mu.Unlock()
return
}
ccb.mu.Unlock()

// Give enqueued callbacks a chance to finish before closing the balancer.
<-done
b.Close()
}

// exitIdleMode is invoked by grpc when the channel exits idle mode either
// because of an RPC or because of an invocation of the Connect() API. This
// recreates the balancer that was closed previously when entering idle mode.
//
// If the channel is not in idle mode, we know for a fact that we are here as a
// result of the user calling the Connect() method on the ClientConn. In this
// case, we can simply forward the call to the underlying balancer, instructing
// it to reconnect to the backends.
func (ccb *ccBalancerWrapper) exitIdleMode() {
ccb.mu.Lock()
if ccb.mode == ccbModeClosed {
// Request to exit idle is a no-op when wrapper is already closed.
ccb.mu.Unlock()
return
}

if ccb.mode == ccbModeIdle {
// Recreate the serializer which was closed when we entered idle.
ctx, cancel := context.WithCancel(context.Background())
ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
ccb.serializerCancel = cancel
}

// The ClientConn guarantees that mutual exclusion between close() and
// exitIdleMode(), and since we just created a new serializer, we can be
// sure that the below function will be scheduled.
done := make(chan struct{})
ccb.serializer.Schedule(func(_ context.Context) {
defer close(done)

ccb.mu.Lock()
defer ccb.mu.Unlock()

if ccb.mode != ccbModeIdle {
ccb.balancer.ExitIdle()
return
}

// Gracefulswitch balancer does not support a switchTo operation after
// being closed. Hence we need to create a new one here.
ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
ccb.mode = ccbModeActive
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")

})
ccb.mu.Unlock()

<-done
}

func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
ccb.mu.Lock()
defer ccb.mu.Unlock()
return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
}

func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
if ccb.isIdleOrClosed() {
return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
}

if len(addrs) == 0 {
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
}
ac, err := ccb.cc.newAddrConn(addrs, opts)
if err != nil {
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
return nil, err
}
acbw := &acBalancerWrapper{
ccb: ccb,
ac: ac,
producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
stateListener: opts.StateListener,
}
ac.acbw = acbw
return acbw, nil
}

func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
// The graceful switch balancer will never call this.
logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
}

func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
if ccb.isIdleOrClosed() {
return
}

acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
}
acbw.UpdateAddresses(addrs)
}

func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
if ccb.isIdleOrClosed() {
return
}

// Update picker before updating state. Even though the ordering here does
// not matter, it can lead to multiple calls of Pick in the common start-up
// case where we wait for ready and then perform an RPC. If the picker is
// updated later, we could call the "connecting" picker when the state is
// updated, and then call the "ready" picker after the picker gets updated.
ccb.cc.blockingpicker.updatePicker(s.Picker)
ccb.cc.csMgr.updateState(s.ConnectivityState)
}

func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
if ccb.isIdleOrClosed() {
return
}

ccb.cc.resolveNow(o)
}

func (ccb *ccBalancerWrapper) Target() string {
return ccb.cc.target
}

// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct {
ac *addrConn // read-only
ccb *ccBalancerWrapper // read-only
stateListener func(balancer.SubConnState)

mu sync.Mutex
producers map[balancer.ProducerBuilder]*refCountedProducer
}

func (acbw *acBalancerWrapper) String() string {
return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
}

func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
acbw.ac.updateAddrs(addrs)
}

func (acbw *acBalancerWrapper) Connect() {
go acbw.ac.connect()
}

func (acbw *acBalancerWrapper) Shutdown() {
ccb := acbw.ccb
if ccb.isIdleOrClosed() {
// It it safe to ignore this call when the balancer is closed or in idle
// because the ClientConn takes care of closing the connections.
//
// Not returning early from here when the balancer is closed or in idle
// leads to a deadlock though, because of the following sequence of
// calls when holding cc.mu:
// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
// ccb.RemoveAddrConn --> cc.removeAddrConn
return
}

ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
}

// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
// ready, blocks until it is or ctx expires. Returns an error when the context
// expires or the addrConn is shut down.
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
transport, err := acbw.ac.getTransport(ctx)
if err != nil {
return nil, err
}
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
}

// Invoke performs a unary RPC. If the addrConn is not ready, returns
// errSubConnNotReady.
func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
if err != nil {
return err
}
if err := cs.SendMsg(args); err != nil {
return err
}
return cs.RecvMsg(reply)
}

type refCountedProducer struct {
producer balancer.Producer
refs int // number of current refs to the producer
close func() // underlying producer's close function
}

func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
acbw.mu.Lock()
defer acbw.mu.Unlock()

// Look up existing producer from this builder.
pData := acbw.producers[pb]
if pData == nil {
// Not found; create a new one and add it to the producers map.
p, close := pb.Build(acbw)
pData = &refCountedProducer{producer: p, close: close}
acbw.producers[pb] = pData
}
// Account for this new reference.
pData.refs++

// Return a cleanup function wrapped in a OnceFunc to remove this reference
// and delete the refCountedProducer from the map if the total reference
// count goes to zero.
unref := func() {
acbw.mu.Lock()
pData.refs--
if pData.refs == 0 {
defer pData.close() // Run outside the acbw mutex
delete(acbw.producers, pb)
}
acbw.mu.Unlock()
}
return pData.producer, grpcsync.OnceFunc(unref)
}
vendor/google.golang.org/grpc/balancer_wrapper.go | 341 (generated, vendored, new file)
@@ -0,0 +1,341 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package grpc

import (
"context"
"fmt"
"sync"

"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
)

// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
// ccBalancerWrapper implements methods corresponding to the ones on the
// balancer.Balancer interface. The ClientConn is free to call these methods
// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
// to the Balancer happen in order by performing them in the serializer, without
// any mutexes held.
//
// ccBalancerWrapper also implements the balancer.ClientConn interface and is
// passed to the Balancer implementations. It invokes unexported methods on the
// ClientConn to handle these calls from the Balancer.
//
// It uses the gracefulswitch.Balancer internally to ensure that balancer
// switches happen in a graceful manner.
type ccBalancerWrapper struct {
// The following fields are initialized when the wrapper is created and are
// read-only afterwards, and therefore can be accessed without a mutex.
cc *ClientConn
opts balancer.BuildOptions
serializer *grpcsync.CallbackSerializer
serializerCancel context.CancelFunc

// The following fields are only accessed within the serializer or during
// initialization.
curBalancerName string
balancer *gracefulswitch.Balancer

// The following field is protected by mu. Caller must take cc.mu before
// taking mu.
mu sync.Mutex
closed bool
}

// newCCBalancerWrapper creates a new balancer wrapper in idle state. The
// underlying balancer is not created until the updateClientConnState() method
// is invoked.
func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
ctx, cancel := context.WithCancel(cc.ctx)
ccb := &ccBalancerWrapper{
cc: cc,
opts: balancer.BuildOptions{
DialCreds: cc.dopts.copts.TransportCredentials,
CredsBundle: cc.dopts.copts.CredsBundle,
Dialer: cc.dopts.copts.Dialer,
Authority: cc.authority,
CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParent: cc.channelz,
Target: cc.parsedTarget,
},
serializer: grpcsync.NewCallbackSerializer(ctx),
serializerCancel: cancel,
}
ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
return ccb
}

// updateClientConnState is invoked by grpc to push a ClientConnState update to
// the underlying balancer. This is always executed from the serializer, so
// it is safe to call into the balancer here.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
errCh := make(chan error)
ok := ccb.serializer.Schedule(func(ctx context.Context) {
defer close(errCh)
if ctx.Err() != nil || ccb.balancer == nil {
return
}
name := gracefulswitch.ChildName(ccs.BalancerConfig)
if ccb.curBalancerName != name {
ccb.curBalancerName = name
channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name)
}
err := ccb.balancer.UpdateClientConnState(*ccs)
if logger.V(2) && err != nil {
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
}
errCh <- err
})
if !ok {
return nil
}
return <-errCh
}

// resolverError is invoked by grpc to push a resolver error to the underlying
// balancer. The call to the balancer is executed from the serializer.
func (ccb *ccBalancerWrapper) resolverError(err error) {
ccb.serializer.Schedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
ccb.balancer.ResolverError(err)
})
}

// close initiates async shutdown of the wrapper. cc.mu must be held when
// calling this function. To determine the wrapper has finished shutting down,
// the channel should block on ccb.serializer.Done() without cc.mu held.
func (ccb *ccBalancerWrapper) close() {
ccb.mu.Lock()
ccb.closed = true
ccb.mu.Unlock()
channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
ccb.serializer.Schedule(func(context.Context) {
if ccb.balancer == nil {
return
}
ccb.balancer.Close()
ccb.balancer = nil
})
ccb.serializerCancel()
}

// exitIdle invokes the balancer's exitIdle method in the serializer.
func (ccb *ccBalancerWrapper) exitIdle() {
ccb.serializer.Schedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
ccb.balancer.ExitIdle()
})
}

func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
ccb.cc.mu.Lock()
defer ccb.cc.mu.Unlock()

ccb.mu.Lock()
if ccb.closed {
ccb.mu.Unlock()
return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
}
ccb.mu.Unlock()

if len(addrs) == 0 {
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
}
ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
if err != nil {
channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
return nil, err
}
acbw := &acBalancerWrapper{
ccb: ccb,
ac: ac,
producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
stateListener: opts.StateListener,
}
ac.acbw = acbw
return acbw, nil
}

func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
// The graceful switch balancer will never call this.
logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
}

func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
}
acbw.UpdateAddresses(addrs)
}

func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
ccb.cc.mu.Lock()
defer ccb.cc.mu.Unlock()
if ccb.cc.conns == nil {
// The CC has been closed; ignore this update.
return
}

ccb.mu.Lock()
if ccb.closed {
ccb.mu.Unlock()
return
}
ccb.mu.Unlock()
// Update picker before updating state. Even though the ordering here does
// not matter, it can lead to multiple calls of Pick in the common start-up
// case where we wait for ready and then perform an RPC. If the picker is
// updated later, we could call the "connecting" picker when the state is
// updated, and then call the "ready" picker after the picker gets updated.

// Note that there is no need to check if the balancer wrapper was closed,
// as we know the graceful switch LB policy will not call cc if it has been
// closed.
ccb.cc.pickerWrapper.updatePicker(s.Picker)
ccb.cc.csMgr.updateState(s.ConnectivityState)
}

func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
ccb.cc.mu.RLock()
defer ccb.cc.mu.RUnlock()

ccb.mu.Lock()
if ccb.closed {
ccb.mu.Unlock()
return
}
ccb.mu.Unlock()
ccb.cc.resolveNowLocked(o)
}

func (ccb *ccBalancerWrapper) Target() string {
return ccb.cc.target
}

// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct {
ac *addrConn // read-only
ccb *ccBalancerWrapper // read-only
stateListener func(balancer.SubConnState)

mu sync.Mutex
producers map[balancer.ProducerBuilder]*refCountedProducer
}

// updateState is invoked by grpc to push a subConn state update to the
// underlying balancer.
func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
acbw.ccb.serializer.Schedule(func(ctx context.Context) {
if ctx.Err() != nil || acbw.ccb.balancer == nil {
return
}
// Even though it is optional for balancers, gracefulswitch ensures
// opts.StateListener is set, so this cannot ever be nil.
// TODO: delete this comment when UpdateSubConnState is removed.
acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
})
}

func (acbw *acBalancerWrapper) String() string {
return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID)
}

func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
acbw.ac.updateAddrs(addrs)
}

func (acbw *acBalancerWrapper) Connect() {
go acbw.ac.connect()
}

func (acbw *acBalancerWrapper) Shutdown() {
acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
}

// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
// ready, blocks until it is or ctx expires. Returns an error when the context
// expires or the addrConn is shut down.
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
transport, err := acbw.ac.getTransport(ctx)
if err != nil {
return nil, err
}
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
}

// Invoke performs a unary RPC. If the addrConn is not ready, returns
// errSubConnNotReady.
func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
if err != nil {
return err
}
if err := cs.SendMsg(args); err != nil {
return err
}
return cs.RecvMsg(reply)
}

type refCountedProducer struct {
producer balancer.Producer
refs int // number of current refs to the producer
close func() // underlying producer's close function
}

func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
acbw.mu.Lock()
defer acbw.mu.Unlock()

// Look up existing producer from this builder.
pData := acbw.producers[pb]
if pData == nil {
// Not found; create a new one and add it to the producers map.
p, close := pb.Build(acbw)
pData = &refCountedProducer{producer: p, close: close}
acbw.producers[pb] = pData
}
// Account for this new reference.
pData.refs++

// Return a cleanup function wrapped in a OnceFunc to remove this reference
// and delete the refCountedProducer from the map if the total reference
// count goes to zero.
unref := func() {
acbw.mu.Lock()
pData.refs--
if pData.refs == 0 {
defer pData.close() // Run outside the acbw mutex
delete(acbw.producers, pb)
}
acbw.mu.Unlock()
}
return pData.producer, grpcsync.OnceFunc(unref)
}
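The new wrapper leans on grpcsync.CallbackSerializer so that every call into the balancer runs in order on a single goroutine instead of under a mutex. That serializer is internal to grpc-go; the following is only a rough standalone sketch of the same idea, not the actual grpcsync implementation:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// serializer runs scheduled callbacks one at a time, in FIFO order, on a
// single goroutine, so the callbacks themselves never need a mutex.
type serializer struct {
	ctx       context.Context
	callbacks chan func(context.Context)
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{ctx: ctx, callbacks: make(chan func(context.Context), 128)}
	go func() {
		for {
			select {
			case cb := <-s.callbacks:
				cb(ctx)
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}

// schedule queues cb for execution; it reports false after ctx is cancelled.
func (s *serializer) schedule(cb func(context.Context)) bool {
	select {
	case <-s.ctx.Done():
		return false
	case s.callbacks <- cb:
		return true
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	s := newSerializer(ctx)
	for i := 0; i < 3; i++ {
		i := i
		s.schedule(func(context.Context) { fmt.Println("callback", i) })
	}
	time.Sleep(100 * time.Millisecond) // crude wait so the demo callbacks run
	cancel()
}
```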
vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go | 6 (generated, vendored)
@@ -18,8 +18,8 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v4.22.0
// protoc-gen-go v1.34.1
// protoc v4.25.2
// source: grpc/binlog/v1/binarylog.proto

package grpc_binarylog_v1
@@ -430,7 +430,7 @@ type ClientHeader struct {
MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
// A single process may be used to run multiple virtual
// servers with different identities.
// The authority is the name of such a server identitiy.
// The authority is the name of such a server identity.
// It is typically a portion of the URI in the form of
// <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
vendor/google.golang.org/grpc/clientconn.go | 791 (generated, vendored)
(File diff suppressed because it is too large.)
vendor/google.golang.org/grpc/codegen.sh | 17 (generated, vendored)
@@ -1,17 +0,0 @@
#!/usr/bin/env bash

# This script serves as an example to demonstrate how to generate the gRPC-Go
# interface and the related messages from .proto file.
#
# It assumes the installation of i) Google proto buffer compiler at
# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
# not, please install them first.
#
# We recommend running this script at $GOPATH/src.
#
# If this is not what you need, feel free to make your own scripts. Again, this
# script is for demonstration purpose.
#
proto=$1
protoc --go_out=plugins=grpc:. $proto
vendor/google.golang.org/grpc/codes/codes.go | 10 (generated, vendored)
@@ -25,7 +25,13 @@ import (
"strconv"
)

// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
// A Code is a status code defined according to the [gRPC documentation].
//
// Only the codes defined as consts in this package are valid codes. Do not use
// other code values. Behavior of other codes is implementation-specific and
// interoperability between implementations is not guaranteed.
//
// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
type Code uint32

const (
@@ -229,7 +235,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {

if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
if ci >= _maxCode {
return fmt.Errorf("invalid code: %q", ci)
return fmt.Errorf("invalid code: %d", ci)
}

*c = Code(ci)
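The Code type documented above is what client code normally inspects when an RPC fails. A small hedged example of branching on a returned error's status code with the public status and codes packages (the RPC call that produced the error is assumed):

```go
package example

import (
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// classify shows the usual way to branch on a gRPC error's status code.
func classify(err error) {
	switch status.Code(err) { // returns codes.OK when err is nil
	case codes.OK:
		log.Println("call succeeded")
	case codes.NotFound:
		log.Println("resource does not exist")
	case codes.Unavailable:
		log.Println("transient transport failure; may be safe to retry")
	default:
		log.Printf("unexpected error: %v", err)
	}
}
```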
vendor/google.golang.org/grpc/credentials/credentials.go | 4 (generated, vendored)
@@ -28,9 +28,9 @@ import (
"fmt"
"net"

"github.com/golang/protobuf/proto"
"google.golang.org/grpc/attributes"
icredentials "google.golang.org/grpc/internal/credentials"
"google.golang.org/protobuf/proto"
)

// PerRPCCredentials defines the common interface for the credentials which need to
@@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
}

// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
//
// This API is experimental.
vendor/google.golang.org/grpc/credentials/tls.go | 109 (generated, vendored)
@@ -27,9 +27,13 @@ import (
"net/url"
"os"

"google.golang.org/grpc/grpclog"
credinternal "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/internal/envconfig"
)

var logger = grpclog.Component("credentials")

// TLSInfo contains the auth information for a TLS authenticated connection.
// It implements the AuthInfo interface.
type TLSInfo struct {
@@ -44,10 +48,25 @@ func (t TLSInfo) AuthType() string {
return "tls"
}

// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
func cipherSuiteLookup(cipherSuiteID uint16) string {
for _, s := range tls.CipherSuites() {
if s.ID == cipherSuiteID {
return s.Name
}
}
for _, s := range tls.InsecureCipherSuites() {
if s.ID == cipherSuiteID {
return s.Name
}
}
return fmt.Sprintf("unknown ID: %v", cipherSuiteID)
}

// GetSecurityValue returns security info requested by channelz.
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
v := &TLSChannelzSecurityValue{
StandardName: cipherSuiteLookup[t.State.CipherSuite],
StandardName: cipherSuiteLookup(t.State.CipherSuite),
}
// Currently there's no way to get LocalCertificate info from tls package.
if len(t.State.PeerCertificates) > 0 {
@@ -97,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
conn.Close()
return nil, nil, ctx.Err()
}

// The negotiated protocol can be either of the following:
// 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since
// it is the only protocol advertised by the client during the handshake.
// The tls library ensures that the server chooses a protocol advertised
// by the client.
// 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement
// for using HTTP/2 over TLS. We can terminate the connection immediately.
np := conn.ConnectionState().NegotiatedProtocol
if np == "" {
if envconfig.EnforceALPNEnabled {
conn.Close()
return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
}
logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
}
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
@@ -116,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
conn.Close()
return nil, nil, err
}
cs := conn.ConnectionState()
// The negotiated application protocol can be empty only if the client doesn't
// support ALPN. In such cases, we can close the connection since ALPN is required
// for using HTTP/2 over TLS.
if cs.NegotiatedProtocol == "" {
if envconfig.EnforceALPNEnabled {
conn.Close()
return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
} else if logger.V(2) {
logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
}
}
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
State: cs,
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
@@ -138,10 +185,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
return nil
}

// The following cipher suites are forbidden for use with HTTP/2 by
// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
var tls12ForbiddenCipherSuites = map[uint16]struct{}{
tls.TLS_RSA_WITH_AES_128_CBC_SHA: {},
tls.TLS_RSA_WITH_AES_256_CBC_SHA: {},
tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {},
tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {},
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {},
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {},
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {},
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {},
}

// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
// If the user did not configure a MinVersion and did not configure a
// MaxVersion < 1.2, use MinVersion=1.2, which is required by
// https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) {
tc.config.MinVersion = tls.VersionTLS12
}
// If the user did not configure CipherSuites, use all "secure" cipher
// suites reported by the TLS package, but remove some explicitly forbidden
// by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
if tc.config.CipherSuites == nil {
for _, cs := range tls.CipherSuites() {
if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID)
}
}
}
return tc
}

@@ -205,32 +281,3 @@ type TLSChannelzSecurityValue struct {
LocalCertificate []byte
RemoteCertificate []byte
}

var cipherSuiteLookup = map[uint16]string{
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256",
tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384",
tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256",
}
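NewTLS above now defaults to TLS 1.2+ and filters the HTTP/2-forbidden cipher suites when the caller leaves those fields zero. A hedged sketch of the typical call site, passing a mostly-empty tls.Config so these defaults apply; the server name and target are illustrative:

```go
package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Leave MinVersion and CipherSuites unset so NewTLS fills in the
	// RFC 7540-compatible defaults described in the diff.
	creds := credentials.NewTLS(&tls.Config{ServerName: "example.internal"})

	conn, err := grpc.NewClient("dns:///example.internal:443",
		grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}
```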
150
vendor/google.golang.org/grpc/dialoptions.go
generated
vendored
150
vendor/google.golang.org/grpc/dialoptions.go
generated
vendored
@@ -21,6 +21,7 @@ package grpc
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/backoff"
|
||||
@@ -36,6 +37,11 @@ import (
|
||||
"google.golang.org/grpc/stats"
|
||||
)
|
||||
|
||||
const (
|
||||
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges
|
||||
defaultMaxCallAttempts = 5
|
||||
)
|
||||
|
||||
func init() {
|
||||
internal.AddGlobalDialOptions = func(opt ...DialOption) {
|
||||
globalDialOptions = append(globalDialOptions, opt...)
|
||||
@@ -43,9 +49,18 @@ func init() {
|
||||
internal.ClearGlobalDialOptions = func() {
|
||||
globalDialOptions = nil
|
||||
}
|
||||
internal.AddGlobalPerTargetDialOptions = func(opt any) {
|
||||
if ptdo, ok := opt.(perTargetDialOption); ok {
|
||||
globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo)
|
||||
}
|
||||
}
|
||||
internal.ClearGlobalPerTargetDialOptions = func() {
|
||||
globalPerTargetDialOptions = nil
|
||||
}
|
||||
internal.WithBinaryLogger = withBinaryLogger
|
||||
internal.JoinDialOptions = newJoinDialOption
|
||||
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
|
||||
internal.WithRecvBufferPool = withRecvBufferPool
|
||||
}
|
||||
|
||||
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
||||
@@ -63,12 +78,11 @@ type dialOptions struct {
|
||||
block bool
|
||||
returnLastError bool
|
||||
timeout time.Duration
|
||||
scChan <-chan ServiceConfig
|
||||
authority string
|
||||
binaryLogger binarylog.Logger
|
||||
copts transport.ConnectOptions
|
||||
callOptions []CallOption
|
||||
channelzParentID *channelz.Identifier
|
||||
channelzParent channelz.Identifier
|
||||
disableServiceConfig bool
|
||||
disableRetry bool
|
||||
disableHealthCheck bool
|
||||
@@ -79,6 +93,8 @@ type dialOptions struct {
|
||||
resolvers []resolver.Builder
|
||||
idleTimeout time.Duration
|
||||
recvBufferPool SharedBufferPool
|
||||
defaultScheme string
|
||||
maxCallAttempts int
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
@@ -88,6 +104,19 @@ type DialOption interface {
|
||||
|
||||
var globalDialOptions []DialOption
|
||||
|
||||
// perTargetDialOption takes a parsed target and returns a dial option to apply.
//
// This gets called after NewClient() parses the target, and allows per-target
// configuration to be set through the returned DialOption. The DialOption will
// not take effect if it specifies a resolver builder, as that option is
// factored in while parsing the target.
type perTargetDialOption interface {
// DialOptionForTarget returns a DialOption to apply to the parsed target.
DialOptionForTarget(parsedTarget url.URL) DialOption
}
|
||||
|
||||
var globalPerTargetDialOptions []perTargetDialOption
|
||||
|
||||
// EmptyDialOption does not alter the dial configuration. It can be embedded in
|
||||
// another structure to build custom dial options.
|
||||
//
|
||||
@@ -154,9 +183,7 @@ func WithSharedWriteBuffer(val bool) DialOption {
|
||||
}
|
||||
|
||||
// WithWriteBufferSize determines how much data can be batched before doing a
|
||||
// write on the wire. The corresponding memory allocation for this buffer will
|
||||
// be twice the size to keep syscalls low. The default value for this buffer is
|
||||
// 32KB.
|
||||
// write on the wire. The default value for this buffer is 32KB.
|
||||
//
|
||||
// Zero or negative values will disable the write buffer such that each write
|
||||
// will be on underlying connection. Note: A Send call may not directly
|
||||
@@ -250,19 +277,6 @@ func WithDecompressor(dc Decompressor) DialOption {
|
||||
})
|
||||
}
|
||||
|
||||
// WithServiceConfig returns a DialOption which has a channel to read the
|
||||
// service configuration.
|
||||
//
|
||||
// Deprecated: service config should be received through name resolver or via
|
||||
// WithDefaultServiceConfig, as specified at
|
||||
// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be
|
||||
// removed in a future 1.x release.
|
||||
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.scChan = c
|
||||
})
|
||||
}
|
||||
|
||||
// WithConnectParams configures the ClientConn to use the provided ConnectParams
|
||||
// for creating and maintaining connections to servers.
|
||||
//
|
||||
@@ -314,6 +328,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
|
||||
//
|
||||
// Use of this feature is not recommended. For more information, please see:
|
||||
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
||||
//
|
||||
// Deprecated: this DialOption is not supported by NewClient.
|
||||
// Will be supported throughout 1.x.
|
||||
func WithBlock() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.block = true
|
||||
@@ -328,10 +345,8 @@ func WithBlock() DialOption {
|
||||
// Use of this feature is not recommended. For more information, please see:
|
||||
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
// Deprecated: this DialOption is not supported by NewClient.
|
||||
// Will be supported throughout 1.x.
|
||||
func WithReturnConnectionError() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.block = true
|
||||
@@ -401,8 +416,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption {
|
||||
// WithTimeout returns a DialOption that configures a timeout for dialing a
|
||||
// ClientConn initially. This is valid if and only if WithBlock() is present.
|
||||
//
|
||||
// Deprecated: use DialContext instead of Dial and context.WithTimeout
|
||||
// instead. Will be supported throughout 1.x.
|
||||
// Deprecated: this DialOption is not supported by NewClient.
|
||||
// Will be supported throughout 1.x.
|
||||
func WithTimeout(d time.Duration) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.timeout = d
|
||||
@@ -413,6 +428,17 @@ func WithTimeout(d time.Duration) DialOption {
|
||||
// connections. If FailOnNonTempDialError() is set to true, and an error is
|
||||
// returned by f, gRPC checks the error's Temporary() method to decide if it
|
||||
// should try to reconnect to the network address.
|
||||
//
|
||||
// Note: All supported releases of Go (as of December 2023) override the OS
|
||||
// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
|
||||
// with OS defaults for keepalive time and interval, use a net.Dialer that sets
|
||||
// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
|
||||
// option to true from the Control field. For a concrete example of how to do
|
||||
// this, see internal.NetDialerWithTCPKeepalive().
|
||||
//
|
||||
// For more information, please see [issue 23459] in the Go github repo.
|
||||
//
|
||||
// [issue 23459]: https://github.com/golang/go/issues/23459
|
||||
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.Dialer = f
|
||||
@@ -473,9 +499,8 @@ func withBinaryLogger(bl binarylog.Logger) DialOption {
|
||||
// Use of this feature is not recommended. For more information, please see:
|
||||
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// Deprecated: this DialOption is not supported by NewClient.
|
||||
// This API may be changed or removed in a
|
||||
// later release.
|
||||
func FailOnNonTempDialError(f bool) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
@@ -487,7 +512,7 @@ func FailOnNonTempDialError(f bool) DialOption {
|
||||
// the RPCs.
|
||||
func WithUserAgent(s string) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.UserAgent = s
|
||||
o.copts.UserAgent = s + " " + grpcUA
|
||||
})
|
||||
}
|
||||
|
||||
@@ -557,9 +582,9 @@ func WithAuthority(a string) DialOption {
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func WithChannelzParentID(id *channelz.Identifier) DialOption {
|
||||
func WithChannelzParentID(c channelz.Identifier) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.channelzParentID = id
|
||||
o.channelzParent = c
|
||||
})
|
||||
}
|
||||
|
||||
@@ -604,12 +629,22 @@ func WithDisableRetry() DialOption {
|
||||
})
|
||||
}
|
||||
|
||||
// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum
|
||||
// (uncompressed) size of header list that the client is prepared to accept.
|
||||
type MaxHeaderListSizeDialOption struct {
|
||||
MaxHeaderListSize uint32
|
||||
}
|
||||
|
||||
func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) {
|
||||
do.copts.MaxHeaderListSize = &o.MaxHeaderListSize
|
||||
}
|
||||
|
||||
// WithMaxHeaderListSize returns a DialOption that specifies the maximum
|
||||
// (uncompressed) size of header list that the client is prepared to accept.
|
||||
func WithMaxHeaderListSize(s uint32) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.MaxHeaderListSize = &s
|
||||
})
|
||||
return MaxHeaderListSizeDialOption{
|
||||
MaxHeaderListSize: s,
|
||||
}
|
||||
}
|
||||
|
||||
// WithDisableHealthCheck disables the LB channel health checking for all
|
||||
@@ -637,18 +672,22 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption {
|
||||
|
||||
func defaultDialOptions() dialOptions {
|
||||
return dialOptions{
|
||||
healthCheckFunc: internal.HealthCheckFunc,
|
||||
copts: transport.ConnectOptions{
|
||||
WriteBufferSize: defaultWriteBufSize,
|
||||
ReadBufferSize: defaultReadBufSize,
|
||||
WriteBufferSize: defaultWriteBufSize,
|
||||
UseProxy: true,
|
||||
UserAgent: grpcUA,
|
||||
},
|
||||
recvBufferPool: nopBufferPool{},
|
||||
idleTimeout: 30 * time.Minute,
|
||||
bs: internalbackoff.DefaultExponential,
|
||||
healthCheckFunc: internal.HealthCheckFunc,
|
||||
idleTimeout: 30 * time.Minute,
|
||||
recvBufferPool: nopBufferPool{},
|
||||
defaultScheme: "dns",
|
||||
maxCallAttempts: defaultMaxCallAttempts,
|
||||
}
|
||||
}
|
||||
|
||||
// withGetMinConnectDeadline specifies the function that clientconn uses to
|
||||
// withMinConnectDeadline specifies the function that clientconn uses to
|
||||
// get minConnectDeadline. This can be used to make connection attempts happen
|
||||
// faster/slower.
|
||||
//
|
||||
@@ -659,6 +698,14 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
|
||||
})
|
||||
}
|
||||
|
||||
// withDefaultScheme is used to allow Dial to use "passthrough" as the default
|
||||
// name resolver, while NewClient uses "dns" otherwise.
|
||||
func withDefaultScheme(s string) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.defaultScheme = s
|
||||
})
|
||||
}
|
||||
|
||||
// WithResolvers allows a list of resolver implementations to be registered
|
||||
// locally with the ClientConn without needing to be globally registered via
|
||||
// resolver.Register. They will be matched against the scheme used for the
|
||||
@@ -694,6 +741,23 @@ func WithIdleTimeout(d time.Duration) DialOption {
|
||||
})
|
||||
}
|
||||
|
||||
// WithMaxCallAttempts returns a DialOption that configures the maximum number
|
||||
// of attempts per call (including retries and hedging) using the channel.
|
||||
// Service owners may specify a higher value for these parameters, but higher
|
||||
// values will be treated as equal to the maximum value by the client
|
||||
// implementation. This mitigates security concerns related to the service
|
||||
// config being transferred to the client via DNS.
|
||||
//
|
||||
// A value of 5 will be used if this dial option is not set or n < 2.
|
||||
func WithMaxCallAttempts(n int) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
if n < 2 {
|
||||
n = defaultMaxCallAttempts
|
||||
}
|
||||
o.maxCallAttempts = n
|
||||
})
|
||||
}
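A consolidated usage sketch for several of the options touched in this file, including the new WithMaxCallAttempts. The target address is hypothetical, and insecure credentials are used only to keep the example self-contained.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("dns:///example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUserAgent("my-app/1.2"),     // prepended to the grpc-go user agent
		grpc.WithIdleTimeout(10*time.Minute), // default is 30 minutes
		grpc.WithMaxCallAttempts(3),          // values < 2 fall back to the default of 5
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}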
|
||||
|
||||
// WithRecvBufferPool returns a DialOption that configures the ClientConn
|
||||
// to use the provided shared buffer pool for parsing incoming messages. Depending
|
||||
// on the application's workload, this could result in reduced memory allocation.
|
||||
@@ -705,11 +769,13 @@ func WithIdleTimeout(d time.Duration) DialOption {
|
||||
// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
|
||||
// cases, the shared buffer pool will be ignored.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
|
||||
// v1.60.0 or later.
|
||||
func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
|
||||
return withRecvBufferPool(bufferPool)
|
||||
}
|
||||
|
||||
func withRecvBufferPool(bufferPool SharedBufferPool) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.recvBufferPool = bufferPool
|
||||
})
|
||||
|
||||
24
vendor/google.golang.org/grpc/encoding/proto/proto.go
generated
vendored
@@ -23,8 +23,9 @@ package proto
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/encoding"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/protoadapt"
|
||||
)
|
||||
|
||||
// Name is the name registered for the proto compressor.
|
||||
@@ -38,21 +39,34 @@ func init() {
|
||||
type codec struct{}
|
||||
|
||||
func (codec) Marshal(v any) ([]byte, error) {
|
||||
vv, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
vv := messageV2Of(v)
|
||||
if vv == nil {
|
||||
return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
|
||||
}
|
||||
|
||||
return proto.Marshal(vv)
|
||||
}
|
||||
|
||||
func (codec) Unmarshal(data []byte, v any) error {
|
||||
vv, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
vv := messageV2Of(v)
|
||||
if vv == nil {
|
||||
return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
|
||||
}
|
||||
|
||||
return proto.Unmarshal(data, vv)
|
||||
}
|
||||
|
||||
func messageV2Of(v any) proto.Message {
|
||||
switch v := v.(type) {
|
||||
case protoadapt.MessageV1:
|
||||
return protoadapt.MessageV2Of(v)
|
||||
case protoadapt.MessageV2:
|
||||
return v
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (codec) Name() string {
|
||||
return Name
|
||||
}
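A standalone sketch of the same adaptation pattern, using a well-known type so it compiles on its own; it is not part of the vendored codec, just an illustration of how protoadapt bridges legacy (MessageV1) and current (MessageV2) messages before marshaling.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/durationpb"
)

// marshalAny mirrors the codec above: adapt the value to a v2 proto.Message,
// then marshal it, or report a descriptive error for non-proto values.
func marshalAny(v any) ([]byte, error) {
	var m proto.Message
	switch v := v.(type) {
	case protoadapt.MessageV2:
		m = v
	case protoadapt.MessageV1:
		m = protoadapt.MessageV2Of(v)
	default:
		return nil, fmt.Errorf("message is %T, want proto.Message", v)
	}
	return proto.Marshal(m)
}

func main() {
	b, err := marshalAny(durationpb.New(3 * time.Second))
	fmt.Println(len(b), err)
}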
|
||||
|
||||
4
vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
generated
vendored
@@ -17,8 +17,8 @@
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.22.0
|
||||
// protoc-gen-go v1.34.1
|
||||
// protoc v4.25.2
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
package grpc_health_v1
|
||||
|
||||
26
vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
generated
vendored
@@ -17,8 +17,8 @@
|
||||
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.3.0
|
||||
// - protoc v4.22.0
|
||||
// - protoc-gen-go-grpc v1.4.0
|
||||
// - protoc v4.25.2
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
package grpc_health_v1
|
||||
@@ -32,8 +32,8 @@ import (
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
// Requires gRPC-Go v1.62.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion8
|
||||
|
||||
const (
|
||||
Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
|
||||
@@ -43,6 +43,10 @@ const (
|
||||
// HealthClient is the client API for Health service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
//
|
||||
// Health is gRPC's mechanism for checking whether a server is able to handle
|
||||
// RPCs. Its semantics are documented in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
|
||||
type HealthClient interface {
|
||||
// Check gets the health of the specified service. If the requested service
|
||||
// is unknown, the call will fail with status NOT_FOUND. If the caller does
|
||||
@@ -81,8 +85,9 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
|
||||
}
|
||||
|
||||
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(HealthCheckResponse)
|
||||
err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -90,11 +95,12 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
|
||||
}
|
||||
|
||||
func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...)
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &healthWatchClient{stream}
|
||||
x := &healthWatchClient{ClientStream: stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -124,6 +130,10 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
|
||||
// HealthServer is the server API for Health service.
|
||||
// All implementations should embed UnimplementedHealthServer
|
||||
// for forward compatibility
|
||||
//
|
||||
// Health is gRPC's mechanism for checking whether a server is able to handle
|
||||
// RPCs. Its semantics are documented in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
|
||||
type HealthServer interface {
|
||||
// Check gets the health of the specified service. If the requested service
|
||||
// is unknown, the call will fail with status NOT_FOUND. If the caller does
|
||||
@@ -198,7 +208,7 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
|
||||
return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream})
|
||||
}
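A minimal client-side sketch of calling the generated Health service, assuming a hypothetical server address that registers the standard health service.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.NewClient("dns:///healthcheck.example:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// An empty Service name queries the overall server health.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatalf("health check failed: %v", err)
	}
	log.Printf("status: %v", resp.GetStatus())
}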
|
||||
|
||||
type Health_WatchServer interface {
|
||||
|
||||
4
vendor/google.golang.org/grpc/internal/backoff/backoff.go
generated
vendored
@@ -25,10 +25,10 @@ package backoff
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
grpcbackoff "google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
)
|
||||
|
||||
// Strategy defines the methodology for backing off after a grpc connection
|
||||
@@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration {
|
||||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
|
||||
backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
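This hunk swaps the internal grpcrand helper for math/rand. For illustration, a standalone sketch of the same jittered exponential backoff computation; the parameters follow gRPC's documented defaults (1s base, 1.6 multiplier, 0.2 jitter, 120s cap) but are assumptions of this sketch, not taken from this hunk.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff computes a jittered exponential delay for the given retry count.
func backoff(retries int) time.Duration {
	const (
		baseDelay  = 1 * time.Second
		multiplier = 1.6
		jitter     = 0.2
		maxDelay   = 120 * time.Second
	)
	if retries == 0 {
		return baseDelay
	}
	cur := float64(baseDelay)
	for i := 0; i < retries && cur < float64(maxDelay); i++ {
		cur *= multiplier
	}
	if cur > float64(maxDelay) {
		cur = float64(maxDelay)
	}
	// Randomize so that a cluster of retries does not operate in lockstep.
	cur *= 1 + jitter*(rand.Float64()*2-1)
	if cur < 0 {
		return 0
	}
	return time.Duration(cur)
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(i, backoff(i))
	}
}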
|
||||
|
||||
82
vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2024 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package gracefulswitch
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
type lbConfig struct {
|
||||
serviceconfig.LoadBalancingConfig
|
||||
|
||||
childBuilder balancer.Builder
|
||||
childConfig serviceconfig.LoadBalancingConfig
|
||||
}
|
||||
|
||||
func ChildName(l serviceconfig.LoadBalancingConfig) string {
|
||||
return l.(*lbConfig).childBuilder.Name()
|
||||
}
|
||||
|
||||
// ParseConfig parses a child config list and returns a LB config for the
|
||||
// gracefulswitch Balancer.
|
||||
//
|
||||
// cfg is expected to be a json.RawMessage containing a JSON array of LB policy
// names and configs, in the same format as the "loadBalancingConfig" field in
// ServiceConfig. It returns a type that should be passed to
|
||||
// UpdateClientConnState in the BalancerConfig field.
|
||||
func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
|
||||
var lbCfg []map[string]json.RawMessage
|
||||
if err := json.Unmarshal(cfg, &lbCfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i, e := range lbCfg {
|
||||
if len(e) != 1 {
|
||||
return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i)
|
||||
}
|
||||
|
||||
var name string
|
||||
var jsonCfg json.RawMessage
|
||||
for name, jsonCfg = range e {
|
||||
}
|
||||
|
||||
builder := balancer.Get(name)
|
||||
if builder == nil {
|
||||
// Skip unregistered balancer names.
|
||||
continue
|
||||
}
|
||||
|
||||
parser, ok := builder.(balancer.ConfigParser)
|
||||
if !ok {
|
||||
// This is a valid child with no config.
|
||||
return &lbConfig{childBuilder: builder}, nil
|
||||
}
|
||||
|
||||
cfg, err := parser.ParseConfig(jsonCfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err)
|
||||
}
|
||||
return &lbConfig{childBuilder: builder, childConfig: cfg}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg))
|
||||
}
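For reference, the child list this parser consumes has the same shape as the public "loadBalancingConfig" service config field. A hedged configuration sketch (the first policy name is deliberately made up; unregistered names are skipped, so round_robin is the one selected):

const exampleServiceConfig = `{
  "loadBalancingConfig": [
    {"some_unregistered_policy": {}},
    {"round_robin": {}}
  ]
}`

// A client could supply this as a default service config, e.g.:
//
//	conn, err := grpc.NewClient(target,
//		grpc.WithTransportCredentials(creds),
//		grpc.WithDefaultServiceConfig(exampleServiceConfig))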
|
||||
44
vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
generated
vendored
@@ -94,14 +94,23 @@ func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
|
||||
// process is not complete when this method returns. This method must be called
|
||||
// synchronously alongside the rest of the balancer.Balancer methods this
|
||||
// Graceful Switch Balancer implements.
|
||||
//
|
||||
// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState
|
||||
// to cause the Balancer to automatically change to the new child when necessary.
|
||||
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
|
||||
_, err := gsb.switchTo(builder)
|
||||
return err
|
||||
}
|
||||
|
||||
func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) {
|
||||
gsb.mu.Lock()
|
||||
if gsb.closed {
|
||||
gsb.mu.Unlock()
|
||||
return errBalancerClosed
|
||||
return nil, errBalancerClosed
|
||||
}
|
||||
bw := &balancerWrapper{
|
||||
gsb: gsb,
|
||||
builder: builder,
|
||||
gsb: gsb,
|
||||
lastState: balancer.State{
|
||||
ConnectivityState: connectivity.Connecting,
|
||||
Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
|
||||
@@ -129,7 +138,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
|
||||
gsb.balancerCurrent = nil
|
||||
}
|
||||
gsb.mu.Unlock()
|
||||
return balancer.ErrBadResolverState
|
||||
return nil, balancer.ErrBadResolverState
|
||||
}
|
||||
|
||||
// This write doesn't need to take gsb.mu because this field never gets read
|
||||
@@ -138,7 +147,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
|
||||
// bw.Balancer field will never be forwarded to until this SwitchTo()
|
||||
// function returns.
|
||||
bw.Balancer = newBalancer
|
||||
return nil
|
||||
return bw, nil
|
||||
}
|
||||
|
||||
// Returns nil if the graceful switch balancer is closed.
|
||||
@@ -152,12 +161,32 @@ func (gsb *Balancer) latestBalancer() *balancerWrapper {
|
||||
}
|
||||
|
||||
// UpdateClientConnState forwards the update to the latest balancer created.
|
||||
//
|
||||
// If the state's BalancerConfig is the config returned by a call to
|
||||
// gracefulswitch.ParseConfig, then this function will automatically SwitchTo
|
||||
// the balancer indicated by the config before forwarding its config to it, if
|
||||
// necessary.
|
||||
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
|
||||
// The resolver data is only relevant to the most recent LB Policy.
|
||||
balToUpdate := gsb.latestBalancer()
|
||||
gsbCfg, ok := state.BalancerConfig.(*lbConfig)
|
||||
if ok {
|
||||
// Switch to the child in the config unless it is already active.
|
||||
if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() {
|
||||
var err error
|
||||
balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not switch to new child balancer: %w", err)
|
||||
}
|
||||
}
|
||||
// Unwrap the child balancer's config.
|
||||
state.BalancerConfig = gsbCfg.childConfig
|
||||
}
|
||||
|
||||
if balToUpdate == nil {
|
||||
return errBalancerClosed
|
||||
}
|
||||
|
||||
// Perform this call without gsb.mu to prevent deadlocks if the child calls
|
||||
// back into the channel. The latest balancer can never be closed during a
|
||||
// call from the channel, even without gsb.mu held.
|
||||
@@ -169,6 +198,10 @@ func (gsb *Balancer) ResolverError(err error) {
|
||||
// The resolver data is only relevant to the most recent LB Policy.
|
||||
balToUpdate := gsb.latestBalancer()
|
||||
if balToUpdate == nil {
|
||||
gsb.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: connectivity.TransientFailure,
|
||||
Picker: base.NewErrPicker(err),
|
||||
})
|
||||
return
|
||||
}
|
||||
// Perform this call without gsb.mu to prevent deadlocks if the child calls
|
||||
@@ -261,7 +294,8 @@ func (gsb *Balancer) Close() {
|
||||
// graceful switch logic.
|
||||
type balancerWrapper struct {
|
||||
balancer.Balancer
|
||||
gsb *Balancer
|
||||
gsb *Balancer
|
||||
builder balancer.Builder
|
||||
|
||||
lastState balancer.State
|
||||
subconns map[balancer.SubConn]bool // subconns created by this balancer
|
||||
|
||||
15
vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
generated
vendored
@@ -25,11 +25,12 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
type callIDGenerator struct {
|
||||
@@ -64,7 +65,7 @@ type TruncatingMethodLogger struct {
|
||||
callID uint64
|
||||
idWithinCallGen *callIDGenerator
|
||||
|
||||
sink Sink // TODO(blog): make this plugable.
|
||||
sink Sink // TODO(blog): make this pluggable.
|
||||
}
|
||||
|
||||
// NewTruncatingMethodLogger returns a new truncating method logger.
|
||||
@@ -79,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
|
||||
callID: idGen.next(),
|
||||
idWithinCallGen: &callIDGenerator{},
|
||||
|
||||
sink: DefaultSink, // TODO(blog): make it plugable.
|
||||
sink: DefaultSink, // TODO(blog): make it pluggable.
|
||||
}
|
||||
}
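This file moves from the deprecated github.com/golang/protobuf ptypes helpers to the protobuf-go well-known-type packages. A minimal sketch of the replacement calls, separate from the vendored code:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// timestamppb.Now() replaces ptypes.TimestampProto(time.Now()) and never
	// returns an error; durationpb.New replaces ptypes.DurationProto.
	ts := timestamppb.Now()
	d := durationpb.New(250 * time.Millisecond)
	fmt.Println(ts.AsTime().UTC(), d.AsDuration())
}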
|
||||
|
||||
@@ -88,7 +89,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
|
||||
// in TruncatingMethodLogger as possible.
|
||||
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
|
||||
m := c.toProto()
|
||||
timestamp, _ := ptypes.TimestampProto(time.Now())
|
||||
timestamp := timestamppb.Now()
|
||||
m.Timestamp = timestamp
|
||||
m.CallId = ml.callID
|
||||
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
|
||||
@@ -178,7 +179,7 @@ func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
|
||||
Authority: c.Authority,
|
||||
}
|
||||
if c.Timeout > 0 {
|
||||
clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
|
||||
clientHeader.Timeout = durationpb.New(c.Timeout)
|
||||
}
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||||
@@ -396,7 +397,7 @@ func metadataKeyOmit(key string) bool {
|
||||
switch key {
|
||||
case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
|
||||
return true
|
||||
case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
|
||||
case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(key, "grpc-")
|
||||
|
||||
2
vendor/google.golang.org/grpc/internal/binarylog/sink.go
generated
vendored
@@ -25,8 +25,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
41
vendor/google.golang.org/grpc/internal/buffer/unbounded.go
generated
vendored
@@ -18,7 +18,10 @@
|
||||
// Package buffer provides an implementation of an unbounded buffer.
|
||||
package buffer
|
||||
|
||||
import "sync"
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Unbounded is an implementation of an unbounded buffer which does not use
|
||||
// extra goroutines. This is typically used for passing updates from one entity
|
||||
@@ -36,6 +39,7 @@ import "sync"
|
||||
type Unbounded struct {
|
||||
c chan any
|
||||
closed bool
|
||||
closing bool
|
||||
mu sync.Mutex
|
||||
backlog []any
|
||||
}
|
||||
@@ -45,32 +49,32 @@ func NewUnbounded() *Unbounded {
|
||||
return &Unbounded{c: make(chan any, 1)}
|
||||
}
|
||||
|
||||
var errBufferClosed = errors.New("Put called on closed buffer.Unbounded")
|
||||
|
||||
// Put adds t to the unbounded buffer.
|
||||
func (b *Unbounded) Put(t any) {
|
||||
func (b *Unbounded) Put(t any) error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.closed {
|
||||
return
|
||||
if b.closing {
|
||||
return errBufferClosed
|
||||
}
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- t:
|
||||
return
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, t)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Load sends the earliest buffered data, if any, onto the read channel
|
||||
// returned by Get(). Users are expected to call this every time they read a
|
||||
// Load sends the earliest buffered data, if any, onto the read channel returned
|
||||
// by Get(). Users are expected to call this every time they successfully read a
|
||||
// value from the read channel.
|
||||
func (b *Unbounded) Load() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.closed {
|
||||
return
|
||||
}
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
@@ -78,6 +82,8 @@ func (b *Unbounded) Load() {
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
} else if b.closing && !b.closed {
|
||||
close(b.c)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -88,18 +94,23 @@ func (b *Unbounded) Load() {
|
||||
// send the next buffered value onto the channel if there is any.
|
||||
//
|
||||
// If the unbounded buffer is closed, the read channel returned by this method
|
||||
// is closed.
|
||||
// is closed after all data is drained.
|
||||
func (b *Unbounded) Get() <-chan any {
|
||||
return b.c
|
||||
}
|
||||
|
||||
// Close closes the unbounded buffer.
|
||||
// Close closes the unbounded buffer. No subsequent data may be Put(), and the
|
||||
// channel returned from Get() will be closed after all the data is read and
|
||||
// Load() is called for the final time.
|
||||
func (b *Unbounded) Close() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.closed {
|
||||
if b.closing {
|
||||
return
|
||||
}
|
||||
b.closed = true
|
||||
close(b.c)
|
||||
b.closing = true
|
||||
if len(b.backlog) == 0 {
|
||||
b.closed = true
|
||||
close(b.c)
|
||||
}
|
||||
}
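A sketch of the Put/Get/Load protocol with the new drain-on-close behavior, written as it might appear in a test inside grpc-go itself (this package is internal and cannot be imported from outside the module).

package buffer_test

import (
	"fmt"
	"testing"

	"google.golang.org/grpc/internal/buffer"
)

func TestPutLoadDrain(t *testing.T) {
	b := buffer.NewUnbounded()

	done := make(chan struct{})
	go func() {
		defer close(done)
		// Read a value, handle it, then call Load() to release the next one.
		// With the new semantics the channel is closed only after the buffer
		// is closed and fully drained, so this range loop ends cleanly.
		for v := range b.Get() {
			fmt.Println("got:", v)
			b.Load()
		}
	}()

	for i := 0; i < 3; i++ {
		// Put now returns an error instead of silently dropping data once
		// Close() has been called.
		if err := b.Put(i); err != nil {
			t.Fatalf("Put(%d) failed: %v", i, err)
		}
	}
	b.Close()
	<-done
}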
|
||||
|
||||
255
vendor/google.golang.org/grpc/internal/channelz/channel.go
generated
vendored
Normal file
@@ -0,0 +1,255 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2024 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
)
|
||||
|
||||
// Channel represents a channel within channelz, which includes metrics and
|
||||
// internal channelz data, such as channelz id, child list, etc.
|
||||
type Channel struct {
|
||||
Entity
|
||||
// ID is the channelz id of this channel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this channel.
|
||||
RefName string
|
||||
|
||||
closeCalled bool
|
||||
nestedChans map[int64]string
|
||||
subChans map[int64]string
|
||||
Parent *Channel
|
||||
trace *ChannelTrace
|
||||
// traceRefCount is the number of trace events that reference this channel.
|
||||
// Non-zero traceRefCount means the trace of this channel cannot be deleted.
|
||||
traceRefCount int32
|
||||
|
||||
ChannelMetrics ChannelMetrics
|
||||
}
|
||||
|
||||
// Implemented to make Channel implement the Identifier interface used for
|
||||
// nesting.
|
||||
func (c *Channel) channelzIdentifier() {}
|
||||
|
||||
func (c *Channel) String() string {
|
||||
if c.Parent == nil {
|
||||
return fmt.Sprintf("Channel #%d", c.ID)
|
||||
}
|
||||
return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
|
||||
}
|
||||
|
||||
func (c *Channel) id() int64 {
|
||||
return c.ID
|
||||
}
|
||||
|
||||
func (c *Channel) SubChans() map[int64]string {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
return copyMap(c.subChans)
|
||||
}
|
||||
|
||||
func (c *Channel) NestedChans() map[int64]string {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
return copyMap(c.nestedChans)
|
||||
}
|
||||
|
||||
func (c *Channel) Trace() *ChannelTrace {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
return c.trace.copy()
|
||||
}
|
||||
|
||||
type ChannelMetrics struct {
|
||||
// The current connectivity state of the channel.
|
||||
State atomic.Pointer[connectivity.State]
|
||||
// The target this channel originally tried to connect to. May be absent.
|
||||
Target atomic.Pointer[string]
|
||||
// The number of calls started on the channel.
|
||||
CallsStarted atomic.Int64
|
||||
// The number of calls that have completed with an OK status.
|
||||
CallsSucceeded atomic.Int64
|
||||
// The number of calls that have completed with a non-OK status.
|
||||
CallsFailed atomic.Int64
|
||||
// The last time a call was started on the channel.
|
||||
LastCallStartedTimestamp atomic.Int64
|
||||
}
|
||||
|
||||
// CopyFrom copies the metrics in o to c. For testing only.
|
||||
func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
|
||||
c.State.Store(o.State.Load())
|
||||
c.Target.Store(o.Target.Load())
|
||||
c.CallsStarted.Store(o.CallsStarted.Load())
|
||||
c.CallsSucceeded.Store(o.CallsSucceeded.Load())
|
||||
c.CallsFailed.Store(o.CallsFailed.Load())
|
||||
c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
|
||||
}
|
||||
|
||||
// Equal returns true iff the metrics of c are the same as the metrics of o.
|
||||
// For testing only.
|
||||
func (c *ChannelMetrics) Equal(o any) bool {
|
||||
oc, ok := o.(*ChannelMetrics)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if (c.State.Load() == nil) != (oc.State.Load() == nil) {
|
||||
return false
|
||||
}
|
||||
if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() {
|
||||
return false
|
||||
}
|
||||
if (c.Target.Load() == nil) != (oc.Target.Load() == nil) {
|
||||
return false
|
||||
}
|
||||
if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() {
|
||||
return false
|
||||
}
|
||||
return c.CallsStarted.Load() == oc.CallsStarted.Load() &&
|
||||
c.CallsFailed.Load() == oc.CallsFailed.Load() &&
|
||||
c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() &&
|
||||
c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load()
|
||||
}
|
||||
|
||||
func strFromPointer(s *string) string {
|
||||
if s == nil {
|
||||
return ""
|
||||
}
|
||||
return *s
|
||||
}
|
||||
|
||||
func (c *ChannelMetrics) String() string {
|
||||
return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
|
||||
c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
|
||||
)
|
||||
}
|
||||
|
||||
func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
|
||||
c := &ChannelMetrics{}
|
||||
c.State.Store(&state)
|
||||
c.Target.Store(&target)
|
||||
c.CallsStarted.Store(started)
|
||||
c.CallsSucceeded.Store(succeeded)
|
||||
c.CallsFailed.Store(failed)
|
||||
c.LastCallStartedTimestamp.Store(timestamp)
|
||||
return c
|
||||
}
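A small standalone sketch of the lock-free metrics pattern used above: typed atomics let concurrent writers (per-RPC code paths) and readers (channelz queries) share counters without a mutex. The struct and target string here are illustrative, not part of the vendored code.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type callMetrics struct {
	target       atomic.Pointer[string]
	callsStarted atomic.Int64
	lastCallTime atomic.Int64 // unix nanos
}

func main() {
	var m callMetrics
	t := "dns:///example.internal:50051" // hypothetical target
	m.target.Store(&t)

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			m.callsStarted.Add(1)
			m.lastCallTime.Store(time.Now().UnixNano())
		}()
	}
	wg.Wait()
	fmt.Println(*m.target.Load(), m.callsStarted.Load())
}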
|
||||
|
||||
func (c *Channel) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *SubChannel:
|
||||
c.subChans[id] = v.RefName
|
||||
case *Channel:
|
||||
c.nestedChans[id] = v.RefName
|
||||
default:
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Channel) deleteChild(id int64) {
|
||||
delete(c.subChans, id)
|
||||
delete(c.nestedChans, id)
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *Channel) triggerDelete() {
|
||||
c.closeCalled = true
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *Channel) getParentID() int64 {
|
||||
if c.Parent == nil {
|
||||
return -1
|
||||
}
|
||||
return c.Parent.ID
|
||||
}
|
||||
|
||||
// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
|
||||
// deleting the channel reference from its parent's child list.
|
||||
//
|
||||
// In order for a channel to be deleted from the tree, removal of the corresponding grpc object must
// have been invoked, and the channel must not have any children left.
|
||||
//
|
||||
// The returned boolean value indicates whether the channel has been successfully deleted from tree.
|
||||
func (c *Channel) deleteSelfFromTree() (deleted bool) {
|
||||
if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
|
||||
return false
|
||||
}
|
||||
// not top channel
|
||||
if c.Parent != nil {
|
||||
c.Parent.deleteChild(c.ID)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
|
||||
// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
|
||||
// channel, and its memory will be garbage collected.
|
||||
//
|
||||
// The trace reference count of the channel must be 0 in order for it to be deleted from the map. The
// channel tracing gRFC specifies that as long as some other trace holds a reference to an entity,
// the trace of the referenced entity must not be deleted. In order to release the resource allocated
|
||||
// by grpc, the reference to the grpc object is reset to a dummy object.
|
||||
//
|
||||
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
|
||||
//
|
||||
// It returns a bool to indicate whether the channel can be safely deleted from map.
|
||||
func (c *Channel) deleteSelfFromMap() (delete bool) {
|
||||
return c.getTraceRefCount() == 0
|
||||
}
|
||||
|
||||
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||||
// parent's child list.
|
||||
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||||
// will return entry not found error.
|
||||
func (c *Channel) deleteSelfIfReady() {
|
||||
if !c.deleteSelfFromTree() {
|
||||
return
|
||||
}
|
||||
if !c.deleteSelfFromMap() {
|
||||
return
|
||||
}
|
||||
db.deleteEntry(c.ID)
|
||||
c.trace.clear()
|
||||
}
|
||||
|
||||
func (c *Channel) getChannelTrace() *ChannelTrace {
|
||||
return c.trace
|
||||
}
|
||||
|
||||
func (c *Channel) incrTraceRefCount() {
|
||||
atomic.AddInt32(&c.traceRefCount, 1)
|
||||
}
|
||||
|
||||
func (c *Channel) decrTraceRefCount() {
|
||||
atomic.AddInt32(&c.traceRefCount, -1)
|
||||
}
|
||||
|
||||
func (c *Channel) getTraceRefCount() int {
|
||||
i := atomic.LoadInt32(&c.traceRefCount)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func (c *Channel) getRefName() string {
|
||||
return c.RefName
|
||||
}
|
||||
402
vendor/google.golang.org/grpc/internal/channelz/channelmap.go
generated
vendored
Normal file
@@ -0,0 +1,402 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// entry represents a node in the channelz database.
|
||||
type entry interface {
|
||||
// addChild adds a child e, whose channelz id is id to child list
|
||||
addChild(id int64, e entry)
|
||||
// deleteChild deletes a child with channelz id to be id from child list
|
||||
deleteChild(id int64)
|
||||
// triggerDelete tries to delete self from channelz database. However, if
|
||||
// child list is not empty, then deletion from the database is on hold until
|
||||
// the last child is deleted from database.
|
||||
triggerDelete()
|
||||
// deleteSelfIfReady check whether triggerDelete() has been called before,
|
||||
// and whether child list is now empty. If both conditions are met, then
|
||||
// delete self from database.
|
||||
deleteSelfIfReady()
|
||||
// getParentID returns parent ID of the entry. 0 value parent ID means no parent.
|
||||
getParentID() int64
|
||||
Entity
|
||||
}
|
||||
|
||||
// channelMap is the storage data structure for channelz.
//
// Methods of channelMap can be divided into two categories with respect to
// locking.
//
// 1. Methods that acquire the global lock.
// 2. Methods that can only be called when the global lock is held.
//
// A method of the second type must always be called from within a method of
// the first type.
|
||||
type channelMap struct {
|
||||
mu sync.RWMutex
|
||||
topLevelChannels map[int64]struct{}
|
||||
channels map[int64]*Channel
|
||||
subChannels map[int64]*SubChannel
|
||||
sockets map[int64]*Socket
|
||||
servers map[int64]*Server
|
||||
}
|
||||
|
||||
func newChannelMap() *channelMap {
|
||||
return &channelMap{
|
||||
topLevelChannels: make(map[int64]struct{}),
|
||||
channels: make(map[int64]*Channel),
|
||||
subChannels: make(map[int64]*SubChannel),
|
||||
sockets: make(map[int64]*Socket),
|
||||
servers: make(map[int64]*Server),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelMap) addServer(id int64, s *Server) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
s.cm = c
|
||||
c.servers[id] = s
|
||||
}
|
||||
|
||||
func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
cn.trace.cm = c
|
||||
c.channels[id] = cn
|
||||
if isTopChannel {
|
||||
c.topLevelChannels[id] = struct{}{}
|
||||
} else if p := c.channels[pid]; p != nil {
|
||||
p.addChild(id, cn)
|
||||
} else {
|
||||
logger.Infof("channel %d references invalid parent ID %d", id, pid)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
sc.trace.cm = c
|
||||
c.subChannels[id] = sc
|
||||
if p := c.channels[pid]; p != nil {
|
||||
p.addChild(id, sc)
|
||||
} else {
|
||||
logger.Infof("subchannel %d references invalid parent ID %d", id, pid)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelMap) addSocket(s *Socket) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
s.cm = c
|
||||
c.sockets[s.ID] = s
|
||||
if s.Parent == nil {
|
||||
logger.Infof("normal socket %d has no parent", s.ID)
|
||||
}
|
||||
s.Parent.(entry).addChild(s.ID, s)
|
||||
}
|
||||
|
||||
// removeEntry triggers the removal of an entry, which may not actually delete
// the entry immediately: deletion waits until all of its children are deleted
// and no other entity's channel trace references it. Removal may lead to a
// chain of entry deletions. For example, deleting the last socket of a
// gracefully shutting down server also leads to the server being deleted.
|
||||
func (c *channelMap) removeEntry(id int64) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.findEntry(id).triggerDelete()
|
||||
}
|
||||
|
||||
// tracedChannel represents tracing operations which are present on both
|
||||
// channels and subChannels.
|
||||
type tracedChannel interface {
|
||||
getChannelTrace() *ChannelTrace
|
||||
incrTraceRefCount()
|
||||
decrTraceRefCount()
|
||||
getRefName() string
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller
|
||||
func (c *channelMap) decrTraceRefCount(id int64) {
|
||||
e := c.findEntry(id)
|
||||
if v, ok := e.(tracedChannel); ok {
|
||||
v.decrTraceRefCount()
|
||||
e.deleteSelfIfReady()
|
||||
}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller.
|
||||
func (c *channelMap) findEntry(id int64) entry {
|
||||
if v, ok := c.channels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok := c.subChannels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok := c.servers[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok := c.sockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
return &dummyEntry{idNotFound: id}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller
|
||||
//
|
||||
// deleteEntry deletes an entry from the channelMap. Before calling this method,
|
||||
// caller must check this entry is ready to be deleted, i.e removeEntry() has
|
||||
// been called on it, and no children still exist.
|
||||
func (c *channelMap) deleteEntry(id int64) entry {
|
||||
if v, ok := c.sockets[id]; ok {
|
||||
delete(c.sockets, id)
|
||||
return v
|
||||
}
|
||||
if v, ok := c.subChannels[id]; ok {
|
||||
delete(c.subChannels, id)
|
||||
return v
|
||||
}
|
||||
if v, ok := c.channels[id]; ok {
|
||||
delete(c.channels, id)
|
||||
delete(c.topLevelChannels, id)
|
||||
return v
|
||||
}
|
||||
if v, ok := c.servers[id]; ok {
|
||||
delete(c.servers, id)
|
||||
return v
|
||||
}
|
||||
return &dummyEntry{idNotFound: id}
|
||||
}
|
||||
|
||||
func (c *channelMap) traceEvent(id int64, desc *TraceEvent) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
child := c.findEntry(id)
|
||||
childTC, ok := child.(tracedChannel)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
|
||||
if desc.Parent != nil {
|
||||
parent := c.findEntry(child.getParentID())
|
||||
var chanType RefChannelType
|
||||
switch child.(type) {
|
||||
case *Channel:
|
||||
chanType = RefChannel
|
||||
case *SubChannel:
|
||||
chanType = RefSubChannel
|
||||
}
|
||||
if parentTC, ok := parent.(tracedChannel); ok {
|
||||
parentTC.getChannelTrace().append(&traceEvent{
|
||||
Desc: desc.Parent.Desc,
|
||||
Severity: desc.Parent.Severity,
|
||||
Timestamp: time.Now(),
|
||||
RefID: id,
|
||||
RefName: childTC.getRefName(),
|
||||
RefType: chanType,
|
||||
})
|
||||
childTC.incrTraceRefCount()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type int64Slice []int64
|
||||
|
||||
func (s int64Slice) Len() int { return len(s) }
|
||||
func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
|
||||
func copyMap(m map[int64]string) map[int64]string {
|
||||
n := make(map[int64]string)
|
||||
for k, v := range m {
|
||||
n[k] = v
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntriesPerPage
|
||||
}
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
l := int64(len(c.topLevelChannels))
|
||||
ids := make([]int64, 0, l)
|
||||
|
||||
for k := range c.topLevelChannels {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
end := true
|
||||
var t []*Channel
|
||||
for _, v := range ids[idx:] {
|
||||
if len(t) == maxResults {
|
||||
end = false
|
||||
break
|
||||
}
|
||||
if cn, ok := c.channels[v]; ok {
|
||||
t = append(t, cn)
|
||||
}
|
||||
}
|
||||
return t, end
|
||||
}
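The paging pattern used by getTopChannels and getServers above — sort the ids, binary-search for the start id, and report whether the listing is complete — can be sketched on its own. This simplified mirror skips the "id still present in the map" check the real code performs.

package main

import (
	"fmt"
	"sort"
)

// page returns up to maxResults ids that are >= startID, plus a flag telling
// whether the listing reached the end.
func page(sortedIDs []int64, startID int64, maxResults int) ([]int64, bool) {
	idx := sort.Search(len(sortedIDs), func(i int) bool { return sortedIDs[i] >= startID })
	end := idx + maxResults
	if end >= len(sortedIDs) {
		return sortedIDs[idx:], true
	}
	return sortedIDs[idx:end], false
}

func main() {
	ids := []int64{1, 3, 5, 8, 13, 21}
	fmt.Println(page(ids, 4, 2))  // [5 8] false
	fmt.Println(page(ids, 14, 5)) // [21] true
}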
|
||||
|
||||
func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntriesPerPage
|
||||
}
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
ids := make([]int64, 0, len(c.servers))
|
||||
for k := range c.servers {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
end := true
|
||||
var s []*Server
|
||||
for _, v := range ids[idx:] {
|
||||
if len(s) == maxResults {
|
||||
end = false
|
||||
break
|
||||
}
|
||||
if svr, ok := c.servers[v]; ok {
|
||||
s = append(s, svr)
|
||||
}
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntriesPerPage
|
||||
}
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
svr, ok := c.servers[id]
|
||||
if !ok {
|
||||
// server with id doesn't exist.
|
||||
return nil, true
|
||||
}
|
||||
svrskts := svr.sockets
|
||||
ids := make([]int64, 0, len(svrskts))
|
||||
sks := make([]*Socket, 0, min(len(svrskts), maxResults))
|
||||
for k := range svrskts {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
|
||||
end := true
|
||||
for _, v := range ids[idx:] {
|
||||
if len(sks) == maxResults {
|
||||
end = false
|
||||
break
|
||||
}
|
||||
if ns, ok := c.sockets[v]; ok {
|
||||
sks = append(sks, ns)
|
||||
}
|
||||
}
|
||||
return sks, end
|
||||
}
|
||||
|
||||
func (c *channelMap) getChannel(id int64) *Channel {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
return c.channels[id]
|
||||
}
|
||||
|
||||
func (c *channelMap) getSubChannel(id int64) *SubChannel {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
return c.subChannels[id]
|
||||
}
|
||||
|
||||
func (c *channelMap) getSocket(id int64) *Socket {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
return c.sockets[id]
|
||||
}
|
||||
|
||||
func (c *channelMap) getServer(id int64) *Server {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
return c.servers[id]
|
||||
}
|
||||
|
||||
type dummyEntry struct {
|
||||
// dummyEntry is a fake entry to handle entry not found case.
|
||||
idNotFound int64
|
||||
Entity
|
||||
}
|
||||
|
||||
func (d *dummyEntry) String() string {
|
||||
return fmt.Sprintf("non-existent entity #%d", d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) ID() int64 { return d.idNotFound }
|
||||
|
||||
func (d *dummyEntry) addChild(id int64, e entry) {
|
||||
// Note: It is possible for a normal program to reach here under a race
// condition. For example, there could be a race between ClientConn.Close()
// info being propagated to addrConn and http2Client. ClientConn.Close()
// cancels the context, which causes http2Client to error out. The error is
// caught by the transport monitor before addrConn.tearDown() is called
// inside ClientConn.Close(). The addrConn therefore creates a new
// transport, and when the new transport is registered in channelz, its
// parent addrConn could have already been torn down and deleted from
// channelz tracking, and so the code reaches here.
|
||||
logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) deleteChild(id int64) {
|
||||
// It is possible for a normal program to reach here under race condition.
|
||||
// Refer to the example described in addChild().
|
||||
logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) triggerDelete() {
|
||||
logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||
}
|
||||
|
||||
func (*dummyEntry) deleteSelfIfReady() {
|
||||
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
|
||||
}
|
||||
|
||||
func (*dummyEntry) getParentID() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Entity is implemented by all channelz types.
|
||||
type Entity interface {
|
||||
isEntity()
|
||||
fmt.Stringer
|
||||
id() int64
|
||||
}
|
||||
702
vendor/google.golang.org/grpc/internal/channelz/funcs.go
generated
vendored
@@ -16,25 +16,16 @@
*
*/

// Package channelz defines APIs for enabling channelz service, entry
// Package channelz defines internal APIs for enabling channelz service, entry
// registration/deletion, and accessing channelz data. It also defines channelz
// metric struct formats.
//
// All APIs in this package are experimental.
package channelz

import (
"errors"
"sort"
"sync"
"sync/atomic"
"time"

"google.golang.org/grpc/grpclog"
)

const (
defaultMaxTraceEntry int32 = 30
"google.golang.org/grpc/internal"
)

var (
@@ -42,19 +33,20 @@ var (
// outside this package except by tests.
IDGen IDGenerator

db dbWrapper
// EntryPerPage defines the number of channelz entries to be shown on a web page.
EntryPerPage = int64(50)
curState int32
maxTraceEntry = defaultMaxTraceEntry
db *channelMap = newChannelMap()
// EntriesPerPage defines the number of channelz entries to be shown on a web page.
EntriesPerPage = 50
curState int32
)

// TurnOn turns on channelz data collection.
func TurnOn() {
if !IsOn() {
db.set(newChannelMap())
IDGen.Reset()
atomic.StoreInt32(&curState, 1)
atomic.StoreInt32(&curState, 1)
}

func init() {
internal.ChannelzTurnOffForTesting = func() {
atomic.StoreInt32(&curState, 0)
}
}

@@ -63,49 +55,15 @@ func IsOn() bool {
|
||||
return atomic.LoadInt32(&curState) == 1
|
||||
}
|
||||
|
||||
// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel).
|
||||
// Setting it to 0 will disable channel tracing.
|
||||
func SetMaxTraceEntry(i int32) {
|
||||
atomic.StoreInt32(&maxTraceEntry, i)
|
||||
}
|
||||
|
||||
// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default.
|
||||
func ResetMaxTraceEntryToDefault() {
|
||||
atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
|
||||
}
|
||||
|
||||
func getMaxTraceEntry() int {
|
||||
i := atomic.LoadInt32(&maxTraceEntry)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
// dbWrapper wraps around a reference to internal channelz data storage, and
// provides synchronized functionality to set and get the reference.
|
||||
type dbWrapper struct {
|
||||
mu sync.RWMutex
|
||||
DB *channelMap
|
||||
}
|
||||
|
||||
func (d *dbWrapper) set(db *channelMap) {
|
||||
d.mu.Lock()
|
||||
d.DB = db
|
||||
d.mu.Unlock()
|
||||
}
|
||||
|
||||
func (d *dbWrapper) get() *channelMap {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
return d.DB
|
||||
}
|
||||
|
||||
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a
|
||||
// boolean indicating whether there's more top channels to be queried for.
|
||||
//
|
||||
// The arg id specifies that only top channel with id at or above it will be included
|
||||
// in the result. The returned slice is up to a length of the arg maxResults or
|
||||
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
|
||||
return db.get().GetTopChannels(id, maxResults)
|
||||
// The arg id specifies that only top channel with id at or above it will be
|
||||
// included in the result. The returned slice is up to a length of the arg
|
||||
// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending
|
||||
// id order.
|
||||
func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) {
|
||||
return db.getTopChannels(id, maxResults)
|
||||
}
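The paging contract above is easiest to see in a loop: start at id, get back at most maxResults (or EntriesPerPage) entries, and use the returned bool to decide whether to keep going. A minimal sketch of an in-package caller, purely illustrative and not part of this change:

    // Illustrative sketch: page through every top-level channel, EntriesPerPage at a time.
    func dumpTopChannels() {
        var startID int64
        for {
            channels, end := GetTopChannels(startID, 0) // 0 falls back to EntriesPerPage
            for _, ch := range channels {
                logger.Infof("top channel #%d (%s)", ch.ID, ch.RefName)
            }
            if end || len(channels) == 0 {
                return // listing exhausted
            }
            startID = channels[len(channels)-1].ID + 1 // resume just past the last ID seen
        }
    }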
|
||||
|
||||
// GetServers returns a slice of server's ServerMetric, along with a
|
||||
@@ -113,73 +71,69 @@ func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
|
||||
//
|
||||
// The arg id specifies that only server with id at or above it will be included
|
||||
// in the result. The returned slice is up to a length of the arg maxResults or
|
||||
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
|
||||
return db.get().GetServers(id, maxResults)
|
||||
// EntriesPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetServers(id int64, maxResults int) ([]*Server, bool) {
|
||||
return db.getServers(id, maxResults)
|
||||
}
|
||||
|
||||
// GetServerSockets returns a slice of server's (identified by id) normal socket's
|
||||
// SocketMetric, along with a boolean indicating whether there's more sockets to
|
||||
// SocketMetrics, along with a boolean indicating whether there's more sockets to
|
||||
// be queried for.
|
||||
//
|
||||
// The arg startID specifies that only sockets with id at or above it will be
|
||||
// included in the result. The returned slice is up to a length of the arg maxResults
|
||||
// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
|
||||
return db.get().GetServerSockets(id, startID, maxResults)
|
||||
// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
|
||||
return db.getServerSockets(id, startID, maxResults)
|
||||
}
|
||||
|
||||
// GetChannel returns the ChannelMetric for the channel (identified by id).
|
||||
func GetChannel(id int64) *ChannelMetric {
|
||||
return db.get().GetChannel(id)
|
||||
// GetChannel returns the Channel for the channel (identified by id).
|
||||
func GetChannel(id int64) *Channel {
|
||||
return db.getChannel(id)
|
||||
}
|
||||
|
||||
// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
|
||||
func GetSubChannel(id int64) *SubChannelMetric {
|
||||
return db.get().GetSubChannel(id)
|
||||
// GetSubChannel returns the SubChannel for the subchannel (identified by id).
|
||||
func GetSubChannel(id int64) *SubChannel {
|
||||
return db.getSubChannel(id)
|
||||
}
|
||||
|
||||
// GetSocket returns the SocketInternalMetric for the socket (identified by id).
|
||||
func GetSocket(id int64) *SocketMetric {
|
||||
return db.get().GetSocket(id)
|
||||
// GetSocket returns the Socket for the socket (identified by id).
|
||||
func GetSocket(id int64) *Socket {
|
||||
return db.getSocket(id)
|
||||
}
|
||||
|
||||
// GetServer returns the ServerMetric for the server (identified by id).
|
||||
func GetServer(id int64) *ServerMetric {
|
||||
return db.get().GetServer(id)
|
||||
func GetServer(id int64) *Server {
|
||||
return db.getServer(id)
|
||||
}
|
||||
|
||||
// RegisterChannel registers the given channel c in the channelz database with
|
||||
// ref as its reference name, and adds it to the child list of its parent
|
||||
// (identified by pid). pid == nil means no parent.
|
||||
// target as its target and reference name, and adds it to the child list of its
|
||||
// parent. parent == nil means no parent.
|
||||
//
|
||||
// Returns a unique channelz identifier assigned to this channel.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
|
||||
func RegisterChannel(parent *Channel, target string) *Channel {
|
||||
id := IDGen.genID()
|
||||
var parent int64
|
||||
isTopChannel := true
|
||||
if pid != nil {
|
||||
isTopChannel = false
|
||||
parent = pid.Int()
|
||||
}
|
||||
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefChannel, id, pid)
|
||||
return &Channel{ID: id}
|
||||
}
|
||||
|
||||
cn := &channel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
subChans: make(map[int64]string),
|
||||
isTopChannel := parent == nil
|
||||
|
||||
cn := &Channel{
|
||||
ID: id,
|
||||
RefName: target,
|
||||
nestedChans: make(map[int64]string),
|
||||
id: id,
|
||||
pid: parent,
|
||||
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||
subChans: make(map[int64]string),
|
||||
Parent: parent,
|
||||
trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())},
|
||||
}
|
||||
db.get().addChannel(id, cn, isTopChannel, parent)
|
||||
return newIdentifer(RefChannel, id, pid)
|
||||
cn.ChannelMetrics.Target.Store(&target)
|
||||
db.addChannel(id, cn, isTopChannel, cn.getParentID())
|
||||
return cn
|
||||
}
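RegisterChannel now takes the parent as a *Channel and returns the *Channel entry directly, so callers no longer juggle *Identifier values. A hedged sketch of the new wiring (targets and variable names are made up):

    // Illustrative sketch of the new parent-pointer registration flow.
    top := RegisterChannel(nil, "dns:///example.com:443") // nil parent => top-level channel
    nested := RegisterChannel(top, "nested-target")       // nested channel under top
    sub := RegisterSubChannel(top, "subchannel-ref")      // subchannel under top

    // Entries are now removed by their int64 IDs.
    RemoveEntry(sub.ID)
    RemoveEntry(nested.ID)
    RemoveEntry(top.ID)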
|
||||
|
||||
// RegisterSubChannel registers the given subChannel c in the channelz database
|
||||
@@ -189,555 +143,67 @@ func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
|
||||
// Returns a unique channelz identifier assigned to this subChannel.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
|
||||
if pid == nil {
|
||||
return nil, errors.New("a SubChannel's parent id cannot be nil")
|
||||
}
|
||||
func RegisterSubChannel(parent *Channel, ref string) *SubChannel {
|
||||
id := IDGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefSubChannel, id, pid), nil
|
||||
sc := &SubChannel{
|
||||
ID: id,
|
||||
RefName: ref,
|
||||
parent: parent,
|
||||
}
|
||||
|
||||
sc := &subChannel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
sockets: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid.Int(),
|
||||
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||
if !IsOn() {
|
||||
return sc
|
||||
}
|
||||
db.get().addSubChannel(id, sc, pid.Int())
|
||||
return newIdentifer(RefSubChannel, id, pid), nil
|
||||
|
||||
sc.sockets = make(map[int64]string)
|
||||
sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}
|
||||
db.addSubChannel(id, sc, parent.ID)
|
||||
return sc
|
||||
}
|
||||
|
||||
// RegisterServer registers the given server s in channelz database. It returns
|
||||
// the unique channelz tracking id assigned to this server.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterServer(s Server, ref string) *Identifier {
|
||||
func RegisterServer(ref string) *Server {
|
||||
id := IDGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefServer, id, nil)
|
||||
return &Server{ID: id}
|
||||
}
|
||||
|
||||
svr := &server{
|
||||
refName: ref,
|
||||
s: s,
|
||||
svr := &Server{
|
||||
RefName: ref,
|
||||
sockets: make(map[int64]string),
|
||||
listenSockets: make(map[int64]string),
|
||||
id: id,
|
||||
ID: id,
|
||||
}
|
||||
db.get().addServer(id, svr)
|
||||
return newIdentifer(RefServer, id, nil)
|
||||
db.addServer(id, svr)
|
||||
return svr
|
||||
}
|
||||
|
||||
// RegisterListenSocket registers the given listen socket s in channelz database
|
||||
// with ref as its reference name, and add it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this listen socket.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
|
||||
if pid == nil {
|
||||
return nil, errors.New("a ListenSocket's parent id cannot be 0")
|
||||
}
|
||||
id := IDGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefListenSocket, id, pid), nil
|
||||
}
|
||||
|
||||
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
|
||||
db.get().addListenSocket(id, ls, pid.Int())
|
||||
return newIdentifer(RefListenSocket, id, pid), nil
|
||||
}
|
||||
|
||||
// RegisterNormalSocket registers the given normal socket s in channelz database
|
||||
// RegisterSocket registers the given normal socket s in channelz database
|
||||
// with ref as its reference name, and adds it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this normal socket.
|
||||
// (identified by skt.Parent, which must be set). It returns the unique channelz
|
||||
// tracking id assigned to this normal socket.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
|
||||
if pid == nil {
|
||||
return nil, errors.New("a NormalSocket's parent id cannot be 0")
|
||||
func RegisterSocket(skt *Socket) *Socket {
|
||||
skt.ID = IDGen.genID()
|
||||
if IsOn() {
|
||||
db.addSocket(skt)
|
||||
}
|
||||
id := IDGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefNormalSocket, id, pid), nil
|
||||
}
|
||||
|
||||
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
|
||||
db.get().addNormalSocket(id, ns, pid.Int())
|
||||
return newIdentifer(RefNormalSocket, id, pid), nil
|
||||
return skt
|
||||
}
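RegisterSocket takes a pre-populated *Socket whose Parent is already set, instead of an interface plus a parent *Identifier. A rough sketch of how a transport might build one (the addresses and the owning subchannel are assumptions):

    // Illustrative sketch: register a normal socket under an existing SubChannel `sub`.
    skt := &Socket{
        SocketType: SocketTypeNormal,
        RefName:    "client-transport",
        Parent:     sub,        // parent entity; must be set before registering
        LocalAddr:  localAddr,  // net.Addr of the local end (assumed)
        RemoteAddr: remoteAddr, // net.Addr of the peer (assumed)
    }
    skt = RegisterSocket(skt) // assigns skt.ID; stored only if channelz is on
    defer RemoveEntry(skt.ID)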
|
||||
|
||||
// RemoveEntry removes an entry with unique channelz tracking id to be id from
|
||||
// channelz database.
|
||||
//
|
||||
// If channelz is not turned ON, this function is a no-op.
|
||||
func RemoveEntry(id *Identifier) {
|
||||
func RemoveEntry(id int64) {
|
||||
if !IsOn() {
|
||||
return
|
||||
}
|
||||
db.get().removeEntry(id.Int())
|
||||
}
|
||||
|
||||
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
|
||||
// the event to be added to the channel trace.
|
||||
//
|
||||
// The Parent field is optional. It is used for an event that will be recorded
|
||||
// in the entity's parent trace.
|
||||
type TraceEventDesc struct {
|
||||
Desc string
|
||||
Severity Severity
|
||||
Parent *TraceEventDesc
|
||||
}
|
||||
|
||||
// AddTraceEvent adds trace related to the entity with specified id, using the
|
||||
// provided TraceEventDesc.
|
||||
//
|
||||
// If channelz is not turned ON, this will simply log the event descriptions.
|
||||
func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
|
||||
// Log only the trace description associated with the bottom most entity.
|
||||
switch desc.Severity {
|
||||
case CtUnknown, CtInfo:
|
||||
l.InfoDepth(depth+1, withParens(id)+desc.Desc)
|
||||
case CtWarning:
|
||||
l.WarningDepth(depth+1, withParens(id)+desc.Desc)
|
||||
case CtError:
|
||||
l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
|
||||
}
|
||||
|
||||
if getMaxTraceEntry() == 0 {
|
||||
return
|
||||
}
|
||||
if IsOn() {
|
||||
db.get().traceEvent(id.Int(), desc)
|
||||
}
|
||||
}
|
||||
|
||||
// channelMap is the storage data structure for channelz.
// Methods of channelMap can be divided into two categories with respect to locking.
// 1. Methods that acquire the global lock.
// 2. Methods that can only be called when the global lock is held.
// A method of the second type must always be called inside a method of the first type.
|
||||
type channelMap struct {
|
||||
mu sync.RWMutex
|
||||
topLevelChannels map[int64]struct{}
|
||||
servers map[int64]*server
|
||||
channels map[int64]*channel
|
||||
subChannels map[int64]*subChannel
|
||||
listenSockets map[int64]*listenSocket
|
||||
normalSockets map[int64]*normalSocket
|
||||
}
|
||||
|
||||
func newChannelMap() *channelMap {
|
||||
return &channelMap{
|
||||
topLevelChannels: make(map[int64]struct{}),
|
||||
channels: make(map[int64]*channel),
|
||||
listenSockets: make(map[int64]*listenSocket),
|
||||
normalSockets: make(map[int64]*normalSocket),
|
||||
servers: make(map[int64]*server),
|
||||
subChannels: make(map[int64]*subChannel),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelMap) addServer(id int64, s *server) {
|
||||
c.mu.Lock()
|
||||
s.cm = c
|
||||
c.servers[id] = s
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
|
||||
c.mu.Lock()
|
||||
cn.cm = c
|
||||
cn.trace.cm = c
|
||||
c.channels[id] = cn
|
||||
if isTopChannel {
|
||||
c.topLevelChannels[id] = struct{}{}
|
||||
} else {
|
||||
c.findEntry(pid).addChild(id, cn)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
|
||||
c.mu.Lock()
|
||||
sc.cm = c
|
||||
sc.trace.cm = c
|
||||
c.subChannels[id] = sc
|
||||
c.findEntry(pid).addChild(id, sc)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
|
||||
c.mu.Lock()
|
||||
ls.cm = c
|
||||
c.listenSockets[id] = ls
|
||||
c.findEntry(pid).addChild(id, ls)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
|
||||
c.mu.Lock()
|
||||
ns.cm = c
|
||||
c.normalSockets[id] = ns
|
||||
c.findEntry(pid).addChild(id, ns)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to
|
||||
// wait on the deletion of its children and until no other entity's channel trace references it.
|
||||
// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully
|
||||
// shutting down server will lead to the server being also deleted.
|
||||
func (c *channelMap) removeEntry(id int64) {
|
||||
c.mu.Lock()
|
||||
c.findEntry(id).triggerDelete()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller
|
||||
func (c *channelMap) decrTraceRefCount(id int64) {
|
||||
e := c.findEntry(id)
|
||||
if v, ok := e.(tracedChannel); ok {
|
||||
v.decrTraceRefCount()
|
||||
e.deleteSelfIfReady()
|
||||
}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller.
|
||||
func (c *channelMap) findEntry(id int64) entry {
|
||||
var v entry
|
||||
var ok bool
|
||||
if v, ok = c.channels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.subChannels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.servers[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.listenSockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.normalSockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
return &dummyEntry{idNotFound: id}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller
|
||||
// deleteEntry simply deletes an entry from the channelMap. Before calling this
|
||||
// method, caller must check this entry is ready to be deleted, i.e removeEntry()
|
||||
// has been called on it, and no children still exist.
|
||||
// Conditionals are ordered by the expected frequency of deletion of each entity
|
||||
// type, in order to optimize performance.
|
||||
func (c *channelMap) deleteEntry(id int64) {
|
||||
var ok bool
|
||||
if _, ok = c.normalSockets[id]; ok {
|
||||
delete(c.normalSockets, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.subChannels[id]; ok {
|
||||
delete(c.subChannels, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.channels[id]; ok {
|
||||
delete(c.channels, id)
|
||||
delete(c.topLevelChannels, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.listenSockets[id]; ok {
|
||||
delete(c.listenSockets, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.servers[id]; ok {
|
||||
delete(c.servers, id)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
|
||||
c.mu.Lock()
|
||||
child := c.findEntry(id)
|
||||
childTC, ok := child.(tracedChannel)
|
||||
if !ok {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
|
||||
if desc.Parent != nil {
|
||||
parent := c.findEntry(child.getParentID())
|
||||
var chanType RefChannelType
|
||||
switch child.(type) {
|
||||
case *channel:
|
||||
chanType = RefChannel
|
||||
case *subChannel:
|
||||
chanType = RefSubChannel
|
||||
}
|
||||
if parentTC, ok := parent.(tracedChannel); ok {
|
||||
parentTC.getChannelTrace().append(&TraceEvent{
|
||||
Desc: desc.Parent.Desc,
|
||||
Severity: desc.Parent.Severity,
|
||||
Timestamp: time.Now(),
|
||||
RefID: id,
|
||||
RefName: childTC.getRefName(),
|
||||
RefType: chanType,
|
||||
})
|
||||
childTC.incrTraceRefCount()
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
type int64Slice []int64
|
||||
|
||||
func (s int64Slice) Len() int { return len(s) }
|
||||
func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
|
||||
func copyMap(m map[int64]string) map[int64]string {
|
||||
n := make(map[int64]string)
|
||||
for k, v := range m {
|
||||
n[k] = v
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func min(a, b int64) int64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntryPerPage
|
||||
}
|
||||
c.mu.RLock()
|
||||
l := int64(len(c.topLevelChannels))
|
||||
ids := make([]int64, 0, l)
|
||||
cns := make([]*channel, 0, min(l, maxResults))
|
||||
|
||||
for k := range c.topLevelChannels {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := int64(0)
|
||||
var end bool
|
||||
var t []*ChannelMetric
|
||||
for i, v := range ids[idx:] {
|
||||
if count == maxResults {
|
||||
break
|
||||
}
|
||||
if cn, ok := c.channels[v]; ok {
|
||||
cns = append(cns, cn)
|
||||
t = append(t, &ChannelMetric{
|
||||
NestedChans: copyMap(cn.nestedChans),
|
||||
SubChans: copyMap(cn.subChans),
|
||||
})
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
|
||||
for i, cn := range cns {
|
||||
t[i].ChannelData = cn.c.ChannelzMetric()
|
||||
t[i].ID = cn.id
|
||||
t[i].RefName = cn.refName
|
||||
t[i].Trace = cn.trace.dumpData()
|
||||
}
|
||||
return t, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntryPerPage
|
||||
}
|
||||
c.mu.RLock()
|
||||
l := int64(len(c.servers))
|
||||
ids := make([]int64, 0, l)
|
||||
ss := make([]*server, 0, min(l, maxResults))
|
||||
for k := range c.servers {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := int64(0)
|
||||
var end bool
|
||||
var s []*ServerMetric
|
||||
for i, v := range ids[idx:] {
|
||||
if count == maxResults {
|
||||
break
|
||||
}
|
||||
if svr, ok := c.servers[v]; ok {
|
||||
ss = append(ss, svr)
|
||||
s = append(s, &ServerMetric{
|
||||
ListenSockets: copyMap(svr.listenSockets),
|
||||
})
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
|
||||
for i, svr := range ss {
|
||||
s[i].ServerData = svr.s.ChannelzMetric()
|
||||
s[i].ID = svr.id
|
||||
s[i].RefName = svr.refName
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntryPerPage
|
||||
}
|
||||
var svr *server
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if svr, ok = c.servers[id]; !ok {
|
||||
// server with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil, true
|
||||
}
|
||||
svrskts := svr.sockets
|
||||
l := int64(len(svrskts))
|
||||
ids := make([]int64, 0, l)
|
||||
sks := make([]*normalSocket, 0, min(l, maxResults))
|
||||
for k := range svrskts {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
|
||||
count := int64(0)
|
||||
var end bool
|
||||
for i, v := range ids[idx:] {
|
||||
if count == maxResults {
|
||||
break
|
||||
}
|
||||
if ns, ok := c.normalSockets[v]; ok {
|
||||
sks = append(sks, ns)
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
s := make([]*SocketMetric, 0, len(sks))
|
||||
for _, ns := range sks {
|
||||
sm := &SocketMetric{}
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
sm.ID = ns.id
|
||||
sm.RefName = ns.refName
|
||||
s = append(s, sm)
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetChannel(id int64) *ChannelMetric {
|
||||
cm := &ChannelMetric{}
|
||||
var cn *channel
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if cn, ok = c.channels[id]; !ok {
|
||||
// channel with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
cm.NestedChans = copyMap(cn.nestedChans)
|
||||
cm.SubChans = copyMap(cn.subChans)
|
||||
// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
|
||||
// holding the lock to prevent potential data race.
|
||||
chanCopy := cn.c
|
||||
c.mu.RUnlock()
|
||||
cm.ChannelData = chanCopy.ChannelzMetric()
|
||||
cm.ID = cn.id
|
||||
cm.RefName = cn.refName
|
||||
cm.Trace = cn.trace.dumpData()
|
||||
return cm
|
||||
}
|
||||
|
||||
func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
|
||||
cm := &SubChannelMetric{}
|
||||
var sc *subChannel
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if sc, ok = c.subChannels[id]; !ok {
|
||||
// subchannel with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
cm.Sockets = copyMap(sc.sockets)
|
||||
// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
|
||||
// holding the lock to prevent potential data race.
|
||||
chanCopy := sc.c
|
||||
c.mu.RUnlock()
|
||||
cm.ChannelData = chanCopy.ChannelzMetric()
|
||||
cm.ID = sc.id
|
||||
cm.RefName = sc.refName
|
||||
cm.Trace = sc.trace.dumpData()
|
||||
return cm
|
||||
}
|
||||
|
||||
func (c *channelMap) GetSocket(id int64) *SocketMetric {
|
||||
sm := &SocketMetric{}
|
||||
c.mu.RLock()
|
||||
if ls, ok := c.listenSockets[id]; ok {
|
||||
c.mu.RUnlock()
|
||||
sm.SocketData = ls.s.ChannelzMetric()
|
||||
sm.ID = ls.id
|
||||
sm.RefName = ls.refName
|
||||
return sm
|
||||
}
|
||||
if ns, ok := c.normalSockets[id]; ok {
|
||||
c.mu.RUnlock()
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
sm.ID = ns.id
|
||||
sm.RefName = ns.refName
|
||||
return sm
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServer(id int64) *ServerMetric {
|
||||
sm := &ServerMetric{}
|
||||
var svr *server
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if svr, ok = c.servers[id]; !ok {
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
sm.ListenSockets = copyMap(svr.listenSockets)
|
||||
c.mu.RUnlock()
|
||||
sm.ID = svr.id
|
||||
sm.RefName = svr.refName
|
||||
sm.ServerData = svr.s.ChannelzMetric()
|
||||
return sm
|
||||
db.removeEntry(id)
|
||||
}
|
||||
|
||||
// IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
|
||||
@@ -754,3 +220,11 @@ func (i *IDGenerator) Reset() {
|
||||
func (i *IDGenerator) genID() int64 {
|
||||
return atomic.AddInt64(&i.id, 1)
|
||||
}
|
||||
|
||||
// Identifier is an opaque channelz identifier used to expose channelz symbols
|
||||
// outside of grpc. Currently only implemented by Channel since no other
|
||||
// types require exposure outside grpc.
|
||||
type Identifier interface {
|
||||
Entity
|
||||
channelzIdentifier()
|
||||
}
|
||||
|
||||
75
vendor/google.golang.org/grpc/internal/channelz/id.go
generated
vendored
@@ -1,75 +0,0 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Identifier is an opaque identifier which uniquely identifies an entity in the
|
||||
// channelz database.
|
||||
type Identifier struct {
|
||||
typ RefChannelType
|
||||
id int64
|
||||
str string
|
||||
pid *Identifier
|
||||
}
|
||||
|
||||
// Type returns the entity type corresponding to id.
|
||||
func (id *Identifier) Type() RefChannelType {
|
||||
return id.typ
|
||||
}
|
||||
|
||||
// Int returns the integer identifier corresponding to id.
|
||||
func (id *Identifier) Int() int64 {
|
||||
return id.id
|
||||
}
|
||||
|
||||
// String returns a string representation of the entity corresponding to id.
|
||||
//
|
||||
// This includes some information about the parent as well. Examples:
|
||||
// Top-level channel: [Channel #channel-number]
|
||||
// Nested channel: [Channel #parent-channel-number Channel #channel-number]
|
||||
// Sub channel: [Channel #parent-channel SubChannel #subchannel-number]
|
||||
func (id *Identifier) String() string {
|
||||
return id.str
|
||||
}
|
||||
|
||||
// Equal returns true if other is the same as id.
|
||||
func (id *Identifier) Equal(other *Identifier) bool {
|
||||
if (id != nil) != (other != nil) {
|
||||
return false
|
||||
}
|
||||
if id == nil && other == nil {
|
||||
return true
|
||||
}
|
||||
return id.typ == other.typ && id.id == other.id && id.pid == other.pid
|
||||
}
|
||||
|
||||
// NewIdentifierForTesting returns a new opaque identifier to be used only for
|
||||
// testing purposes.
|
||||
func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
|
||||
return newIdentifer(typ, id, pid)
|
||||
}
|
||||
|
||||
func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
|
||||
str := fmt.Sprintf("%s #%d", typ, id)
|
||||
if pid != nil {
|
||||
str = fmt.Sprintf("%s %s", pid, str)
|
||||
}
|
||||
return &Identifier{typ: typ, id: id, str: str, pid: pid}
|
||||
}
|
||||
28
vendor/google.golang.org/grpc/internal/channelz/logging.go
generated
vendored
@@ -26,53 +26,49 @@ import (
|
||||
|
||||
var logger = grpclog.Component("channelz")
|
||||
|
||||
func withParens(id *Identifier) string {
|
||||
return "[" + id.String() + "] "
|
||||
}
|
||||
|
||||
// Info logs and adds a trace event if channelz is on.
|
||||
func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) {
|
||||
AddTraceEvent(l, e, 1, &TraceEvent{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// Infof logs and adds a trace event if channelz is on.
|
||||
func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
|
||||
AddTraceEvent(l, e, 1, &TraceEvent{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// Warning logs and adds a trace event if channelz is on.
|
||||
func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) {
|
||||
AddTraceEvent(l, e, 1, &TraceEvent{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
}
|
||||
|
||||
// Warningf logs and adds a trace event if channelz is on.
|
||||
func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
|
||||
AddTraceEvent(l, e, 1, &TraceEvent{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
}
|
||||
|
||||
// Error logs and adds a trace event if channelz is on.
|
||||
func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) {
|
||||
AddTraceEvent(l, e, 1, &TraceEvent{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
}
|
||||
|
||||
// Errorf logs and adds a trace event if channelz is on.
|
||||
func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
|
||||
AddTraceEvent(l, e, 1, &TraceEvent{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
|
||||
119
vendor/google.golang.org/grpc/internal/channelz/server.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2024 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Server is the channelz representation of a server.
|
||||
type Server struct {
|
||||
Entity
|
||||
ID int64
|
||||
RefName string
|
||||
|
||||
ServerMetrics ServerMetrics
|
||||
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
listenSockets map[int64]string
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
// ServerMetrics defines a struct containing metrics for servers.
|
||||
type ServerMetrics struct {
|
||||
// The number of incoming calls started on the server.
|
||||
CallsStarted atomic.Int64
|
||||
// The number of incoming calls that have completed with an OK status.
|
||||
CallsSucceeded atomic.Int64
|
||||
// The number of incoming calls that have completed with a non-OK status.
|
||||
CallsFailed atomic.Int64
|
||||
// The last time a call was started on the server.
|
||||
LastCallStartedTimestamp atomic.Int64
|
||||
}
|
||||
|
||||
// NewServerMetricsForTesting returns an initialized ServerMetrics.
|
||||
func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics {
|
||||
sm := &ServerMetrics{}
|
||||
sm.CallsStarted.Store(started)
|
||||
sm.CallsSucceeded.Store(succeeded)
|
||||
sm.CallsFailed.Store(failed)
|
||||
sm.LastCallStartedTimestamp.Store(timestamp)
|
||||
return sm
|
||||
}
|
||||
|
||||
func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
|
||||
sm.CallsStarted.Store(o.CallsStarted.Load())
|
||||
sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
|
||||
sm.CallsFailed.Store(o.CallsFailed.Load())
|
||||
sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
|
||||
}
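ServerMetrics now exposes atomic.Int64 counters that are updated in place rather than collected through a ChannelzMetric() callback. A small sketch of a call-accounting helper (the helper name is an assumption, and it presumes "time" is imported):

    // Illustrative sketch: account for one incoming RPC on a channelz Server.
    func recordCall(s *Server, ok bool) {
        s.ServerMetrics.CallsStarted.Add(1)
        s.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
        if ok {
            s.ServerMetrics.CallsSucceeded.Add(1)
        } else {
            s.ServerMetrics.CallsFailed.Add(1)
        }
    }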
|
||||
|
||||
// ListenSockets returns the listening sockets for s.
|
||||
func (s *Server) ListenSockets() map[int64]string {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
return copyMap(s.listenSockets)
|
||||
}
|
||||
|
||||
// String returns a printable description of s.
|
||||
func (s *Server) String() string {
|
||||
return fmt.Sprintf("Server #%d", s.ID)
|
||||
}
|
||||
|
||||
func (s *Server) id() int64 {
|
||||
return s.ID
|
||||
}
|
||||
|
||||
func (s *Server) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *Socket:
|
||||
switch v.SocketType {
|
||||
case SocketTypeNormal:
|
||||
s.sockets[id] = v.RefName
|
||||
case SocketTypeListen:
|
||||
s.listenSockets[id] = v.RefName
|
||||
}
|
||||
default:
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) deleteChild(id int64) {
|
||||
delete(s.sockets, id)
|
||||
delete(s.listenSockets, id)
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *Server) triggerDelete() {
|
||||
s.closeCalled = true
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *Server) deleteSelfIfReady() {
|
||||
if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
|
||||
return
|
||||
}
|
||||
s.cm.deleteEntry(s.ID)
|
||||
}
|
||||
|
||||
func (s *Server) getParentID() int64 {
|
||||
return 0
|
||||
}
|
||||
130
vendor/google.golang.org/grpc/internal/channelz/socket.go
generated
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2024 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sync/atomic"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// SocketMetrics defines the struct that the implementor of Socket interface
|
||||
// should return from ChannelzMetric().
|
||||
type SocketMetrics struct {
|
||||
// The number of streams that have been started.
|
||||
StreamsStarted atomic.Int64
|
||||
// The number of streams that have ended successfully:
|
||||
// On client side, receiving frame with eos bit set.
|
||||
// On server side, sending frame with eos bit set.
|
||||
StreamsSucceeded atomic.Int64
|
||||
// The number of streams that have ended unsuccessfully:
|
||||
// On client side, termination without receiving frame with eos bit set.
|
||||
// On server side, termination without sending frame with eos bit set.
|
||||
StreamsFailed atomic.Int64
|
||||
// The number of messages successfully sent on this socket.
|
||||
MessagesSent atomic.Int64
|
||||
MessagesReceived atomic.Int64
|
||||
// The number of keep alives sent. This is typically implemented with HTTP/2
|
||||
// ping messages.
|
||||
KeepAlivesSent atomic.Int64
|
||||
// The last time a stream was created by this endpoint. Usually unset for
|
||||
// servers.
|
||||
LastLocalStreamCreatedTimestamp atomic.Int64
|
||||
// The last time a stream was created by the remote endpoint. Usually unset
|
||||
// for clients.
|
||||
LastRemoteStreamCreatedTimestamp atomic.Int64
|
||||
// The last time a message was sent by this endpoint.
|
||||
LastMessageSentTimestamp atomic.Int64
|
||||
// The last time a message was received by this endpoint.
|
||||
LastMessageReceivedTimestamp atomic.Int64
|
||||
}
|
||||
|
||||
// EphemeralSocketMetrics are metrics that change rapidly and are tracked
|
||||
// outside of channelz.
|
||||
type EphemeralSocketMetrics struct {
|
||||
// The amount of window, granted to the local endpoint by the remote endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
LocalFlowControlWindow int64
|
||||
// The amount of window, granted to the remote endpoint by the local endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
RemoteFlowControlWindow int64
|
||||
}
|
||||
|
||||
type SocketType string
|
||||
|
||||
const (
|
||||
SocketTypeNormal = "NormalSocket"
|
||||
SocketTypeListen = "ListenSocket"
|
||||
)
|
||||
|
||||
type Socket struct {
|
||||
Entity
|
||||
SocketType SocketType
|
||||
ID int64
|
||||
Parent Entity
|
||||
cm *channelMap
|
||||
SocketMetrics SocketMetrics
|
||||
EphemeralMetrics func() *EphemeralSocketMetrics
|
||||
|
||||
RefName string
|
||||
// The locally bound address. Immutable.
|
||||
LocalAddr net.Addr
|
||||
// The remote bound address. May be absent. Immutable.
|
||||
RemoteAddr net.Addr
|
||||
// Optional, represents the name of the remote endpoint, if different than
|
||||
// the original target name. Immutable.
|
||||
RemoteName string
|
||||
// Immutable.
|
||||
SocketOptions *SocketOptionData
|
||||
// Immutable.
|
||||
Security credentials.ChannelzSecurityValue
|
||||
}
|
||||
|
||||
func (ls *Socket) String() string {
|
||||
return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
|
||||
}
|
||||
|
||||
func (ls *Socket) id() int64 {
|
||||
return ls.ID
|
||||
}
|
||||
|
||||
func (ls *Socket) addChild(id int64, e entry) {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||
}
|
||||
|
||||
func (ls *Socket) deleteChild(id int64) {
|
||||
logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||
}
|
||||
|
||||
func (ls *Socket) triggerDelete() {
|
||||
ls.cm.deleteEntry(ls.ID)
|
||||
ls.Parent.(entry).deleteChild(ls.ID)
|
||||
}
|
||||
|
||||
func (ls *Socket) deleteSelfIfReady() {
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
}
|
||||
|
||||
func (ls *Socket) getParentID() int64 {
|
||||
return ls.Parent.id()
|
||||
}
|
||||
151
vendor/google.golang.org/grpc/internal/channelz/subchannel.go
generated
vendored
Normal file
@@ -0,0 +1,151 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2024 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// SubChannel is the channelz representation of a subchannel.
|
||||
type SubChannel struct {
|
||||
Entity
|
||||
// ID is the channelz id of this subchannel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this subchannel.
|
||||
RefName string
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
parent *Channel
|
||||
trace *ChannelTrace
|
||||
traceRefCount int32
|
||||
|
||||
ChannelMetrics ChannelMetrics
|
||||
}
|
||||
|
||||
func (sc *SubChannel) String() string {
|
||||
return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID)
|
||||
}
|
||||
|
||||
func (sc *SubChannel) id() int64 {
|
||||
return sc.ID
|
||||
}
|
||||
|
||||
func (sc *SubChannel) Sockets() map[int64]string {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
return copyMap(sc.sockets)
|
||||
}
|
||||
|
||||
func (sc *SubChannel) Trace() *ChannelTrace {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
return sc.trace.copy()
|
||||
}
|
||||
|
||||
func (sc *SubChannel) addChild(id int64, e entry) {
|
||||
if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal {
|
||||
sc.sockets[id] = v.RefName
|
||||
} else {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *SubChannel) deleteChild(id int64) {
|
||||
delete(sc.sockets, id)
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *SubChannel) triggerDelete() {
|
||||
sc.closeCalled = true
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *SubChannel) getParentID() int64 {
|
||||
return sc.parent.ID
|
||||
}
|
||||
|
||||
// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
|
||||
// means deleting the subchannel reference from its parent's child list.
|
||||
//
|
||||
// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of
|
||||
// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
|
||||
//
|
||||
// The returned boolean value indicates whether the channel has been successfully deleted from tree.
|
||||
func (sc *SubChannel) deleteSelfFromTree() (deleted bool) {
|
||||
if !sc.closeCalled || len(sc.sockets) != 0 {
|
||||
return false
|
||||
}
|
||||
sc.parent.deleteChild(sc.ID)
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
|
||||
// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
|
||||
// the subchannel, and its memory will be garbage collected.
|
||||
//
|
||||
// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
|
||||
// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
|
||||
// the trace of the referenced entity must not be deleted. In order to release the resource allocated
|
||||
// by grpc, the reference to the grpc object is reset to a dummy object.
|
||||
//
|
||||
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
|
||||
//
|
||||
// It returns a bool to indicate whether the channel can be safely deleted from map.
|
||||
func (sc *SubChannel) deleteSelfFromMap() (delete bool) {
|
||||
return sc.getTraceRefCount() == 0
|
||||
}
|
||||
|
||||
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||||
// its parent's child list.
|
||||
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||||
// by id will return entry not found error.
|
||||
func (sc *SubChannel) deleteSelfIfReady() {
|
||||
if !sc.deleteSelfFromTree() {
|
||||
return
|
||||
}
|
||||
if !sc.deleteSelfFromMap() {
|
||||
return
|
||||
}
|
||||
db.deleteEntry(sc.ID)
|
||||
sc.trace.clear()
|
||||
}
|
||||
|
||||
func (sc *SubChannel) getChannelTrace() *ChannelTrace {
|
||||
return sc.trace
|
||||
}
|
||||
|
||||
func (sc *SubChannel) incrTraceRefCount() {
|
||||
atomic.AddInt32(&sc.traceRefCount, 1)
|
||||
}
|
||||
|
||||
func (sc *SubChannel) decrTraceRefCount() {
|
||||
atomic.AddInt32(&sc.traceRefCount, -1)
|
||||
}
|
||||
|
||||
func (sc *SubChannel) getTraceRefCount() int {
|
||||
i := atomic.LoadInt32(&sc.traceRefCount)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func (sc *SubChannel) getRefName() string {
|
||||
return sc.RefName
|
||||
}
|
||||
@@ -49,3 +49,17 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) {
s.TCPInfo = v
}
}

// GetSocketOption gets the socket option info of the conn.
func GetSocketOption(socket any) *SocketOptionData {
c, ok := socket.(syscall.Conn)
if !ok {
return nil
}
data := &SocketOptionData{}
if rawConn, err := c.SyscallConn(); err == nil {
rawConn.Control(data.Getsockopt)
return data
}
return nil
}
@@ -1,5 +1,4 @@
//go:build !linux
// +build !linux

/*
*
@@ -41,3 +40,8 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) {
logger.Warning("Channelz: socket options are not supported on non-linux environments")
})
}

// GetSocketOption gets the socket option info of the conn.
func GetSocketOption(c any) *SocketOptionData {
return nil
}
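On Linux, GetSocketOption duck-types the connection to syscall.Conn and fills a SocketOptionData through the raw control callback; the non-linux build simply returns nil, so callers must tolerate a nil result. A hedged usage sketch (conn and skt are assumed to exist):

    // Illustrative sketch: attach socket options to a channelz Socket when available.
    if opts := GetSocketOption(conn); opts != nil { // conn is, e.g., a *net.TCPConn
        skt.SocketOptions = opts
    }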
204
vendor/google.golang.org/grpc/internal/channelz/trace.go
generated
vendored
Normal file
@@ -0,0 +1,204 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMaxTraceEntry int32 = 30
|
||||
)
|
||||
|
||||
var maxTraceEntry = defaultMaxTraceEntry
|
||||
|
||||
// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e.
|
||||
// channel/subchannel). Setting it to 0 will disable channel tracing.
|
||||
func SetMaxTraceEntry(i int32) {
|
||||
atomic.StoreInt32(&maxTraceEntry, i)
|
||||
}
|
||||
|
||||
// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per
|
||||
// entity to default.
|
||||
func ResetMaxTraceEntryToDefault() {
|
||||
atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
|
||||
}
|
||||
|
||||
func getMaxTraceEntry() int {
|
||||
i := atomic.LoadInt32(&maxTraceEntry)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
// traceEvent is an internal representation of a single trace event
|
||||
type traceEvent struct {
|
||||
// Desc is a simple description of the trace event.
|
||||
Desc string
|
||||
// Severity states the severity of this trace event.
|
||||
Severity Severity
|
||||
// Timestamp is the event time.
|
||||
Timestamp time.Time
|
||||
// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
|
||||
// involved in this event.
|
||||
// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
|
||||
RefID int64
|
||||
// RefName is the reference name for the entity that gets referenced in the event.
|
||||
RefName string
|
||||
// RefType indicates the referenced entity type, i.e Channel or SubChannel.
|
||||
RefType RefChannelType
|
||||
}
|
||||
|
||||
// TraceEvent is what the caller of AddTraceEvent should provide to describe the
|
||||
// event to be added to the channel trace.
|
||||
//
|
||||
// The Parent field is optional. It is used for an event that will be recorded
|
||||
// in the entity's parent trace.
|
||||
type TraceEvent struct {
|
||||
Desc string
|
||||
Severity Severity
|
||||
Parent *TraceEvent
|
||||
}
|
||||
|
||||
type ChannelTrace struct {
|
||||
cm *channelMap
|
||||
clearCalled bool
|
||||
CreationTime time.Time
|
||||
EventNum int64
|
||||
mu sync.Mutex
|
||||
Events []*traceEvent
|
||||
}
|
||||
|
||||
func (c *ChannelTrace) copy() *ChannelTrace {
|
||||
return &ChannelTrace{
|
||||
CreationTime: c.CreationTime,
|
||||
EventNum: c.EventNum,
|
||||
Events: append(([]*traceEvent)(nil), c.Events...),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ChannelTrace) append(e *traceEvent) {
|
||||
c.mu.Lock()
|
||||
if len(c.Events) == getMaxTraceEntry() {
|
||||
del := c.Events[0]
|
||||
c.Events = c.Events[1:]
|
||||
if del.RefID != 0 {
|
||||
// start recursive cleanup in a goroutine to not block the call originated from grpc.
|
||||
go func() {
|
||||
// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
|
||||
c.cm.mu.Lock()
|
||||
c.cm.decrTraceRefCount(del.RefID)
|
||||
c.cm.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
}
|
||||
e.Timestamp = time.Now()
|
||||
c.Events = append(c.Events, e)
|
||||
c.EventNum++
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *ChannelTrace) clear() {
|
||||
if c.clearCalled {
|
||||
return
|
||||
}
|
||||
c.clearCalled = true
|
||||
c.mu.Lock()
|
||||
for _, e := range c.Events {
|
||||
if e.RefID != 0 {
|
||||
// caller should have already held the c.cm.mu lock.
|
||||
c.cm.decrTraceRefCount(e.RefID)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Severity is the severity level of a trace event.
|
||||
// The canonical enumeration of all valid values is here:
|
||||
// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
|
||||
type Severity int
|
||||
|
||||
const (
|
||||
// CtUnknown indicates unknown severity of a trace event.
|
||||
CtUnknown Severity = iota
|
||||
// CtInfo indicates info level severity of a trace event.
|
||||
CtInfo
|
||||
// CtWarning indicates warning level severity of a trace event.
|
||||
CtWarning
|
||||
// CtError indicates error level severity of a trace event.
|
||||
CtError
|
||||
)
|
||||
|
||||
// RefChannelType is the type of the entity being referenced in a trace event.
|
||||
type RefChannelType int
|
||||
|
||||
const (
|
||||
// RefUnknown indicates an unknown entity type, the zero value for this type.
|
||||
RefUnknown RefChannelType = iota
|
||||
// RefChannel indicates the referenced entity is a Channel.
|
||||
RefChannel
|
||||
// RefSubChannel indicates the referenced entity is a SubChannel.
|
||||
RefSubChannel
|
||||
// RefServer indicates the referenced entity is a Server.
|
||||
RefServer
|
||||
// RefListenSocket indicates the referenced entity is a ListenSocket.
|
||||
RefListenSocket
|
||||
// RefNormalSocket indicates the referenced entity is a NormalSocket.
|
||||
RefNormalSocket
|
||||
)
|
||||
|
||||
var refChannelTypeToString = map[RefChannelType]string{
|
||||
RefUnknown: "Unknown",
|
||||
RefChannel: "Channel",
|
||||
RefSubChannel: "SubChannel",
|
||||
RefServer: "Server",
|
||||
RefListenSocket: "ListenSocket",
|
||||
RefNormalSocket: "NormalSocket",
|
||||
}
|
||||
|
||||
func (r RefChannelType) String() string {
|
||||
return refChannelTypeToString[r]
|
||||
}
|
||||
|
||||
// AddTraceEvent adds trace related to the entity with specified id, using the
// provided TraceEventDesc.
//
// If channelz is not turned ON, this will simply log the event descriptions.
func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
// Log only the trace description associated with the bottom most entity.
d := fmt.Sprintf("[%s]%s", e, desc.Desc)
switch desc.Severity {
case CtUnknown, CtInfo:
l.InfoDepth(depth+1, d)
case CtWarning:
l.WarningDepth(depth+1, d)
case CtError:
l.ErrorDepth(depth+1, d)
}

if getMaxTraceEntry() == 0 {
return
}
if IsOn() {
db.traceEvent(e.id(), desc)
}
}
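AddTraceEvent always logs the description against the given entity and, when channelz is on and tracing has not been disabled via SetMaxTraceEntry(0), also appends it to the entity's trace; a nested Parent event is mirrored onto the parent's trace. A sketch of typical usage (the subchannel sc and the package logger are assumptions):

    // Illustrative sketch: record a subchannel event, mirrored on its parent channel's trace.
    AddTraceEvent(logger, sc, 0, &TraceEvent{
        Desc:     "Subchannel picked a new address to connect to",
        Severity: CtInfo,
        Parent: &TraceEvent{
            Desc:     "Child SubChannel updated",
            Severity: CtInfo,
        },
    })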
727
vendor/google.golang.org/grpc/internal/channelz/types.go
generated
vendored
@@ -1,727 +0,0 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// entry represents a node in the channelz database.
|
||||
type entry interface {
|
||||
// addChild adds a child e, whose channelz id is id to child list
|
||||
addChild(id int64, e entry)
|
||||
// deleteChild deletes a child with channelz id to be id from child list
|
||||
deleteChild(id int64)
|
||||
// triggerDelete tries to delete self from channelz database. However, if child
|
||||
// list is not empty, then deletion from the database is on hold until the last
|
||||
// child is deleted from database.
|
||||
triggerDelete()
|
||||
// deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
|
||||
// list is now empty. If both conditions are met, then delete self from database.
|
||||
deleteSelfIfReady()
|
||||
// getParentID returns parent ID of the entry. 0 value parent ID means no parent.
|
||||
getParentID() int64
|
||||
}
|
||||
|
||||
// dummyEntry is a fake entry to handle entry not found case.
|
||||
type dummyEntry struct {
|
||||
idNotFound int64
|
||||
}
|
||||
|
||||
func (d *dummyEntry) addChild(id int64, e entry) {
|
||||
// Note: It is possible for a normal program to reach here under race condition.
|
||||
// For example, there could be a race between ClientConn.Close() info being propagated
|
||||
// to addrConn and http2Client. ClientConn.Close() cancel the context and result
|
||||
// in http2Client to error. The error info is then caught by transport monitor
|
||||
// and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore,
|
||||
// the addrConn will create a new transport. And when registering the new transport in
|
||||
// channelz, its parent addrConn could have already been torn down and deleted
|
||||
// from channelz tracking, and thus reach the code here.
|
||||
logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) deleteChild(id int64) {
|
||||
// It is possible for a normal program to reach here under race condition.
|
||||
// Refer to the example described in addChild().
|
||||
logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) triggerDelete() {
|
||||
logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||
}
|
||||
|
||||
func (*dummyEntry) deleteSelfIfReady() {
|
||||
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
|
||||
}
|
||||
|
||||
func (*dummyEntry) getParentID() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// ChannelMetric defines the info channelz provides for a specific Channel, which
|
||||
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
type ChannelMetric struct {
|
||||
// ID is the channelz id of this channel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this channel.
|
||||
RefName string
|
||||
// ChannelData contains channel internal metric reported by the channel through
|
||||
// ChannelzMetric().
|
||||
ChannelData *ChannelInternalMetric
|
||||
// NestedChans tracks the nested channel type children of this channel in the format of
|
||||
// a map from nested channel channelz id to corresponding reference string.
|
||||
NestedChans map[int64]string
|
||||
// SubChans tracks the subchannel type children of this channel in the format of a
|
||||
// map from subchannel channelz id to corresponding reference string.
|
||||
SubChans map[int64]string
|
||||
// Sockets tracks the socket type children of this channel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
// Note current grpc implementation doesn't allow channel having sockets directly,
|
||||
// therefore, this is field is unused.
|
||||
Sockets map[int64]string
|
||||
// Trace contains the most recent traced events.
|
||||
Trace *ChannelTrace
|
||||
}
|
||||
|
||||
// SubChannelMetric defines the info channelz provides for a specific SubChannel,
|
||||
// which includes ChannelInternalMetric and channelz-specific data, such as
|
||||
// channelz id, child list, etc.
|
||||
type SubChannelMetric struct {
|
||||
// ID is the channelz id of this subchannel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this subchannel.
|
||||
RefName string
|
||||
// ChannelData contains subchannel internal metric reported by the subchannel
|
||||
// through ChannelzMetric().
|
||||
ChannelData *ChannelInternalMetric
|
||||
// NestedChans tracks the nested channel type children of this subchannel in the format of
|
||||
// a map from nested channel channelz id to corresponding reference string.
|
||||
// Note current grpc implementation doesn't allow subchannel to have nested channels
|
||||
// as children, therefore, this field is unused.
|
||||
NestedChans map[int64]string
|
||||
// SubChans tracks the subchannel type children of this subchannel in the format of a
|
||||
// map from subchannel channelz id to corresponding reference string.
|
||||
// Note current grpc implementation doesn't allow subchannel to have subchannels
|
||||
// as children, therefore, this field is unused.
|
||||
SubChans map[int64]string
|
||||
// Sockets tracks the socket type children of this subchannel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
Sockets map[int64]string
|
||||
// Trace contains the most recent traced events.
|
||||
Trace *ChannelTrace
|
||||
}
|
||||
|
||||
// ChannelInternalMetric defines the struct that the implementor of Channel interface
|
||||
// should return from ChannelzMetric().
|
||||
type ChannelInternalMetric struct {
|
||||
// current connectivity state of the channel.
|
||||
State connectivity.State
|
||||
// The target this channel originally tried to connect to. May be absent
|
||||
Target string
|
||||
// The number of calls started on the channel.
|
||||
CallsStarted int64
|
||||
// The number of calls that have completed with an OK status.
|
||||
CallsSucceeded int64
|
||||
// The number of calls that have a completed with a non-OK status.
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the channel.
|
||||
LastCallStartedTimestamp time.Time
|
||||
}
|
||||
|
||||
// ChannelTrace stores traced events on a channel/subchannel and related info.
|
||||
type ChannelTrace struct {
|
||||
// EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
|
||||
EventNum int64
|
||||
// CreationTime is the creation time of the trace.
|
||||
CreationTime time.Time
|
||||
// Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the
|
||||
// oldest one)
|
||||
Events []*TraceEvent
|
||||
}
|
||||
|
||||
// TraceEvent represent a single trace event
|
||||
type TraceEvent struct {
|
||||
// Desc is a simple description of the trace event.
|
||||
Desc string
|
||||
// Severity states the severity of this trace event.
|
||||
Severity Severity
|
||||
// Timestamp is the event time.
|
||||
Timestamp time.Time
|
||||
// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
|
||||
// involved in this event.
|
||||
// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
|
||||
RefID int64
|
||||
// RefName is the reference name for the entity that gets referenced in the event.
|
||||
RefName string
|
||||
// RefType indicates the referenced entity type, i.e Channel or SubChannel.
|
||||
RefType RefChannelType
|
||||
}
|
||||
|
||||
// Channel is the interface that should be satisfied in order to be tracked by
|
||||
// channelz as Channel or SubChannel.
|
||||
type Channel interface {
|
||||
ChannelzMetric() *ChannelInternalMetric
|
||||
}
|
||||
|
||||
type dummyChannel struct{}
|
||||
|
||||
func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
|
||||
return &ChannelInternalMetric{}
|
||||
}
|
||||
|
||||
type channel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
nestedChans map[int64]string
|
||||
subChans map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
trace *channelTrace
|
||||
// traceRefCount is the number of trace events that reference this channel.
|
||||
// Non-zero traceRefCount means the trace of this channel cannot be deleted.
|
||||
traceRefCount int32
|
||||
}
|
||||
|
||||
func (c *channel) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *subChannel:
|
||||
c.subChans[id] = v.refName
|
||||
case *channel:
|
||||
c.nestedChans[id] = v.refName
|
||||
default:
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channel) deleteChild(id int64) {
|
||||
delete(c.subChans, id)
|
||||
delete(c.nestedChans, id)
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) triggerDelete() {
|
||||
c.closeCalled = true
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) getParentID() int64 {
|
||||
return c.pid
|
||||
}
|
||||
|
||||
// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
|
||||
// deleting the channel reference from its parent's child list.
|
||||
//
|
||||
// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the
|
||||
// corresponding grpc object has been invoked, and the channel does not have any children left.
|
||||
//
|
||||
// The returned boolean value indicates whether the channel has been successfully deleted from tree.
|
||||
func (c *channel) deleteSelfFromTree() (deleted bool) {
|
||||
if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
|
||||
return false
|
||||
}
|
||||
// not top channel
|
||||
if c.pid != 0 {
|
||||
c.cm.findEntry(c.pid).deleteChild(c.id)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
|
||||
// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
|
||||
// channel, and its memory will be garbage collected.
|
||||
//
|
||||
// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
|
||||
// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
|
||||
// the trace of the referenced entity must not be deleted. In order to release the resource allocated
|
||||
// by grpc, the reference to the grpc object is reset to a dummy object.
|
||||
//
|
||||
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
|
||||
//
|
||||
// It returns a bool to indicate whether the channel can be safely deleted from map.
|
||||
func (c *channel) deleteSelfFromMap() (delete bool) {
|
||||
if c.getTraceRefCount() != 0 {
|
||||
c.c = &dummyChannel{}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||||
// parent's child list.
|
||||
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||||
// will return entry not found error.
|
||||
func (c *channel) deleteSelfIfReady() {
|
||||
if !c.deleteSelfFromTree() {
|
||||
return
|
||||
}
|
||||
if !c.deleteSelfFromMap() {
|
||||
return
|
||||
}
|
||||
c.cm.deleteEntry(c.id)
|
||||
c.trace.clear()
|
||||
}
|
||||
|
||||
func (c *channel) getChannelTrace() *channelTrace {
|
||||
return c.trace
|
||||
}
|
||||
|
||||
func (c *channel) incrTraceRefCount() {
|
||||
atomic.AddInt32(&c.traceRefCount, 1)
|
||||
}
|
||||
|
||||
func (c *channel) decrTraceRefCount() {
|
||||
atomic.AddInt32(&c.traceRefCount, -1)
|
||||
}
|
||||
|
||||
func (c *channel) getTraceRefCount() int {
|
||||
i := atomic.LoadInt32(&c.traceRefCount)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func (c *channel) getRefName() string {
|
||||
return c.refName
|
||||
}
|
||||
|
||||
type subChannel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
trace *channelTrace
|
||||
traceRefCount int32
|
||||
}
|
||||
|
||||
func (sc *subChannel) addChild(id int64, e entry) {
|
||||
if v, ok := e.(*normalSocket); ok {
|
||||
sc.sockets[id] = v.refName
|
||||
} else {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *subChannel) deleteChild(id int64) {
|
||||
delete(sc.sockets, id)
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) triggerDelete() {
|
||||
sc.closeCalled = true
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) getParentID() int64 {
|
||||
return sc.pid
|
||||
}
|
||||
|
||||
// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
|
||||
// means deleting the subchannel reference from its parent's child list.
|
||||
//
|
||||
// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of
|
||||
// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
|
||||
//
|
||||
// The returned boolean value indicates whether the channel has been successfully deleted from tree.
|
||||
func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
|
||||
if !sc.closeCalled || len(sc.sockets) != 0 {
|
||||
return false
|
||||
}
|
||||
sc.cm.findEntry(sc.pid).deleteChild(sc.id)
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
|
||||
// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
|
||||
// the subchannel, and its memory will be garbage collected.
|
||||
//
|
||||
// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
|
||||
// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
|
||||
// the trace of the referenced entity must not be deleted. In order to release the resource allocated
|
||||
// by grpc, the reference to the grpc object is reset to a dummy object.
|
||||
//
|
||||
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
|
||||
//
|
||||
// It returns a bool to indicate whether the channel can be safely deleted from map.
|
||||
func (sc *subChannel) deleteSelfFromMap() (delete bool) {
|
||||
if sc.getTraceRefCount() != 0 {
|
||||
// free the grpc struct (i.e. addrConn)
|
||||
sc.c = &dummyChannel{}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||||
// its parent's child list.
|
||||
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||||
// by id will return entry not found error.
|
||||
func (sc *subChannel) deleteSelfIfReady() {
|
||||
if !sc.deleteSelfFromTree() {
|
||||
return
|
||||
}
|
||||
if !sc.deleteSelfFromMap() {
|
||||
return
|
||||
}
|
||||
sc.cm.deleteEntry(sc.id)
|
||||
sc.trace.clear()
|
||||
}
|
||||
|
||||
func (sc *subChannel) getChannelTrace() *channelTrace {
|
||||
return sc.trace
|
||||
}
|
||||
|
||||
func (sc *subChannel) incrTraceRefCount() {
|
||||
atomic.AddInt32(&sc.traceRefCount, 1)
|
||||
}
|
||||
|
||||
func (sc *subChannel) decrTraceRefCount() {
|
||||
atomic.AddInt32(&sc.traceRefCount, -1)
|
||||
}
|
||||
|
||||
func (sc *subChannel) getTraceRefCount() int {
|
||||
i := atomic.LoadInt32(&sc.traceRefCount)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func (sc *subChannel) getRefName() string {
|
||||
return sc.refName
|
||||
}
|
||||
|
||||
// SocketMetric defines the info channelz provides for a specific Socket, which
|
||||
// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
|
||||
type SocketMetric struct {
|
||||
// ID is the channelz id of this socket.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this socket.
|
||||
RefName string
|
||||
// SocketData contains socket internal metric reported by the socket through
|
||||
// ChannelzMetric().
|
||||
SocketData *SocketInternalMetric
|
||||
}
|
||||
|
||||
// SocketInternalMetric defines the struct that the implementor of Socket interface
|
||||
// should return from ChannelzMetric().
|
||||
type SocketInternalMetric struct {
|
||||
// The number of streams that have been started.
|
||||
StreamsStarted int64
|
||||
// The number of streams that have ended successfully:
|
||||
// On client side, receiving frame with eos bit set.
|
||||
// On server side, sending frame with eos bit set.
|
||||
StreamsSucceeded int64
|
||||
// The number of streams that have ended unsuccessfully:
|
||||
// On client side, termination without receiving frame with eos bit set.
|
||||
// On server side, termination without sending frame with eos bit set.
|
||||
StreamsFailed int64
|
||||
// The number of messages successfully sent on this socket.
|
||||
MessagesSent int64
|
||||
MessagesReceived int64
|
||||
// The number of keep alives sent. This is typically implemented with HTTP/2
|
||||
// ping messages.
|
||||
KeepAlivesSent int64
|
||||
// The last time a stream was created by this endpoint. Usually unset for
|
||||
// servers.
|
||||
LastLocalStreamCreatedTimestamp time.Time
|
||||
// The last time a stream was created by the remote endpoint. Usually unset
|
||||
// for clients.
|
||||
LastRemoteStreamCreatedTimestamp time.Time
|
||||
// The last time a message was sent by this endpoint.
|
||||
LastMessageSentTimestamp time.Time
|
||||
// The last time a message was received by this endpoint.
|
||||
LastMessageReceivedTimestamp time.Time
|
||||
// The amount of window, granted to the local endpoint by the remote endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
LocalFlowControlWindow int64
|
||||
// The amount of window, granted to the remote endpoint by the local endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
RemoteFlowControlWindow int64
|
||||
// The locally bound address.
|
||||
LocalAddr net.Addr
|
||||
// The remote bound address. May be absent.
|
||||
RemoteAddr net.Addr
|
||||
// Optional, represents the name of the remote endpoint, if different than
|
||||
// the original target name.
|
||||
RemoteName string
|
||||
SocketOptions *SocketOptionData
|
||||
Security credentials.ChannelzSecurityValue
|
||||
}
|
||||
|
||||
// Socket is the interface that should be satisfied in order to be tracked by
|
||||
// channelz as Socket.
|
||||
type Socket interface {
|
||||
ChannelzMetric() *SocketInternalMetric
|
||||
}
|
||||
|
||||
type listenSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (ls *listenSocket) addChild(id int64, e entry) {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteChild(id int64) {
|
||||
logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) triggerDelete() {
|
||||
ls.cm.deleteEntry(ls.id)
|
||||
ls.cm.findEntry(ls.pid).deleteChild(ls.id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteSelfIfReady() {
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
}
|
||||
|
||||
func (ls *listenSocket) getParentID() int64 {
|
||||
return ls.pid
|
||||
}
|
||||
|
||||
type normalSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (ns *normalSocket) addChild(id int64, e entry) {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteChild(id int64) {
|
||||
logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) triggerDelete() {
|
||||
ns.cm.deleteEntry(ns.id)
|
||||
ns.cm.findEntry(ns.pid).deleteChild(ns.id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteSelfIfReady() {
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||
}
|
||||
|
||||
func (ns *normalSocket) getParentID() int64 {
|
||||
return ns.pid
|
||||
}
|
||||
|
||||
// ServerMetric defines the info channelz provides for a specific Server, which
|
||||
// includes ServerInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
type ServerMetric struct {
|
||||
// ID is the channelz id of this server.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this server.
|
||||
RefName string
|
||||
// ServerData contains server internal metric reported by the server through
|
||||
// ChannelzMetric().
|
||||
ServerData *ServerInternalMetric
|
||||
// ListenSockets tracks the listener socket type children of this server in the
|
||||
// format of a map from socket channelz id to corresponding reference string.
|
||||
ListenSockets map[int64]string
|
||||
}
|
||||
|
||||
// ServerInternalMetric defines the struct that the implementor of Server interface
|
||||
// should return from ChannelzMetric().
|
||||
type ServerInternalMetric struct {
|
||||
// The number of incoming calls started on the server.
|
||||
CallsStarted int64
|
||||
// The number of incoming calls that have completed with an OK status.
|
||||
CallsSucceeded int64
|
||||
// The number of incoming calls that have a completed with a non-OK status.
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the server.
|
||||
LastCallStartedTimestamp time.Time
|
||||
}
|
||||
|
||||
// Server is the interface to be satisfied in order to be tracked by channelz as
|
||||
// Server.
|
||||
type Server interface {
|
||||
ChannelzMetric() *ServerInternalMetric
|
||||
}
|
||||
|
||||
type server struct {
|
||||
refName string
|
||||
s Server
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
listenSockets map[int64]string
|
||||
id int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (s *server) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *normalSocket:
|
||||
s.sockets[id] = v.refName
|
||||
case *listenSocket:
|
||||
s.listenSockets[id] = v.refName
|
||||
default:
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) deleteChild(id int64) {
|
||||
delete(s.sockets, id)
|
||||
delete(s.listenSockets, id)
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *server) triggerDelete() {
|
||||
s.closeCalled = true
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *server) deleteSelfIfReady() {
|
||||
if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
|
||||
return
|
||||
}
|
||||
s.cm.deleteEntry(s.id)
|
||||
}
|
||||
|
||||
func (s *server) getParentID() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type tracedChannel interface {
|
||||
getChannelTrace() *channelTrace
|
||||
incrTraceRefCount()
|
||||
decrTraceRefCount()
|
||||
getRefName() string
|
||||
}
|
||||
|
||||
type channelTrace struct {
|
||||
cm *channelMap
|
||||
clearCalled bool
|
||||
createdTime time.Time
|
||||
eventCount int64
|
||||
mu sync.Mutex
|
||||
events []*TraceEvent
|
||||
}
|
||||
|
||||
func (c *channelTrace) append(e *TraceEvent) {
|
||||
c.mu.Lock()
|
||||
if len(c.events) == getMaxTraceEntry() {
|
||||
del := c.events[0]
|
||||
c.events = c.events[1:]
|
||||
if del.RefID != 0 {
|
||||
// start recursive cleanup in a goroutine to not block the call originated from grpc.
|
||||
go func() {
|
||||
// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
|
||||
c.cm.mu.Lock()
|
||||
c.cm.decrTraceRefCount(del.RefID)
|
||||
c.cm.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
}
|
||||
e.Timestamp = time.Now()
|
||||
c.events = append(c.events, e)
|
||||
c.eventCount++
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelTrace) clear() {
|
||||
if c.clearCalled {
|
||||
return
|
||||
}
|
||||
c.clearCalled = true
|
||||
c.mu.Lock()
|
||||
for _, e := range c.events {
|
||||
if e.RefID != 0 {
|
||||
// caller should have already held the c.cm.mu lock.
|
||||
c.cm.decrTraceRefCount(e.RefID)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Severity is the severity level of a trace event.
|
||||
// The canonical enumeration of all valid values is here:
|
||||
// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
|
||||
type Severity int
|
||||
|
||||
const (
|
||||
// CtUnknown indicates unknown severity of a trace event.
|
||||
CtUnknown Severity = iota
|
||||
// CtInfo indicates info level severity of a trace event.
|
||||
CtInfo
|
||||
// CtWarning indicates warning level severity of a trace event.
|
||||
CtWarning
|
||||
// CtError indicates error level severity of a trace event.
|
||||
CtError
|
||||
)
|
||||
|
||||
// RefChannelType is the type of the entity being referenced in a trace event.
|
||||
type RefChannelType int
|
||||
|
||||
const (
|
||||
// RefUnknown indicates an unknown entity type, the zero value for this type.
|
||||
RefUnknown RefChannelType = iota
|
||||
// RefChannel indicates the referenced entity is a Channel.
|
||||
RefChannel
|
||||
// RefSubChannel indicates the referenced entity is a SubChannel.
|
||||
RefSubChannel
|
||||
// RefServer indicates the referenced entity is a Server.
|
||||
RefServer
|
||||
// RefListenSocket indicates the referenced entity is a ListenSocket.
|
||||
RefListenSocket
|
||||
// RefNormalSocket indicates the referenced entity is a NormalSocket.
|
||||
RefNormalSocket
|
||||
)
|
||||
|
||||
var refChannelTypeToString = map[RefChannelType]string{
|
||||
RefUnknown: "Unknown",
|
||||
RefChannel: "Channel",
|
||||
RefSubChannel: "SubChannel",
|
||||
RefServer: "Server",
|
||||
RefListenSocket: "ListenSocket",
|
||||
RefNormalSocket: "NormalSocket",
|
||||
}
|
||||
|
||||
func (r RefChannelType) String() string {
|
||||
return refChannelTypeToString[r]
|
||||
}
|
||||
|
||||
func (c *channelTrace) dumpData() *ChannelTrace {
|
||||
c.mu.Lock()
|
||||
ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
|
||||
ct.Events = c.events[:len(c.events)]
|
||||
c.mu.Unlock()
|
||||
return ct
|
||||
}
|
||||
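The deleted types.go above is organized around a two-step teardown: an entity first leaves the parent/child tree once its gRPC object is closed and it has no children, and only then leaves the id map if no trace still references it. A minimal, self-contained sketch of that pattern with toy types follows (not the vendored implementation, which additionally guards everything with the channelMap lock and atomics).

package main

import "fmt"

// node is a toy stand-in for a channelz entry: it tracks a parent, children,
// whether Close was requested, and how many traces still reference it.
type node struct {
    id          int64
    parent      *node
    children    map[int64]*node
    closeCalled bool
    traceRefs   int
}

var db = map[int64]*node{}

func newNode(id int64, parent *node) *node {
    n := &node{id: id, parent: parent, children: map[int64]*node{}}
    db[id] = n
    if parent != nil {
        parent.children[id] = n
    }
    return n
}

// deleteSelfIfReady mirrors the two-step rule: step 1 leaves the tree only when
// closed and childless; step 2 leaves the id map only when no trace refers to it.
func (n *node) deleteSelfIfReady() {
    if !n.closeCalled || len(n.children) != 0 {
        return // still in use: stay in both tree and map
    }
    if n.parent != nil {
        delete(n.parent.children, n.id)
        n.parent.deleteSelfIfReady() // the parent may now be ready too
    }
    if n.traceRefs != 0 {
        return // referenced by a trace: gone from the tree, but still queryable
    }
    delete(db, n.id)
}

func main() {
    ch := newNode(1, nil)
    sub := newNode(2, ch)
    ch.closeCalled = true
    ch.deleteSelfIfReady() // no-op: still has a child
    sub.closeCalled = true
    sub.deleteSelfIfReady() // removes sub, then cascades to ch
    fmt.Println(len(db))    // 0
}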
12
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
generated
vendored
12
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
generated
vendored
@@ -28,17 +28,11 @@ import (
var (
    // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
    TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
    // AdvertiseCompressors is set if registered compressor should be advertised
    // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
    AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
    // RingHashCap indicates the maximum ring size which defaults to 4096
    // entries but may be overridden by setting the environment variable
    // "GRPC_RING_HASH_CAP". This does not override the default bounds
    // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
    RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
    // PickFirstLBConfig is set if we should support configuration of the
    // pick_first LB policy.
    PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true)
    // LeastRequestLB is set if we should support the least_request_experimental
    // LB policy, which can be enabled by setting the environment variable
    // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
@@ -46,6 +40,12 @@ var (
    // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
    // handshakes that can be performed.
    ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
    // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
    // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this
    // option is present for backward compatibility. This option may be overridden
    // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
    // or "false".
    EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false)
)

func boolFromEnv(envVar string, def bool) bool {
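The hunk ends at the boolFromEnv signature, so its body is not shown here. For readers skimming the env-var list above, a plausible helper of that shape would read the variable and fall back to the default; the sketch below is an assumption (note the hypothetical boolFromEnvSketch name), not the vendored implementation.

package envsketch

import (
    "os"
    "strings"
)

// boolFromEnvSketch is a hypothetical helper with the same shape as the
// boolFromEnv signature above: read an environment variable and fall back to
// the given default. Plausible semantics only, not the vendored code.
func boolFromEnvSketch(envVar string, def bool) bool {
    v := os.Getenv(envVar)
    if def {
        // Default true: only an explicit "false" turns the feature off.
        return !strings.EqualFold(v, "false")
    }
    // Default false: only an explicit "true" turns the feature on.
    return strings.EqualFold(v, "true")
}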
39
vendor/google.golang.org/grpc/internal/envconfig/xds.go
generated
vendored
39
vendor/google.golang.org/grpc/internal/envconfig/xds.go
generated
vendored
@@ -50,46 +50,7 @@ var (
    //
    // When both bootstrap FileName and FileContent are set, FileName is used.
    XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
    // XDSRingHash indicates whether ring hash support is enabled, which can be
    // disabled by setting the environment variable
    // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
    XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
    // XDSClientSideSecurity is used to control processing of security
    // configuration on the client-side.
    //
    // Note that there is no env var protection for the server-side because we
    // have a brand new API on the server-side and users explicitly need to use
    // the new API to get security integration on the server.
    XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
    // XDSAggregateAndDNS indicates whether processing of aggregated cluster and
    // DNS cluster is enabled, which can be disabled by setting the environment
    // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
    // to "false".
    XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)

    // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
    // which can be disabled by setting the environment variable
    // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
    XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
    // XDSOutlierDetection indicates whether outlier detection support is
    // enabled, which can be disabled by setting the environment variable
    // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
    XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
    // XDSFederation indicates whether federation support is enabled, which can
    // be enabled by setting the environment variable
    // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
    XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)

    // XDSRLS indicates whether processing of Cluster Specifier plugins and
    // support for the RLS CLuster Specifier is enabled, which can be disabled by
    // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
    // "false".
    XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)

    // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
    C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
    // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
    // can be disabled by setting the environment variable
    // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
    XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
)
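All of these xDS switches are plain package-level variables, so they are evaluated when the envconfig package is initialized, before main runs. In practice that means they are set in the process environment rather than from Go code in the same process. One way to do that from a launcher process is sketched below; the binary path is hypothetical.

package main

import (
    "os"
    "os/exec"
)

func main() {
    // Launch a gRPC-based workload with a couple of the env vars above set.
    // The path is a placeholder for whatever binary you actually run.
    cmd := exec.Command("/path/to/grpc-server")
    cmd.Env = append(os.Environ(),
        "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION=false", // disable outlier detection
        "GRPC_RING_HASH_CAP=8192",                          // raise the ring hash cap
    )
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Run(); err != nil {
        panic(err)
    }
}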
28
vendor/google.golang.org/grpc/internal/experimental.go
generated
vendored
Normal file
28
vendor/google.golang.org/grpc/internal/experimental.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
/*
 * Copyright 2023 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package internal

var (
    // WithRecvBufferPool is implemented by the grpc package and returns a dial
    // option to configure a shared buffer pool for a grpc.ClientConn.
    WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption

    // RecvBufferPool is implemented by the grpc package and returns a server
    // option to configure a shared buffer pool for a grpc.Server.
    RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
)
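The new experimental.go follows the usual internal-package pattern: the hooks are declared as any to avoid an import cycle, the grpc package assigns concrete functions to them, and callers type-assert before use. A self-contained toy illustrating that pattern follows; the function shape here is invented for the example, while the real hooks carry grpc.SharedBufferPool as commented above.

package main

import "fmt"

// withBufferPool mimics the internal-package pattern: declared as `any`, set to
// a concrete function by the implementing package, type-asserted by callers.
var withBufferPool any // func(size int) string in this toy

func init() {
    // In real gRPC this assignment happens inside the grpc package; here it is a toy.
    withBufferPool = func(size int) string {
        return fmt.Sprintf("dial option: recv buffer pool of %d bytes", size)
    }
}

func main() {
    f := withBufferPool.(func(size int) string) // panics if the hook is unset or mistyped
    fmt.Println(f(4096))
}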
95
vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
generated
vendored
95
vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
generated
vendored
@@ -1,95 +0,0 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpcrand implements math/rand functions in a concurrent-safe way
|
||||
// with a global random source, independent of math/rand's global source.
|
||||
package grpcrand
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
r = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
// Int implements rand.Int on the grpcrand global source.
|
||||
func Int() int {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Int()
|
||||
}
|
||||
|
||||
// Int63n implements rand.Int63n on the grpcrand global source.
|
||||
func Int63n(n int64) int64 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Int63n(n)
|
||||
}
|
||||
|
||||
// Intn implements rand.Intn on the grpcrand global source.
|
||||
func Intn(n int) int {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Intn(n)
|
||||
}
|
||||
|
||||
// Int31n implements rand.Int31n on the grpcrand global source.
|
||||
func Int31n(n int32) int32 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Int31n(n)
|
||||
}
|
||||
|
||||
// Float64 implements rand.Float64 on the grpcrand global source.
|
||||
func Float64() float64 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Float64()
|
||||
}
|
||||
|
||||
// Uint64 implements rand.Uint64 on the grpcrand global source.
|
||||
func Uint64() uint64 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Uint64()
|
||||
}
|
||||
|
||||
// Uint32 implements rand.Uint32 on the grpcrand global source.
|
||||
func Uint32() uint32 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Uint32()
|
||||
}
|
||||
|
||||
// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
|
||||
func ExpFloat64() float64 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.ExpFloat64()
|
||||
}
|
||||
|
||||
// Shuffle implements rand.Shuffle on the grpcrand global source.
|
||||
var Shuffle = func(n int, f func(int, int)) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
r.Shuffle(n, f)
|
||||
}
|
||||
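The grpcrand package removed above existed to share one math/rand source safely across goroutines by guarding it with a mutex. The top-level math/rand functions are already safe for concurrent use, so a wrapper like this only matters when you want a dedicated, seedable source; a minimal sketch of that same lock-guarded pattern:

package main

import (
    "fmt"
    "math/rand"
    "sync"
)

// lockedRand shows the pattern the deleted grpcrand file used: a single
// *rand.Rand guarded by a mutex so it can be shared across goroutines.
type lockedRand struct {
    mu sync.Mutex
    r  *rand.Rand
}

func newLockedRand(seed int64) *lockedRand {
    return &lockedRand{r: rand.New(rand.NewSource(seed))}
}

func (l *lockedRand) Intn(n int) int {
    l.mu.Lock()
    defer l.mu.Unlock()
    return l.r.Intn(n)
}

func main() {
    lr := newLockedRand(42)
    fmt.Println(lr.Intn(100))
}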
51
vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
generated
vendored
51
vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
generated
vendored
@@ -20,7 +20,6 @@ package grpcsync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/internal/buffer"
|
||||
)
|
||||
@@ -38,8 +37,6 @@ type CallbackSerializer struct {
|
||||
done chan struct{}
|
||||
|
||||
callbacks *buffer.Unbounded
|
||||
closedMu sync.Mutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
|
||||
@@ -65,56 +62,34 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
|
||||
// callbacks to be executed by the serializer. It is not possible to add
|
||||
// callbacks once the context passed to NewCallbackSerializer is cancelled.
|
||||
func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
|
||||
cs.closedMu.Lock()
|
||||
defer cs.closedMu.Unlock()
|
||||
|
||||
if cs.closed {
|
||||
return false
|
||||
}
|
||||
cs.callbacks.Put(f)
|
||||
return true
|
||||
return cs.callbacks.Put(f) == nil
|
||||
}
|
||||
|
||||
func (cs *CallbackSerializer) run(ctx context.Context) {
|
||||
var backlog []func(context.Context)
|
||||
|
||||
defer close(cs.done)
|
||||
|
||||
// TODO: when Go 1.21 is the oldest supported version, this loop and Close
|
||||
// can be replaced with:
|
||||
//
|
||||
// context.AfterFunc(ctx, cs.callbacks.Close)
|
||||
for ctx.Err() == nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Do nothing here. Next iteration of the for loop will not happen,
|
||||
// since ctx.Err() would be non-nil.
|
||||
case callback, ok := <-cs.callbacks.Get():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
case cb := <-cs.callbacks.Get():
|
||||
cs.callbacks.Load()
|
||||
callback.(func(ctx context.Context))(ctx)
|
||||
cb.(func(context.Context))(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch pending callbacks if any, and execute them before returning from
|
||||
// this method and closing cs.done.
|
||||
cs.closedMu.Lock()
|
||||
cs.closed = true
|
||||
backlog = cs.fetchPendingCallbacks()
|
||||
// Close the buffer to prevent new callbacks from being added.
|
||||
cs.callbacks.Close()
|
||||
cs.closedMu.Unlock()
|
||||
for _, b := range backlog {
|
||||
b(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
|
||||
var backlog []func(context.Context)
|
||||
for {
|
||||
select {
|
||||
case b := <-cs.callbacks.Get():
|
||||
backlog = append(backlog, b.(func(context.Context)))
|
||||
cs.callbacks.Load()
|
||||
default:
|
||||
return backlog
|
||||
}
|
||||
// Run all pending callbacks.
|
||||
for cb := range cs.callbacks.Get() {
|
||||
cs.callbacks.Load()
|
||||
cb.(func(context.Context))(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
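The CallbackSerializer change above reworks Schedule to report success via the buffer's Put return value and lets run drain the buffer until it is closed. To see the overall shape of the abstraction, here is a deliberately simplified analogue that runs queued callbacks one at a time on a single goroutine until its context is cancelled; it uses a small bounded channel instead of buffer.Unbounded, so it sketches the idea rather than the vendored behavior.

package main

import (
    "context"
    "fmt"
    "time"
)

// serializer queues callbacks and executes them in order on one goroutine.
type serializer struct {
    cb   chan func(context.Context)
    done chan struct{}
}

func newSerializer(ctx context.Context) *serializer {
    s := &serializer{cb: make(chan func(context.Context), 16), done: make(chan struct{})}
    go func() {
        defer close(s.done)
        for {
            select {
            case <-ctx.Done():
                return
            case f := <-s.cb:
                f(ctx)
            }
        }
    }()
    return s
}

// schedule queues f; it reports false if the queue is full (the vendored
// version instead reports false once the serializer has been closed).
func (s *serializer) schedule(f func(context.Context)) bool {
    select {
    case s.cb <- f:
        return true
    default:
        return false
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    s := newSerializer(ctx)
    for i := 0; i < 3; i++ {
        i := i
        s.schedule(func(context.Context) { fmt.Println("callback", i) })
    }
    time.Sleep(100 * time.Millisecond) // crude wait so the callbacks run (toy only)
    cancel()
    <-s.done
}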
5
vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
generated
vendored
5
vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
generated
vendored
@@ -20,8 +20,6 @@ package grpcutil
import (
    "strings"

    "google.golang.org/grpc/internal/envconfig"
)

// RegisteredCompressorNames holds names of the registered compressors.
@@ -40,8 +38,5 @@ func IsCompressorNameRegistered(name string) bool {
// RegisteredCompressors returns a string of registered compressor names
// separated by comma.
func RegisteredCompressors() string {
    if !envconfig.AdvertiseCompressors {
        return ""
    }
    return strings.Join(RegisteredCompressorNames, ",")
}
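RegisteredCompressors above is what ends up being advertised in the grpc-accept-encoding header: an empty string when advertising is disabled, otherwise the comma-joined list of registered compressor names (importing google.golang.org/grpc/encoding/gzip, for example, registers "gzip"). A tiny self-contained sketch of that gating and joining:

package main

import (
    "fmt"
    "strings"
)

// registeredCompressors mirrors the gating above: empty when advertising is
// off, otherwise the comma-joined list of names.
func registeredCompressors(advertise bool, names []string) string {
    if !advertise {
        return ""
    }
    return strings.Join(names, ",")
}

func main() {
    fmt.Println(registeredCompressors(true, []string{"gzip", "zstd"}))  // "gzip,zstd"
    fmt.Println(registeredCompressors(false, []string{"gzip", "zstd"})) // ""
}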
177
vendor/google.golang.org/grpc/internal/idle/idle.go
generated
vendored
177
vendor/google.golang.org/grpc/internal/idle/idle.go
generated
vendored
@@ -26,8 +26,6 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// For overriding in unit tests.
|
||||
@@ -39,27 +37,12 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
|
||||
// and exit from idle mode.
|
||||
type Enforcer interface {
|
||||
ExitIdleMode() error
|
||||
EnterIdleMode() error
|
||||
EnterIdleMode()
|
||||
}
|
||||
|
||||
// Manager defines the functionality required to track RPC activity on a
|
||||
// channel.
|
||||
type Manager interface {
|
||||
OnCallBegin() error
|
||||
OnCallEnd()
|
||||
Close()
|
||||
}
|
||||
|
||||
type noopManager struct{}
|
||||
|
||||
func (noopManager) OnCallBegin() error { return nil }
|
||||
func (noopManager) OnCallEnd() {}
|
||||
func (noopManager) Close() {}
|
||||
|
||||
// manager implements the Manager interface. It uses atomic operations to
|
||||
// synchronize access to shared state and a mutex to guarantee mutual exclusion
|
||||
// in a critical section.
|
||||
type manager struct {
|
||||
// Manager implements idleness detection and calls the configured Enforcer to
|
||||
// enter/exit idle mode when appropriate. Must be created by NewManager.
|
||||
type Manager struct {
|
||||
// State accessed atomically.
|
||||
lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
|
||||
activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
|
||||
@@ -69,8 +52,7 @@ type manager struct {
|
||||
// Can be accessed without atomics or mutex since these are set at creation
|
||||
// time and read-only after that.
|
||||
enforcer Enforcer // Functionality provided by grpc.ClientConn.
|
||||
timeout int64 // Idle timeout duration nanos stored as an int64.
|
||||
logger grpclog.LoggerV2
|
||||
timeout time.Duration
|
||||
|
||||
// idleMu is used to guarantee mutual exclusion in two scenarios:
|
||||
// - Opposing intentions:
|
||||
@@ -88,57 +70,48 @@ type manager struct {
|
||||
timer *time.Timer
|
||||
}
|
||||
|
||||
// ManagerOptions is a collection of options used by
|
||||
// NewManager.
|
||||
type ManagerOptions struct {
|
||||
Enforcer Enforcer
|
||||
Timeout time.Duration
|
||||
Logger grpclog.LoggerV2
|
||||
}
|
||||
|
||||
// NewManager creates a new idleness manager implementation for the
|
||||
// given idle timeout.
|
||||
func NewManager(opts ManagerOptions) Manager {
|
||||
if opts.Timeout == 0 {
|
||||
return noopManager{}
|
||||
// given idle timeout. It begins in idle mode.
|
||||
func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
|
||||
return &Manager{
|
||||
enforcer: enforcer,
|
||||
timeout: timeout,
|
||||
actuallyIdle: true,
|
||||
activeCallsCount: -math.MaxInt32,
|
||||
}
|
||||
|
||||
m := &manager{
|
||||
enforcer: opts.Enforcer,
|
||||
timeout: int64(opts.Timeout),
|
||||
logger: opts.Logger,
|
||||
}
|
||||
m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout)
|
||||
return m
|
||||
}
|
||||
|
||||
// resetIdleTimer resets the idle timer to the given duration. This method
|
||||
// should only be called from the timer callback.
|
||||
func (m *manager) resetIdleTimer(d time.Duration) {
|
||||
m.idleMu.Lock()
|
||||
defer m.idleMu.Unlock()
|
||||
|
||||
if m.timer == nil {
|
||||
// Only close sets timer to nil. We are done.
|
||||
// resetIdleTimerLocked resets the idle timer to the given duration. Called
|
||||
// when exiting idle mode or when the timer fires and we need to reset it.
|
||||
func (m *Manager) resetIdleTimerLocked(d time.Duration) {
|
||||
if m.isClosed() || m.timeout == 0 || m.actuallyIdle {
|
||||
return
|
||||
}
|
||||
|
||||
// It is safe to ignore the return value from Reset() because this method is
|
||||
// only ever called from the timer callback, which means the timer has
|
||||
// already fired.
|
||||
m.timer.Reset(d)
|
||||
// only ever called from the timer callback or when exiting idle mode.
|
||||
if m.timer != nil {
|
||||
m.timer.Stop()
|
||||
}
|
||||
m.timer = timeAfterFunc(d, m.handleIdleTimeout)
|
||||
}
|
||||
|
||||
func (m *Manager) resetIdleTimer(d time.Duration) {
|
||||
m.idleMu.Lock()
|
||||
defer m.idleMu.Unlock()
|
||||
m.resetIdleTimerLocked(d)
|
||||
}
|
||||
|
||||
// handleIdleTimeout is the timer callback that is invoked upon expiry of the
|
||||
// configured idle timeout. The channel is considered inactive if there are no
|
||||
// ongoing calls and no RPC activity since the last time the timer fired.
|
||||
func (m *manager) handleIdleTimeout() {
|
||||
func (m *Manager) handleIdleTimeout() {
|
||||
if m.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&m.activeCallsCount) > 0 {
|
||||
m.resetIdleTimer(time.Duration(m.timeout))
|
||||
m.resetIdleTimer(m.timeout)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -148,24 +121,12 @@ func (m *manager) handleIdleTimeout() {
|
||||
// Set the timer to fire after a duration of idle timeout, calculated
|
||||
// from the time the most recent RPC completed.
|
||||
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
|
||||
m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano()))
|
||||
m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout)
|
||||
return
|
||||
}
|
||||
|
||||
// This CAS operation is extremely likely to succeed given that there has
|
||||
// been no activity since the last time we were here. Setting the
|
||||
// activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the
|
||||
// channel is either in idle mode or is trying to get there.
|
||||
if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
|
||||
// This CAS operation can fail if an RPC started after we checked for
|
||||
// activity at the top of this method, or one was ongoing from before
|
||||
// the last time we were here. In both case, reset the timer and return.
|
||||
m.resetIdleTimer(time.Duration(m.timeout))
|
||||
return
|
||||
}
|
||||
|
||||
// Now that we've set the active calls count to -math.MaxInt32, it's time to
|
||||
// actually move to idle mode.
|
||||
// Now that we've checked that there has been no activity, attempt to enter
|
||||
// idle mode, which is very likely to succeed.
|
||||
if m.tryEnterIdleMode() {
|
||||
// Successfully entered idle mode. No timer needed until we exit idle.
|
||||
return
|
||||
@@ -174,8 +135,7 @@ func (m *manager) handleIdleTimeout() {
|
||||
// Failed to enter idle mode due to a concurrent RPC that kept the channel
|
||||
// active, or because of an error from the channel. Undo the attempt to
|
||||
// enter idle, and reset the timer to try again later.
|
||||
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
|
||||
m.resetIdleTimer(time.Duration(m.timeout))
|
||||
m.resetIdleTimer(m.timeout)
|
||||
}
|
||||
|
||||
// tryEnterIdleMode instructs the channel to enter idle mode. But before
|
||||
@@ -185,36 +145,49 @@ func (m *manager) handleIdleTimeout() {
|
||||
// Return value indicates whether or not the channel moved to idle mode.
|
||||
//
|
||||
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
|
||||
func (m *manager) tryEnterIdleMode() bool {
|
||||
func (m *Manager) tryEnterIdleMode() bool {
|
||||
// Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
|
||||
// that the channel is either in idle mode or is trying to get there.
|
||||
if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
|
||||
// This CAS operation can fail if an RPC started after we checked for
|
||||
// activity in the timer handler, or one was ongoing from before the
|
||||
// last time the timer fired, or if a test is attempting to enter idle
|
||||
// mode without checking. In all cases, abort going into idle mode.
|
||||
return false
|
||||
}
|
||||
// N.B. if we fail to enter idle mode after this, we must re-add
|
||||
// math.MaxInt32 to m.activeCallsCount.
|
||||
|
||||
m.idleMu.Lock()
|
||||
defer m.idleMu.Unlock()
|
||||
|
||||
if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
|
||||
// We raced and lost to a new RPC. Very rare, but stop entering idle.
|
||||
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
|
||||
return false
|
||||
}
|
||||
if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
|
||||
// An very short RPC could have come in (and also finished) after we
|
||||
// A very short RPC could have come in (and also finished) after we
|
||||
// checked for calls count and activity in handleIdleTimeout(), but
|
||||
// before the CAS operation. So, we need to check for activity again.
|
||||
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
|
||||
return false
|
||||
}
|
||||
|
||||
// No new RPCs have come in since we last set the active calls count value
|
||||
// -math.MaxInt32 in the timer callback. And since we have the lock, it is
|
||||
// safe to enter idle mode now.
|
||||
if err := m.enforcer.EnterIdleMode(); err != nil {
|
||||
m.logger.Errorf("Failed to enter idle mode: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Successfully entered idle mode.
|
||||
// No new RPCs have come in since we set the active calls count value to
|
||||
// -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
|
||||
// unconditionally now.
|
||||
m.enforcer.EnterIdleMode()
|
||||
m.actuallyIdle = true
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Manager) EnterIdleModeForTesting() {
|
||||
m.tryEnterIdleMode()
|
||||
}
|
||||
|
||||
// OnCallBegin is invoked at the start of every RPC.
|
||||
func (m *manager) OnCallBegin() error {
|
||||
func (m *Manager) OnCallBegin() error {
|
||||
if m.isClosed() {
|
||||
return nil
|
||||
}
|
||||
@@ -227,7 +200,7 @@ func (m *manager) OnCallBegin() error {
|
||||
|
||||
// Channel is either in idle mode or is in the process of moving to idle
|
||||
// mode. Attempt to exit idle mode to allow this RPC.
|
||||
if err := m.exitIdleMode(); err != nil {
|
||||
if err := m.ExitIdleMode(); err != nil {
|
||||
// Undo the increment to calls count, and return an error causing the
|
||||
// RPC to fail.
|
||||
atomic.AddInt32(&m.activeCallsCount, -1)
|
||||
@@ -238,28 +211,30 @@ func (m *manager) OnCallBegin() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// exitIdleMode instructs the channel to exit idle mode.
|
||||
//
|
||||
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
|
||||
func (m *manager) exitIdleMode() error {
|
||||
// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
|
||||
// internal state.
|
||||
func (m *Manager) ExitIdleMode() error {
|
||||
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
|
||||
m.idleMu.Lock()
|
||||
defer m.idleMu.Unlock()
|
||||
|
||||
if !m.actuallyIdle {
|
||||
// This can happen in two scenarios:
|
||||
if m.isClosed() || !m.actuallyIdle {
|
||||
// This can happen in three scenarios:
|
||||
// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
|
||||
// tryEnterIdleMode(). But before the latter could grab the lock, an RPC
|
||||
// came in and OnCallBegin() noticed that the calls count is negative.
|
||||
// - Channel is in idle mode, and multiple new RPCs come in at the same
|
||||
// time, all of them notice a negative calls count in OnCallBegin and get
|
||||
// here. The first one to get the lock would got the channel to exit idle.
|
||||
// - Channel is not in idle mode, and the user calls Connect which calls
|
||||
// m.ExitIdleMode.
|
||||
//
|
||||
// Either way, nothing to do here.
|
||||
// In any case, there is nothing to do here.
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := m.enforcer.ExitIdleMode(); err != nil {
|
||||
return fmt.Errorf("channel failed to exit idle mode: %v", err)
|
||||
return fmt.Errorf("failed to exit idle mode: %w", err)
|
||||
}
|
||||
|
||||
// Undo the idle entry process. This also respects any new RPC attempts.
|
||||
@@ -267,12 +242,12 @@ func (m *manager) exitIdleMode() error {
|
||||
m.actuallyIdle = false
|
||||
|
||||
// Start a new timer to fire after the configured idle timeout.
|
||||
m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
|
||||
m.resetIdleTimerLocked(m.timeout)
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnCallEnd is invoked at the end of every RPC.
|
||||
func (m *manager) OnCallEnd() {
|
||||
func (m *Manager) OnCallEnd() {
|
||||
if m.isClosed() {
|
||||
return
|
||||
}
|
||||
@@ -287,15 +262,17 @@ func (m *manager) OnCallEnd() {
|
||||
atomic.AddInt32(&m.activeCallsCount, -1)
|
||||
}
|
||||
|
||||
func (m *manager) isClosed() bool {
|
||||
func (m *Manager) isClosed() bool {
|
||||
return atomic.LoadInt32(&m.closed) == 1
|
||||
}
|
||||
|
||||
func (m *manager) Close() {
|
||||
func (m *Manager) Close() {
|
||||
atomic.StoreInt32(&m.closed, 1)
|
||||
|
||||
m.idleMu.Lock()
|
||||
m.timer.Stop()
|
||||
m.timer = nil
|
||||
if m.timer != nil {
|
||||
m.timer.Stop()
|
||||
m.timer = nil
|
||||
}
|
||||
m.idleMu.Unlock()
|
||||
}
|
||||
|
||||
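The rewritten idle manager above now starts in idle mode, takes its Enforcer and timeout directly in NewManager, and treats EnterIdleMode as infallible while ExitIdleMode can still return an error. The toy below mirrors that contract with a drastically simplified tracker (a polling goroutine instead of the vendored atomics-plus-timer scheme); it is illustrative only.

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

// enforcer mirrors the new Enforcer contract: exiting idle may fail, entering
// idle cannot. A real implementation is grpc.ClientConn; this is a toy.
type enforcer struct{ idle atomic.Bool }

func (e *enforcer) ExitIdleMode() error { e.idle.Store(false); fmt.Println("exit idle"); return nil }
func (e *enforcer) EnterIdleMode()      { e.idle.Store(true); fmt.Println("enter idle") }

// idleTracker is a simplified stand-in for idle.Manager: it exits idle when a
// call begins and re-enters idle after a quiet period of at least `timeout`.
type idleTracker struct {
    enf      *enforcer
    lastCall atomic.Int64
}

func newIdleTracker(enf *enforcer, timeout time.Duration) *idleTracker {
    t := &idleTracker{enf: enf}
    t.lastCall.Store(time.Now().UnixNano())
    go func() {
        // Polling ticker for the toy; the ticker is never stopped here.
        for range time.Tick(timeout) {
            if !enf.idle.Load() && time.Since(time.Unix(0, t.lastCall.Load())) >= timeout {
                enf.EnterIdleMode()
            }
        }
    }()
    return t
}

func (t *idleTracker) OnCallBegin() error {
    t.lastCall.Store(time.Now().UnixNano())
    if t.enf.idle.Load() {
        return t.enf.ExitIdleMode()
    }
    return nil
}

func main() {
    enf := &enforcer{}
    enf.idle.Store(true) // like the vendored manager, start in idle mode
    tr := newIdleTracker(enf, 50*time.Millisecond)
    _ = tr.OnCallBegin()               // exits idle for the first RPC
    time.Sleep(200 * time.Millisecond) // no activity: the tracker re-enters idle
    _ = tr.OnCallBegin()               // exits idle again
    time.Sleep(20 * time.Millisecond)
}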
46
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
46
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
@@ -57,7 +57,7 @@ var (
|
||||
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
|
||||
// stored in the passed in attributes. This is set by
|
||||
// credentials/xds/xds.go.
|
||||
GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo
|
||||
GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
|
||||
// GetServerCredentials returns the transport credentials configured on a
|
||||
// gRPC server. An xDS-enabled server needs to know what type of credentials
|
||||
// is configured on the underlying gRPC server. This is set by server.go.
|
||||
@@ -68,11 +68,11 @@ var (
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
CanonicalString any // func (codes.Code) string
|
||||
// DrainServerTransports initiates a graceful close of existing connections
|
||||
// on a gRPC server accepted on the provided listener address. An
|
||||
// xDS-enabled server invokes this method on a grpc.Server when a particular
|
||||
// listener moves to "not-serving" mode.
|
||||
DrainServerTransports any // func(*grpc.Server, string)
|
||||
// IsRegisteredMethod returns whether the passed in method is registered as
|
||||
// a method on the server.
|
||||
IsRegisteredMethod any // func(*grpc.Server, string) bool
|
||||
// ServerFromContext returns the server from the context.
|
||||
ServerFromContext any // func(context.Context) *grpc.Server
|
||||
// AddGlobalServerOptions adds an array of ServerOption that will be
|
||||
// effective globally for newly created servers. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
@@ -106,6 +106,14 @@ var (
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ClearGlobalDialOptions func()
|
||||
|
||||
// AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be
|
||||
// configured for newly created ClientConns.
|
||||
AddGlobalPerTargetDialOptions any // func (opt any)
|
||||
// ClearGlobalPerTargetDialOptions clears the slice of global late apply
|
||||
// dial options.
|
||||
ClearGlobalPerTargetDialOptions func()
|
||||
|
||||
// JoinDialOptions combines the dial options passed as arguments into a
|
||||
// single dial option.
|
||||
JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
|
||||
@@ -126,7 +134,8 @@ var (
|
||||
// deleted or changed.
|
||||
BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
|
||||
|
||||
// SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn
|
||||
// SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a
|
||||
// provided grpc.ClientConn.
|
||||
SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
|
||||
|
||||
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
|
||||
@@ -177,13 +186,32 @@ var (
|
||||
GRPCResolverSchemeExtraMetadata string = "xds"
|
||||
|
||||
// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
|
||||
EnterIdleModeForTesting any // func(*grpc.ClientConn) error
|
||||
EnterIdleModeForTesting any // func(*grpc.ClientConn)
|
||||
|
||||
// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
|
||||
ExitIdleModeForTesting any // func(*grpc.ClientConn) error
|
||||
|
||||
ChannelzTurnOffForTesting func()
|
||||
|
||||
// TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
|
||||
// invoke resource-not-found error for the given resource type and name.
|
||||
TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
|
||||
|
||||
// FromOutgoingContextRaw returns the un-merged, intermediary contents of
|
||||
// metadata.rawMD.
|
||||
FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
|
||||
|
||||
// UserSetDefaultScheme is set to true if the user has overridden the
|
||||
// default resolver scheme.
|
||||
UserSetDefaultScheme bool = false
|
||||
|
||||
// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
|
||||
// is the number of elements. swap swaps the elements with indexes i and j.
|
||||
ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
|
||||
)
|
||||
|
||||
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
||||
// HealthChecker defines the signature of the client-side LB channel health
|
||||
// checking function.
|
||||
//
|
||||
// The implementation is expected to create a health checking RPC stream by
|
||||
// calling newStream(), watch for the health status of serviceName, and report
|
||||
|
||||
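These internal hooks are declared as `any` so the internal package never has to import the packages that register or call them; call sites assert the documented signature once (the http2_client change further down does exactly that with FromOutgoingContextRaw). A hedged sketch of that registration/assertion pattern with made-up names, not the grpc-go API itself:

package hooks

import "context"

// DrainHook is deliberately typed as any so the declaring package does not
// have to import the provider or the consumer. Name and signature are
// illustrative only.
var DrainHook any

// RegisterDrainHook would be called from the implementing package's init().
func RegisterDrainHook(f func(context.Context, string) error) { DrainHook = f }

// callDrain shows the consumer side: assert the hook back to the signature
// its comment documents before using it.
func callDrain(ctx context.Context, addr string) error {
	drain, ok := DrainHook.(func(context.Context, string) error)
	if !ok {
		return nil // hook not registered
	}
	return drain(ctx, addr)
}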
35
vendor/google.golang.org/grpc/internal/pretty/pretty.go
generated
vendored
@@ -24,10 +24,8 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
protov1 "github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
protov2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/protoadapt"
|
||||
)
|
||||
|
||||
const jsonIndent = " "
|
||||
@@ -36,21 +34,14 @@ const jsonIndent = " "
|
||||
//
|
||||
// If marshal fails, it falls back to fmt.Sprintf("%+v").
|
||||
func ToJSON(e any) string {
|
||||
switch ee := e.(type) {
|
||||
case protov1.Message:
|
||||
mm := jsonpb.Marshaler{Indent: jsonIndent}
|
||||
ret, err := mm.MarshalToString(ee)
|
||||
if err != nil {
|
||||
// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
|
||||
// messages are not imported, and this will fail because the message
|
||||
// is not found.
|
||||
return fmt.Sprintf("%+v", ee)
|
||||
}
|
||||
return ret
|
||||
case protov2.Message:
|
||||
if ee, ok := e.(protoadapt.MessageV1); ok {
|
||||
e = protoadapt.MessageV2Of(ee)
|
||||
}
|
||||
|
||||
if ee, ok := e.(protoadapt.MessageV2); ok {
|
||||
mm := protojson.MarshalOptions{
|
||||
Multiline: true,
|
||||
Indent: jsonIndent,
|
||||
Multiline: true,
|
||||
}
|
||||
ret, err := mm.Marshal(ee)
|
||||
if err != nil {
|
||||
@@ -60,13 +51,13 @@ func ToJSON(e any) string {
|
||||
return fmt.Sprintf("%+v", ee)
|
||||
}
|
||||
return string(ret)
|
||||
default:
|
||||
ret, err := json.MarshalIndent(ee, "", jsonIndent)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("%+v", ee)
|
||||
}
|
||||
return string(ret)
|
||||
}
|
||||
|
||||
ret, err := json.MarshalIndent(e, "", jsonIndent)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("%+v", e)
|
||||
}
|
||||
return string(ret)
|
||||
}
|
||||
|
||||
// FormatJSON formats the input json bytes with indentation.
|
||||
|
||||
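ToJSON now routes everything through the protobuf v2 API, upgrading legacy v1 messages with protoadapt and keeping the %+v fallback. A trimmed-down, hedged sketch of that conversion (assumes any registered proto message; durationpb is used only as a convenient example):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/durationpb"
)

// toJSON illustrates the approach in pretty.ToJSON: upgrade v1 messages,
// marshal with protojson, and fall back to %+v on error or non-proto values.
func toJSON(e any) string {
	if v1, ok := e.(protoadapt.MessageV1); ok {
		e = protoadapt.MessageV2Of(v1)
	}
	if m, ok := e.(protoadapt.MessageV2); ok {
		b, err := protojson.MarshalOptions{Multiline: true, Indent: "  "}.Marshal(m)
		if err != nil {
			return fmt.Sprintf("%+v", m)
		}
		return string(b)
	}
	return fmt.Sprintf("%+v", e)
}

func main() {
	fmt.Println(toJSON(durationpb.New(0)))
}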
116
vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
generated
vendored
@@ -23,8 +23,8 @@ package dns
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
@@ -36,26 +36,37 @@ import (
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/internal/resolver/dns/internal"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
|
||||
// addresses from SRV records. Must not be changed after init time.
|
||||
var EnableSRVLookups = false
|
||||
|
||||
var logger = grpclog.Component("dns")
|
||||
|
||||
// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
|
||||
// single variable for testing the resolver?
|
||||
var (
|
||||
newTimer = time.NewTimer
|
||||
newTimerDNSResRate = time.NewTimer
|
||||
// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
|
||||
// addresses from SRV records. Must not be changed after init time.
|
||||
EnableSRVLookups = false
|
||||
|
||||
// MinResolutionInterval is the minimum interval at which re-resolutions are
|
||||
// allowed. This helps to prevent excessive re-resolution.
|
||||
MinResolutionInterval = 30 * time.Second
|
||||
|
||||
// ResolvingTimeout specifies the maximum duration for a DNS resolution request.
|
||||
// If the timeout expires before a response is received, the request will be canceled.
|
||||
//
|
||||
// It is recommended to set this value at application startup. Avoid modifying this variable
|
||||
// after initialization as it's not thread-safe for concurrent modification.
|
||||
ResolvingTimeout = 30 * time.Second
|
||||
|
||||
logger = grpclog.Component("dns")
|
||||
)
|
||||
|
||||
func init() {
|
||||
resolver.Register(NewBuilder())
|
||||
internal.TimeAfterFunc = time.After
|
||||
internal.TimeNowFunc = time.Now
|
||||
internal.TimeUntilFunc = time.Until
|
||||
internal.NewNetResolver = newNetResolver
|
||||
internal.AddressDialer = addressDialer
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -70,23 +81,6 @@ const (
|
||||
txtAttribute = "grpc_config="
|
||||
)
|
||||
|
||||
var (
|
||||
errMissingAddr = errors.New("dns resolver: missing address")
|
||||
|
||||
// Addresses ending with a colon that is supposed to be the separator
|
||||
// between host and port is not allowed. E.g. "::" is a valid address as
|
||||
// it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
|
||||
// a colon as the host and port separator
|
||||
errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
|
||||
)
|
||||
|
||||
var (
|
||||
defaultResolver netResolver = net.DefaultResolver
|
||||
// To prevent excessive re-resolution, we enforce a rate limit on DNS
|
||||
// resolution requests.
|
||||
minDNSResRate = 30 * time.Second
|
||||
)
|
||||
|
||||
var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
|
||||
return func(ctx context.Context, network, _ string) (net.Conn, error) {
|
||||
var dialer net.Dialer
|
||||
@@ -94,7 +88,11 @@ var addressDialer = func(address string) func(context.Context, string, string) (
|
||||
}
|
||||
}
|
||||
|
||||
var newNetResolver = func(authority string) (netResolver, error) {
|
||||
var newNetResolver = func(authority string) (internal.NetResolver, error) {
|
||||
if authority == "" {
|
||||
return net.DefaultResolver, nil
|
||||
}
|
||||
|
||||
host, port, err := parseTarget(authority, defaultDNSSvrPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -104,7 +102,7 @@ var newNetResolver = func(authority string) (netResolver, error) {
|
||||
|
||||
return &net.Resolver{
|
||||
PreferGo: true,
|
||||
Dial: addressDialer(authorityWithPort),
|
||||
Dial: internal.AddressDialer(authorityWithPort),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -142,13 +140,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
||||
disableServiceConfig: opts.DisableServiceConfig,
|
||||
}
|
||||
|
||||
if target.URL.Host == "" {
|
||||
d.resolver = defaultResolver
|
||||
} else {
|
||||
d.resolver, err = newNetResolver(target.URL.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d.resolver, err = internal.NewNetResolver(target.URL.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d.wg.Add(1)
|
||||
@@ -161,12 +155,6 @@ func (b *dnsBuilder) Scheme() string {
|
||||
return "dns"
|
||||
}
|
||||
|
||||
type netResolver interface {
|
||||
LookupHost(ctx context.Context, host string) (addrs []string, err error)
|
||||
LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
|
||||
LookupTXT(ctx context.Context, name string) (txts []string, err error)
|
||||
}
|
||||
|
||||
// deadResolver is a resolver that does nothing.
|
||||
type deadResolver struct{}
|
||||
|
||||
@@ -178,7 +166,7 @@ func (deadResolver) Close() {}
|
||||
type dnsResolver struct {
|
||||
host string
|
||||
port string
|
||||
resolver netResolver
|
||||
resolver internal.NetResolver
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
cc resolver.ClientConn
|
||||
@@ -223,45 +211,43 @@ func (d *dnsResolver) watcher() {
|
||||
err = d.cc.UpdateState(*state)
|
||||
}
|
||||
|
||||
var timer *time.Timer
|
||||
var nextResolutionTime time.Time
|
||||
if err == nil {
|
||||
// Success resolving, wait for the next ResolveNow. However, also wait 30
|
||||
// seconds at the very least to prevent constantly re-resolving.
|
||||
backoffIndex = 1
|
||||
timer = newTimerDNSResRate(minDNSResRate)
|
||||
nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval)
|
||||
select {
|
||||
case <-d.ctx.Done():
|
||||
timer.Stop()
|
||||
return
|
||||
case <-d.rn:
|
||||
}
|
||||
} else {
|
||||
// Poll on an error found in DNS Resolver or an error received from
|
||||
// ClientConn.
|
||||
timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
|
||||
nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex))
|
||||
backoffIndex++
|
||||
}
|
||||
select {
|
||||
case <-d.ctx.Done():
|
||||
timer.Stop()
|
||||
return
|
||||
case <-timer.C:
|
||||
case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)):
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
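Instead of juggling two timers, the watcher now records an absolute next-resolution time (minimum interval on success, exponential backoff on failure) and converts it back into a wait through the injectable time hooks. A hedged standalone sketch of that scheduling step, with the hooks defaulting to the time package as in init():

package main

import (
	"context"
	"fmt"
	"time"
)

// Overridable hooks, mirroring internal.TimeNowFunc/TimeUntilFunc/TimeAfterFunc.
var (
	timeNow   = time.Now
	timeUntil = time.Until
	timeAfter = time.After
)

// waitForNext computes an absolute deadline and sleeps until it, honoring
// cancellation. This is an illustration of the pattern, not the resolver itself.
func waitForNext(ctx context.Context, lastErr error, minInterval, backoff time.Duration) error {
	var next time.Time
	if lastErr == nil {
		next = timeNow().Add(minInterval)
	} else {
		next = timeNow().Add(backoff)
	}
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-timeAfter(timeUntil(next)):
		return nil
	}
}

func main() {
	fmt.Println(waitForNext(context.Background(), nil, 10*time.Millisecond, time.Second))
}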
func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
|
||||
func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) {
|
||||
if !EnableSRVLookups {
|
||||
return nil, nil
|
||||
}
|
||||
var newAddrs []resolver.Address
|
||||
_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
|
||||
_, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host)
|
||||
if err != nil {
|
||||
err = handleDNSError(err, "SRV") // may become nil
|
||||
return nil, err
|
||||
}
|
||||
for _, s := range srvs {
|
||||
lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
|
||||
lbAddrs, err := d.resolver.LookupHost(ctx, s.Target)
|
||||
if err != nil {
|
||||
err = handleDNSError(err, "A") // may become nil
|
||||
if err == nil {
|
||||
@@ -298,8 +284,8 @@ func handleDNSError(err error, lookupType string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
||||
ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
|
||||
func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult {
|
||||
ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host)
|
||||
if err != nil {
|
||||
if envconfig.TXTErrIgnore {
|
||||
return nil
|
||||
@@ -326,8 +312,8 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
||||
return d.cc.ParseServiceConfig(sc)
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
|
||||
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
|
||||
func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) {
|
||||
addrs, err := d.resolver.LookupHost(ctx, d.host)
|
||||
if err != nil {
|
||||
err = handleDNSError(err, "A")
|
||||
return nil, err
|
||||
@@ -345,8 +331,10 @@ func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookup() (*resolver.State, error) {
|
||||
srv, srvErr := d.lookupSRV()
|
||||
addrs, hostErr := d.lookupHost()
|
||||
ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout)
|
||||
defer cancel()
|
||||
srv, srvErr := d.lookupSRV(ctx)
|
||||
addrs, hostErr := d.lookupHost(ctx)
|
||||
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
|
||||
return nil, hostErr
|
||||
}
|
||||
@@ -356,7 +344,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
|
||||
state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
|
||||
}
|
||||
if !d.disableServiceConfig {
|
||||
state.ServiceConfig = d.lookupTXT()
|
||||
state.ServiceConfig = d.lookupTXT(ctx)
|
||||
}
|
||||
return &state, nil
|
||||
}
|
||||
@@ -387,7 +375,7 @@ func formatIP(addr string) (addrIP string, ok bool) {
|
||||
// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
|
||||
func parseTarget(target, defaultPort string) (host, port string, err error) {
|
||||
if target == "" {
|
||||
return "", "", errMissingAddr
|
||||
return "", "", internal.ErrMissingAddr
|
||||
}
|
||||
if ip := net.ParseIP(target); ip != nil {
|
||||
// target is an IPv4 or IPv6(without brackets) address
|
||||
@@ -397,7 +385,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
|
||||
if port == "" {
|
||||
// If the port field is empty (target ends with colon), e.g. "[::1]:",
|
||||
// this is an error.
|
||||
return "", "", errEndsWithColon
|
||||
return "", "", internal.ErrEndsWithColon
|
||||
}
|
||||
// target has port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port
|
||||
if host == "" {
|
||||
@@ -437,7 +425,7 @@ func chosenByPercentage(a *int) bool {
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
return grpcrand.Intn(100)+1 <= *a
|
||||
return rand.Intn(100)+1 <= *a
|
||||
}
|
||||
|
||||
func canaryingSC(js string) string {
|
||||
|
||||
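parseTarget accepts bare IPs, host:port pairs, and ":port" targets, while rejecting a target that ends in a bare colon. A simplified, hedged sketch of those splitting rules built on net.SplitHostPort (not the real helper, which also handles the default-port cases shown in its doc comment):

package main

import (
	"errors"
	"fmt"
	"net"
)

// splitTarget: a bare IP keeps the default port, ":80" means localhost:80,
// and a target ending in a bare colon (e.g. "[::]:") is rejected.
func splitTarget(target, defaultPort string) (host, port string, err error) {
	if target == "" {
		return "", "", errors.New("missing address")
	}
	if ip := net.ParseIP(target); ip != nil {
		return target, defaultPort, nil
	}
	host, port, err = net.SplitHostPort(target)
	if err != nil {
		return "", "", err
	}
	if port == "" {
		return "", "", errors.New("missing port after port-separator colon")
	}
	if host == "" {
		host = "localhost"
	}
	return host, port, nil
}

func main() {
	fmt.Println(splitTarget(":80", "443"))        // localhost 80 <nil>
	fmt.Println(splitTarget("2001:db8::1", "443")) // bare IPv6 keeps the default port
}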
77
vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package internal contains functionality internal to the dns resolver package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NetResolver groups the methods on net.Resolver that are used by the DNS
|
||||
// resolver implementation. This allows the default net.Resolver instance to be
|
||||
// overridden from tests.
|
||||
type NetResolver interface {
|
||||
LookupHost(ctx context.Context, host string) (addrs []string, err error)
|
||||
LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
|
||||
LookupTXT(ctx context.Context, name string) (txts []string, err error)
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrMissingAddr is the error returned when building a DNS resolver when
|
||||
// the provided target name is empty.
|
||||
ErrMissingAddr = errors.New("dns resolver: missing address")
|
||||
|
||||
// ErrEndsWithColon is the error returned when building a DNS resolver when
|
||||
// the provided target name ends with a colon that is supposed to be the
|
||||
// separator between host and port. E.g. "::" is a valid address as it is
|
||||
// an IPv6 address (host only) and "[::]:" is invalid as it ends with a
|
||||
// colon as the host and port separator
|
||||
ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
|
||||
)
|
||||
|
||||
// The following vars are overridden from tests.
|
||||
var (
|
||||
// TimeAfterFunc is used by the DNS resolver to wait for the given duration
|
||||
// to elapse. In non-test code, this is implemented by time.After. In test
|
||||
// code, this can be used to control the amount of time the resolver is
|
||||
// blocked waiting for the duration to elapse.
|
||||
TimeAfterFunc func(time.Duration) <-chan time.Time
|
||||
|
||||
// TimeNowFunc is used by the DNS resolver to get the current time.
|
||||
// In non-test code, this is implemented by time.Now. In test code,
|
||||
// this can be used to control the current time for the resolver.
|
||||
TimeNowFunc func() time.Time
|
||||
|
||||
// TimeUntilFunc is used by the DNS resolver to calculate the remaining
|
||||
// wait time for re-resolution. In non-test code, this is implemented by
|
||||
// time.Until. In test code, this can be used to control the remaining
|
||||
// time for resolver to wait for re-resolution.
|
||||
TimeUntilFunc func(time.Time) time.Duration
|
||||
|
||||
// NewNetResolver returns the net.Resolver instance for the given target.
|
||||
NewNetResolver func(string) (NetResolver, error)
|
||||
|
||||
// AddressDialer is the dialer used to dial the DNS server. It accepts the
|
||||
// Host portion of the URL corresponding to the user's dial target and
|
||||
// returns a dial function.
|
||||
AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error)
|
||||
)
|
||||
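Because these hooks are plain package variables, a test inside the grpc module can swap them out to make re-resolution timing deterministic. A hedged usage sketch (this is an assumed pattern, not grpc-go's own test suite, and it only compiles from within the google.golang.org/grpc module because of Go's internal-package rule):

package dns_test

import (
	"testing"
	"time"

	dnsinternal "google.golang.org/grpc/internal/resolver/dns/internal"
)

// TestImmediateReResolution makes every wait fire immediately by returning an
// already-filled channel from TimeAfterFunc, then restores the original hook.
func TestImmediateReResolution(t *testing.T) {
	origAfter := dnsinternal.TimeAfterFunc
	t.Cleanup(func() { dnsinternal.TimeAfterFunc = origAfter })

	dnsinternal.TimeAfterFunc = func(time.Duration) <-chan time.Time {
		ch := make(chan time.Time, 1)
		ch <- time.Now() // fire immediately
		return ch
	}
	// ... build a resolver and assert that ResolveNow triggers a prompt lookup ...
}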
4
vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
generated
vendored
@@ -61,6 +61,10 @@ func (b *builder) Scheme() string {
	return b.scheme
}

func (b *builder) OverrideAuthority(resolver.Target) string {
	return "localhost"
}

type nopResolver struct {
}
15
vendor/google.golang.org/grpc/internal/status/status.go
generated
vendored
@@ -31,10 +31,11 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/protoadapt"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
)
|
||||
|
||||
// Status represents an RPC status code, message, and details. It is immutable
|
||||
@@ -130,14 +131,14 @@ func (s *Status) Err() error {
|
||||
|
||||
// WithDetails returns a new status with the provided details messages appended to the status.
|
||||
// If any errors are encountered, it returns nil and the first error encountered.
|
||||
func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
|
||||
func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
|
||||
if s.Code() == codes.OK {
|
||||
return nil, errors.New("no error details for status with code OK")
|
||||
}
|
||||
// s.Code() != OK implies that s.Proto() != nil.
|
||||
p := s.Proto()
|
||||
for _, detail := range details {
|
||||
any, err := ptypes.MarshalAny(detail)
|
||||
any, err := anypb.New(protoadapt.MessageV2Of(detail))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -154,12 +155,12 @@ func (s *Status) Details() []any {
|
||||
}
|
||||
details := make([]any, 0, len(s.s.Details))
|
||||
for _, any := range s.s.Details {
|
||||
detail := &ptypes.DynamicAny{}
|
||||
if err := ptypes.UnmarshalAny(any, detail); err != nil {
|
||||
detail, err := any.UnmarshalNew()
|
||||
if err != nil {
|
||||
details = append(details, err)
|
||||
continue
|
||||
}
|
||||
details = append(details, detail.Message)
|
||||
details = append(details, detail)
|
||||
}
|
||||
return details
|
||||
}
|
||||
|
||||
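WithDetails now packs details with anypb.New after upgrading v1 messages via protoadapt, and Details unpacks them with UnmarshalNew instead of the old ptypes.DynamicAny dance. A hedged standalone sketch of that round trip (wrapperspb is used only as a convenient registered message type):

package main

import (
	"fmt"

	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// packUnpack packs a (possibly legacy) message into an Any, then recovers a
// dynamic message with UnmarshalNew, mirroring the status change above.
func packUnpack(detail protoadapt.MessageV1) error {
	a, err := anypb.New(protoadapt.MessageV2Of(detail))
	if err != nil {
		return err
	}
	msg, err := a.UnmarshalNew()
	if err != nil {
		return err
	}
	fmt.Printf("unpacked %T\n", msg)
	return nil
}

func main() {
	_ = packUnpack(wrapperspb.String("example detail"))
}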
@@ -1,9 +1,7 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
//go:build !unix && !windows
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -19,9 +17,13 @@
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
package internal
|
||||
|
||||
// GetSocketOption gets the socket option info of the conn.
|
||||
func GetSocketOption(c any) *SocketOptionData {
|
||||
return nil
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms.
|
||||
func NetDialerWithTCPKeepalive() *net.Dialer {
|
||||
return &net.Dialer{}
|
||||
}
|
||||
54
vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
//go:build unix
|
||||
|
||||
/*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"net"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
|
||||
// the underlying connection with OS default values for keepalive parameters.
|
||||
//
|
||||
// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
|
||||
// appropriate Go version becomes less than our least supported Go version, we
|
||||
// should look into using the new API to make things more straightforward.
|
||||
func NetDialerWithTCPKeepalive() *net.Dialer {
|
||||
return &net.Dialer{
|
||||
// Setting a negative value here prevents the Go stdlib from overriding
|
||||
// the values of TCP keepalive time and interval. It also prevents the
|
||||
// Go stdlib from enabling TCP keepalives by default.
|
||||
KeepAlive: time.Duration(-1),
|
||||
// This method is called after the underlying network socket is created,
|
||||
// but before dialing the socket (or calling its connect() method). The
|
||||
// combination of unconditionally enabling TCP keepalives here, and
|
||||
// disabling the overriding of TCP keepalive parameters by setting the
|
||||
// KeepAlive field to a negative value above, results in OS defaults for
|
||||
// the TCP keepalive interval and time parameters.
|
||||
Control: func(_, _ string, c syscall.RawConn) error {
|
||||
return c.Control(func(fd uintptr) {
|
||||
unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
|
||||
})
|
||||
},
|
||||
}
|
||||
}
|
||||
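The TODO above refers to golang/go#62254. Once the minimum supported Go version includes it, the raw Control-function approach can likely be replaced by net.Dialer's KeepAliveConfig field. A hedged sketch assuming Go 1.23+ (the negative values are, per the net package docs, understood to leave the OS defaults in place, which is the same goal as the code above):

package main

import "net"

// dialerWithKeepalive sketches the post-go1.23 alternative: enable keepalives
// explicitly while leaving idle/interval/count at their platform defaults.
func dialerWithKeepalive() *net.Dialer {
	return &net.Dialer{
		KeepAliveConfig: net.KeepAliveConfig{
			Enable:   true,
			Idle:     -1,
			Interval: -1,
			Count:    -1,
		},
	}
}

func main() { _ = dialerWithKeepalive() }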
54
vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
//go:build windows
|
||||
|
||||
/*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"net"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
|
||||
// the underlying connection with OS default values for keepalive parameters.
|
||||
//
|
||||
// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
|
||||
// appropriate Go version becomes less than our least supported Go version, we
|
||||
// should look into using the new API to make things more straightforward.
|
||||
func NetDialerWithTCPKeepalive() *net.Dialer {
|
||||
return &net.Dialer{
|
||||
// Setting a negative value here prevents the Go stdlib from overriding
|
||||
// the values of TCP keepalive time and interval. It also prevents the
|
||||
// Go stdlib from enabling TCP keepalives by default.
|
||||
KeepAlive: time.Duration(-1),
|
||||
// This method is called after the underlying network socket is created,
|
||||
// but before dialing the socket (or calling its connect() method). The
|
||||
// combination of unconditionally enabling TCP keepalives here, and
|
||||
// disabling the overriding of TCP keepalive parameters by setting the
|
||||
// KeepAlive field to a negative value above, results in OS defaults for
|
||||
// the TCP keepalive interval and time parameters.
|
||||
Control: func(_, _ string, c syscall.RawConn) error {
|
||||
return c.Control(func(fd uintptr) {
|
||||
windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
|
||||
})
|
||||
},
|
||||
}
|
||||
}
|
||||
38
vendor/google.golang.org/grpc/internal/transport/controlbuf.go
generated
vendored
@@ -193,7 +193,7 @@ type goAway struct {
|
||||
code http2.ErrCode
|
||||
debugData []byte
|
||||
headsUp bool
|
||||
closeConn error // if set, loopyWriter will exit, resulting in conn closure
|
||||
closeConn error // if set, loopyWriter will exit with this error
|
||||
}
|
||||
|
||||
func (*goAway) isTransportResponseFrame() bool { return false }
|
||||
@@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) {
|
||||
func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
|
||||
var wakeUp bool
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
@@ -344,7 +344,7 @@ func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, err
|
||||
return false, c.err
|
||||
}
|
||||
if f != nil {
|
||||
if !f(it) { // f wasn't successful
|
||||
if !f() { // f wasn't successful
|
||||
c.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
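Dropping the `it any` parameter means callers now close over the item they are about to enqueue instead of down-casting it inside the callback; the NewStream changes later in this diff (checkForStreamQuota, checkForHeaderListSize) show that in grpc itself. A small hedged sketch of the shape of that change, with illustrative types:

package main

import "fmt"

type item struct{ size int }

// executeAndPut runs the check and enqueues on success. The callback takes no
// argument: the caller captures the item in a closure, which removes the
// `it.(*headerFrame)` style assertions the old signature forced.
func executeAndPut(check func() bool, it *item, queue *[]*item) bool {
	if check != nil && !check() {
		return false
	}
	*queue = append(*queue, it)
	return true
}

func main() {
	var q []*item
	it := &item{size: 3}
	ok := executeAndPut(func() bool { return it.size < 10 }, it, &q) // closure sees `it` directly
	fmt.Println(ok, len(q))
}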
@@ -495,21 +495,22 @@ type loopyWriter struct {
|
||||
ssGoAwayHandler func(*goAway) (bool, error)
|
||||
}
|
||||
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter {
|
||||
var buf bytes.Buffer
|
||||
l := &loopyWriter{
|
||||
side: s,
|
||||
cbuf: cbuf,
|
||||
sendQuota: defaultWindowSize,
|
||||
oiws: defaultWindowSize,
|
||||
estdStreams: make(map[uint32]*outStream),
|
||||
activeStreams: newOutStreamList(),
|
||||
framer: fr,
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
bdpEst: bdpEst,
|
||||
conn: conn,
|
||||
logger: logger,
|
||||
side: s,
|
||||
cbuf: cbuf,
|
||||
sendQuota: defaultWindowSize,
|
||||
oiws: defaultWindowSize,
|
||||
estdStreams: make(map[uint32]*outStream),
|
||||
activeStreams: newOutStreamList(),
|
||||
framer: fr,
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
bdpEst: bdpEst,
|
||||
conn: conn,
|
||||
logger: logger,
|
||||
ssGoAwayHandler: goAwayHandler,
|
||||
}
|
||||
return l
|
||||
}
|
||||
@@ -535,8 +536,8 @@ const minBatchSize = 1000
|
||||
// size is too low to give stream goroutines a chance to fill it up.
|
||||
//
|
||||
// Upon exiting, if the error causing the exit is not an I/O error, run()
|
||||
// flushes and closes the underlying connection. Otherwise, the connection is
|
||||
// left open to allow the I/O error to be encountered by the reader instead.
|
||||
// flushes the underlying connection. The connection is always left open to
|
||||
// allow different closing behavior on the client and server.
|
||||
func (l *loopyWriter) run() (err error) {
|
||||
defer func() {
|
||||
if l.logger.V(logLevel) {
|
||||
@@ -544,7 +545,6 @@ func (l *loopyWriter) run() (err error) {
|
||||
}
|
||||
if !isIOError(err) {
|
||||
l.framer.writer.Flush()
|
||||
l.conn.Close()
|
||||
}
|
||||
l.cbuf.finish()
|
||||
}()
|
||||
|
||||
84
vendor/google.golang.org/grpc/internal/transport/handler_server.go
generated
vendored
@@ -35,7 +35,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
@@ -45,20 +44,17 @@ import (
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
|
||||
// inside an http.Handler, or writes an HTTP error to w and returns an error.
|
||||
// It requires that the http Server supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
msg := "gRPC requires HTTP/2"
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
if r.Method != http.MethodPost {
|
||||
w.Header().Set("Allow", http.MethodPost)
|
||||
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
http.Error(w, msg, http.StatusMethodNotAllowed)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
@@ -69,17 +65,36 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
|
||||
http.Error(w, msg, http.StatusUnsupportedMediaType)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if r.ProtoMajor != 2 {
|
||||
msg := "gRPC requires HTTP/2"
|
||||
http.Error(w, msg, http.StatusHTTPVersionNotSupported)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if _, ok := w.(http.Flusher); !ok {
|
||||
msg := "gRPC requires a ResponseWriter supporting http.Flusher"
|
||||
http.Error(w, msg, http.StatusInternalServerError)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
|
||||
var localAddr net.Addr
|
||||
if la := r.Context().Value(http.LocalAddrContextKey); la != nil {
|
||||
localAddr, _ = la.(net.Addr)
|
||||
}
|
||||
var authInfo credentials.AuthInfo
|
||||
if r.TLS != nil {
|
||||
authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
|
||||
}
|
||||
p := peer.Peer{
|
||||
Addr: strAddr(r.RemoteAddr),
|
||||
LocalAddr: localAddr,
|
||||
AuthInfo: authInfo,
|
||||
}
|
||||
st := &serverHandlerTransport{
|
||||
rw: w,
|
||||
req: r,
|
||||
closedCh: make(chan struct{}),
|
||||
writes: make(chan func()),
|
||||
peer: p,
|
||||
contentType: contentType,
|
||||
contentSubtype: contentSubtype,
|
||||
stats: stats,
|
||||
@@ -134,6 +149,8 @@ type serverHandlerTransport struct {
|
||||
|
||||
headerMD metadata.MD
|
||||
|
||||
peer peer.Peer
|
||||
|
||||
closeOnce sync.Once
|
||||
closedCh chan struct{} // closed on Close
|
||||
|
||||
@@ -165,7 +182,13 @@ func (ht *serverHandlerTransport) Close(err error) {
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
||||
func (ht *serverHandlerTransport) Peer() *peer.Peer {
|
||||
return &peer.Peer{
|
||||
Addr: ht.peer.Addr,
|
||||
LocalAddr: ht.peer.LocalAddr,
|
||||
AuthInfo: ht.peer.AuthInfo,
|
||||
}
|
||||
}
|
||||
|
||||
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
|
||||
// the empty string if unknown.
|
||||
@@ -347,10 +370,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
|
||||
func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
|
||||
// With this transport type there will be exactly 1 stream: this HTTP request.
|
||||
|
||||
ctx := ht.req.Context()
|
||||
var cancel context.CancelFunc
|
||||
if ht.timeoutSet {
|
||||
ctx, cancel = context.WithTimeout(ctx, ht.timeout)
|
||||
@@ -370,34 +391,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
|
||||
ht.Close(errors.New("request is done processing"))
|
||||
}()
|
||||
|
||||
req := ht.req
|
||||
|
||||
s := &Stream{
|
||||
id: 0, // irrelevant
|
||||
requestRead: func(int) {},
|
||||
cancel: cancel,
|
||||
buf: newRecvBuffer(),
|
||||
st: ht,
|
||||
method: req.URL.Path,
|
||||
recvCompress: req.Header.Get("grpc-encoding"),
|
||||
contentSubtype: ht.contentSubtype,
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: ht.RemoteAddr(),
|
||||
}
|
||||
if req.TLS != nil {
|
||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
|
||||
}
|
||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||
s.ctx = peer.NewContext(ctx, pr)
|
||||
for _, sh := range ht.stats {
|
||||
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: ht.RemoteAddr(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
req := ht.req
|
||||
s := &Stream{
|
||||
id: 0, // irrelevant
|
||||
ctx: ctx,
|
||||
requestRead: func(int) {},
|
||||
cancel: cancel,
|
||||
buf: newRecvBuffer(),
|
||||
st: ht,
|
||||
method: req.URL.Path,
|
||||
recvCompress: req.Header.Get("grpc-encoding"),
|
||||
contentSubtype: ht.contentSubtype,
|
||||
headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
|
||||
}
|
||||
s.trReader = &transportReader{
|
||||
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
|
||||
|
||||
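NewServerHandlerTransport backs grpc.Server's ServeHTTP path. A hedged sketch of the usual way that path is exercised: a TLS-terminated net/http server (which negotiates HTTP/2 automatically) handing requests to the gRPC server as an http.Handler. The certificate paths are placeholders:

package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// register services on gs here...

	mux := http.NewServeMux()
	mux.Handle("/", gs) // *grpc.Server implements http.Handler via ServeHTTP

	// net/http enables HTTP/2 over TLS, which the handler transport requires;
	// as the hunk above shows, it rejects non-POST and non-HTTP/2 requests.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", mux))
}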
168
vendor/google.golang.org/grpc/internal/transport/http2_client.go
generated
vendored
@@ -36,6 +36,7 @@ import (
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
icredentials "google.golang.org/grpc/internal/credentials"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
@@ -43,7 +44,7 @@ import (
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
imetadata "google.golang.org/grpc/internal/metadata"
|
||||
istatus "google.golang.org/grpc/internal/status"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
isyscall "google.golang.org/grpc/internal/syscall"
|
||||
"google.golang.org/grpc/internal/transport/networktype"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
@@ -58,6 +59,8 @@ import (
|
||||
// atomically.
|
||||
var clientConnectionCounter uint64
|
||||
|
||||
var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
|
||||
|
||||
// http2Client implements the ClientTransport interface with HTTP2.
|
||||
type http2Client struct {
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
@@ -111,11 +114,11 @@ type http2Client struct {
|
||||
streamQuota int64
|
||||
streamsQuotaAvailable chan struct{}
|
||||
waitingStreams uint32
|
||||
nextID uint32
|
||||
registeredCompressors string
|
||||
|
||||
// Do not access controlBuf with mu held.
|
||||
mu sync.Mutex // guard the following variables
|
||||
nextID uint32
|
||||
state transportState
|
||||
activeStreams map[uint32]*Stream
|
||||
// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
|
||||
@@ -137,9 +140,7 @@ type http2Client struct {
|
||||
// variable.
|
||||
kpDormant bool
|
||||
|
||||
// Fields below are for channelz metric collection.
|
||||
channelzID *channelz.Identifier
|
||||
czData *channelzData
|
||||
channelz *channelz.Socket
|
||||
|
||||
onClose func(GoAwayReason)
|
||||
|
||||
@@ -176,7 +177,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
|
||||
if networkType == "tcp" && useProxy {
|
||||
return proxyDial(ctx, address, grpcUA)
|
||||
}
|
||||
return (&net.Dialer{}).DialContext(ctx, networkType, address)
|
||||
return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
|
||||
}
|
||||
|
||||
func isTemporary(err error) bool {
|
||||
@@ -262,7 +263,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
}
|
||||
keepaliveEnabled := false
|
||||
if kp.Time != infinity {
|
||||
if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
|
||||
if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
|
||||
}
|
||||
keepaliveEnabled = true
|
||||
@@ -316,6 +317,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
if opts.MaxHeaderListSize != nil {
|
||||
maxHeaderListSize = *opts.MaxHeaderListSize
|
||||
}
|
||||
|
||||
t := &http2Client{
|
||||
ctx: ctx,
|
||||
ctxDone: ctx.Done(), // Cache Done chan.
|
||||
@@ -343,11 +345,25 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
maxConcurrentStreams: defaultMaxStreamsClient,
|
||||
streamQuota: defaultMaxStreamsClient,
|
||||
streamsQuotaAvailable: make(chan struct{}, 1),
|
||||
czData: new(channelzData),
|
||||
keepaliveEnabled: keepaliveEnabled,
|
||||
bufferPool: newBufferPool(),
|
||||
onClose: onClose,
|
||||
}
|
||||
var czSecurity credentials.ChannelzSecurityValue
|
||||
if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
|
||||
czSecurity = au.GetSecurityValue()
|
||||
}
|
||||
t.channelz = channelz.RegisterSocket(
|
||||
&channelz.Socket{
|
||||
SocketType: channelz.SocketTypeNormal,
|
||||
Parent: opts.ChannelzParent,
|
||||
SocketMetrics: channelz.SocketMetrics{},
|
||||
EphemeralMetrics: t.socketMetrics,
|
||||
LocalAddr: t.localAddr,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
SocketOptions: channelz.GetSocketOption(t.conn),
|
||||
Security: czSecurity,
|
||||
})
|
||||
t.logger = prefixLoggerForClientTransport(t)
|
||||
// Add peer information to the http2client context.
|
||||
t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
||||
@@ -378,10 +394,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
}
|
||||
sh.HandleConn(t.ctx, connBegin)
|
||||
}
|
||||
t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if t.keepaliveEnabled {
|
||||
t.kpDormancyCond = sync.NewCond(&t.mu)
|
||||
go t.keepalive()
|
||||
@@ -396,10 +408,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
readerErrCh := make(chan error, 1)
|
||||
go t.reader(readerErrCh)
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = <-readerErrCh
|
||||
}
|
||||
if err != nil {
|
||||
// writerDone should be closed since the loopy goroutine
|
||||
// wouldn't have started in the case this function returns an error.
|
||||
close(t.writerDone)
|
||||
t.Close(err)
|
||||
}
|
||||
}()
|
||||
@@ -446,9 +458,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
if err := t.framer.writer.Flush(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Block until the server preface is received successfully or an error occurs.
|
||||
if err = <-readerErrCh; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go func() {
|
||||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
|
||||
t.loopy.run()
|
||||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
|
||||
if err := t.loopy.run(); !isIOError(err) {
|
||||
// Immediately close the connection, as the loopy writer returns
|
||||
// when there are no more active streams and we were draining (the
|
||||
// server sent a GOAWAY). For I/O errors, the reader will hit it
|
||||
// after draining any remaining incoming data.
|
||||
t.conn.Close()
|
||||
}
|
||||
close(t.writerDone)
|
||||
}()
|
||||
return t, nil
|
||||
@@ -493,11 +515,23 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
|
||||
func (t *http2Client) getPeer() *peer.Peer {
|
||||
return &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
AuthInfo: t.authInfo, // Can be nil
|
||||
Addr: t.remoteAddr,
|
||||
AuthInfo: t.authInfo, // Can be nil
|
||||
LocalAddr: t.localAddr,
|
||||
}
|
||||
}
|
||||
|
||||
// OutgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway
|
||||
// to be the last frame loopy writes to the transport.
|
||||
func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return false, g.closeConn
|
||||
}
|
||||
|
||||
func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
|
||||
aud := t.createAudience(callHdr)
|
||||
ri := credentials.RequestInfo{
|
||||
@@ -566,7 +600,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
|
||||
}
|
||||
|
||||
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
|
||||
if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
|
||||
var k string
|
||||
for k, vv := range md {
|
||||
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
|
||||
@@ -746,8 +780,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
return ErrConnClosing
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.streamsStarted, 1)
|
||||
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
|
||||
t.channelz.SocketMetrics.StreamsStarted.Add(1)
|
||||
t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano())
|
||||
}
|
||||
// If the keepalive goroutine has gone dormant, wake it up.
|
||||
if t.kpDormant {
|
||||
@@ -762,7 +796,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
firstTry := true
|
||||
var ch chan struct{}
|
||||
transportDrainRequired := false
|
||||
checkForStreamQuota := func(it any) bool {
|
||||
checkForStreamQuota := func() bool {
|
||||
if t.streamQuota <= 0 { // Can go negative if server decreases it.
|
||||
if firstTry {
|
||||
t.waitingStreams++
|
||||
@@ -774,23 +808,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
t.waitingStreams--
|
||||
}
|
||||
t.streamQuota--
|
||||
h := it.(*headerFrame)
|
||||
h.streamID = t.nextID
|
||||
t.nextID += 2
|
||||
|
||||
// Drain client transport if nextID > MaxStreamID which signals gRPC that
|
||||
// the connection is closed and a new one must be created for subsequent RPCs.
|
||||
transportDrainRequired = t.nextID > MaxStreamID
|
||||
|
||||
s.id = h.streamID
|
||||
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
|
||||
t.mu.Lock()
|
||||
if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
|
||||
t.mu.Unlock()
|
||||
return false // Don't create a stream if the transport is already closed.
|
||||
}
|
||||
|
||||
hdr.streamID = t.nextID
|
||||
t.nextID += 2
|
||||
// Drain client transport if nextID > MaxStreamID which signals gRPC that
|
||||
// the connection is closed and a new one must be created for subsequent RPCs.
|
||||
transportDrainRequired = t.nextID > MaxStreamID
|
||||
|
||||
s.id = hdr.streamID
|
||||
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
|
||||
t.activeStreams[s.id] = s
|
||||
t.mu.Unlock()
|
||||
|
||||
if t.streamQuota > 0 && t.waitingStreams > 0 {
|
||||
select {
|
||||
case t.streamsQuotaAvailable <- struct{}{}:
|
||||
@@ -800,13 +835,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
return true
|
||||
}
|
||||
var hdrListSizeErr error
|
||||
checkForHeaderListSize := func(it any) bool {
|
||||
checkForHeaderListSize := func() bool {
|
||||
if t.maxSendHeaderListSize == nil {
|
||||
return true
|
||||
}
|
||||
hdrFrame := it.(*headerFrame)
|
||||
var sz int64
|
||||
for _, f := range hdrFrame.hf {
|
||||
for _, f := range hdr.hf {
|
||||
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
|
||||
hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
|
||||
return false
|
||||
@@ -815,8 +849,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||||
return true
|
||||
}
|
||||
for {
|
||||
success, err := t.controlBuf.executeAndPut(func(it any) bool {
|
||||
return checkForHeaderListSize(it) && checkForStreamQuota(it)
|
||||
success, err := t.controlBuf.executeAndPut(func() bool {
|
||||
return checkForHeaderListSize() && checkForStreamQuota()
|
||||
}, hdr)
|
||||
if err != nil {
|
||||
// Connection closed.
|
||||
@@ -918,16 +952,16 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
|
||||
t.mu.Unlock()
|
||||
if channelz.IsOn() {
|
||||
if eosReceived {
|
||||
atomic.AddInt64(&t.czData.streamsSucceeded, 1)
|
||||
t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
|
||||
} else {
|
||||
atomic.AddInt64(&t.czData.streamsFailed, 1)
|
||||
t.channelz.SocketMetrics.StreamsFailed.Add(1)
|
||||
}
|
||||
}
|
||||
},
|
||||
rst: rst,
|
||||
rstCode: rstCode,
|
||||
}
|
||||
addBackStreamQuota := func(any) bool {
|
||||
addBackStreamQuota := func() bool {
|
||||
t.streamQuota++
|
||||
if t.streamQuota > 0 && t.waitingStreams > 0 {
|
||||
select {
|
||||
@@ -947,7 +981,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
|
||||
|
||||
// Close kicks off the shutdown process of the transport. This should be called
|
||||
// only once on a transport. Once it is called, the transport should not be
|
||||
// accessed any more.
|
||||
// accessed anymore.
|
||||
func (t *http2Client) Close(err error) {
|
||||
t.mu.Lock()
|
||||
// Make sure we only close once.
|
||||
@@ -972,10 +1006,13 @@ func (t *http2Client) Close(err error) {
|
||||
t.kpDormancyCond.Signal()
|
||||
}
|
||||
t.mu.Unlock()
|
||||
t.controlBuf.finish()
|
||||
// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
|
||||
// connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY.
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
|
||||
<-t.writerDone
|
||||
t.cancel()
|
||||
t.conn.Close()
|
||||
channelz.RemoveEntry(t.channelzID)
|
||||
channelz.RemoveEntry(t.channelz.ID)
|
||||
// Append info about previous goaways if there were any, since this may be important
|
||||
// for understanding the root cause for this connection to be closed.
|
||||
_, goAwayDebugMessage := t.GetGoAwayReason()
|
||||
@@ -1080,7 +1117,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
|
||||
// for the transport and the stream based on the current bdp
|
||||
// estimation.
|
||||
func (t *http2Client) updateFlowControl(n uint32) {
|
||||
updateIWS := func(any) bool {
|
||||
updateIWS := func() bool {
|
||||
t.initialWindowSize = int32(n)
|
||||
t.mu.Lock()
|
||||
for _, s := range t.activeStreams {
|
||||
@@ -1233,7 +1270,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
|
||||
}
|
||||
updateFuncs = append(updateFuncs, updateStreamQuota)
|
||||
}
|
||||
t.controlBuf.executeAndPut(func(any) bool {
|
||||
t.controlBuf.executeAndPut(func() bool {
|
||||
for _, f := range updateFuncs {
|
||||
f()
|
||||
}
|
||||
@@ -1321,10 +1358,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
for streamID, stream := range t.activeStreams {
|
||||
if streamID > id && streamID <= upperLimit {
|
||||
// The stream was unprocessed by the server.
|
||||
if streamID > id && streamID <= upperLimit {
|
||||
atomic.StoreUint32(&stream.unprocessed, 1)
|
||||
streamsToClose = append(streamsToClose, stream)
|
||||
}
|
||||
atomic.StoreUint32(&stream.unprocessed, 1)
|
||||
streamsToClose = append(streamsToClose, stream)
|
||||
}
|
||||
}
|
||||
t.mu.Unlock()
|
||||
@@ -1700,7 +1735,7 @@ func (t *http2Client) keepalive() {
|
||||
// keepalive timer expired. In both cases, we need to send a ping.
|
||||
if !outstandingPing {
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
|
||||
}
|
||||
t.controlBuf.put(p)
|
||||
timeoutLeft = t.kp.Timeout
|
||||
@@ -1730,40 +1765,23 @@ func (t *http2Client) GoAway() <-chan struct{} {
|
||||
return t.goAway
|
||||
}
|
||||
|
||||
func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
|
||||
s := channelz.SocketInternalMetric{
|
||||
StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
|
||||
StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
|
||||
StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
|
||||
MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
|
||||
MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
|
||||
KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
|
||||
LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
|
||||
LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
|
||||
LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
|
||||
LocalFlowControlWindow: int64(t.fc.getSize()),
|
||||
SocketOptions: channelz.GetSocketOption(t.conn),
|
||||
LocalAddr: t.localAddr,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
// RemoteName :
|
||||
func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
|
||||
return &channelz.EphemeralSocketMetrics{
|
||||
LocalFlowControlWindow: int64(t.fc.getSize()),
|
||||
RemoteFlowControlWindow: t.getOutFlowWindow(),
|
||||
}
|
||||
if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
|
||||
s.Security = au.GetSecurityValue()
|
||||
}
|
||||
s.RemoteFlowControlWindow = t.getOutFlowWindow()
|
||||
return &s
|
||||
}
|
||||
|
||||
func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
|
||||
|
||||
func (t *http2Client) IncrMsgSent() {
|
||||
atomic.AddInt64(&t.czData.msgSent, 1)
|
||||
atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
|
||||
t.channelz.SocketMetrics.MessagesSent.Add(1)
|
||||
t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func (t *http2Client) IncrMsgRecv() {
|
||||
atomic.AddInt64(&t.czData.msgRecv, 1)
|
||||
atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
|
||||
t.channelz.SocketMetrics.MessagesReceived.Add(1)
|
||||
t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func (t *http2Client) getOutFlowWindow() int64 {
|
||||
|
||||
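The per-socket counters move from a czData struct read with atomic.LoadInt64 into channelz.SocketMetrics fields exposing Add/Store/Load directly. A hedged sketch of the same lock-free counter pattern using sync/atomic.Int64 (illustrative types, not the channelz package itself):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// socketMetrics imitates the shape used above: monotonic counters plus a
// timestamp stored as UnixNano, each safe for concurrent use without a mutex.
type socketMetrics struct {
	StreamsStarted                  atomic.Int64
	LastLocalStreamCreatedTimestamp atomic.Int64
}

func main() {
	var m socketMetrics
	m.StreamsStarted.Add(1)
	m.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano())
	fmt.Println(m.StreamsStarted.Load(), time.Unix(0, m.LastLocalStreamCreatedTimestamp.Load()))
}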
230
vendor/google.golang.org/grpc/internal/transport/http2_server.go
generated
vendored
@@ -25,6 +25,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
@@ -32,18 +33,17 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
@@ -68,18 +68,15 @@ var serverConnectionCounter uint64
|
||||
|
||||
// http2Server implements the ServerTransport interface with HTTP2.
|
||||
type http2Server struct {
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
done chan struct{}
|
||||
conn net.Conn
|
||||
loopy *loopyWriter
|
||||
readerDone chan struct{} // sync point to enable testing.
|
||||
writerDone chan struct{} // sync point to enable testing.
|
||||
remoteAddr net.Addr
|
||||
localAddr net.Addr
|
||||
authInfo credentials.AuthInfo // auth info about the connection
|
||||
inTapHandle tap.ServerInHandle
|
||||
framer *framer
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
done chan struct{}
|
||||
conn net.Conn
|
||||
loopy *loopyWriter
|
||||
readerDone chan struct{} // sync point to enable testing.
|
||||
loopyWriterDone chan struct{}
|
||||
peer peer.Peer
|
||||
inTapHandle tap.ServerInHandle
|
||||
framer *framer
|
||||
// The max number of concurrent streams.
|
||||
maxStreams uint32
|
||||
// controlBuf delivers all the control related tasks (e.g., window
|
||||
@@ -121,8 +118,7 @@ type http2Server struct {
|
||||
idle time.Time
|
||||
|
||||
// Fields below are for channelz metric collection.
|
||||
channelzID *channelz.Identifier
|
||||
czData *channelzData
|
||||
channelz *channelz.Socket
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
@@ -243,16 +239,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
peer := peer.Peer{
|
||||
Addr: conn.RemoteAddr(),
|
||||
LocalAddr: conn.LocalAddr(),
|
||||
AuthInfo: authInfo,
|
||||
}
|
||||
t := &http2Server{
|
||||
ctx: setConnection(context.Background(), rawConn),
|
||||
done: done,
|
||||
conn: conn,
|
||||
remoteAddr: conn.RemoteAddr(),
|
||||
localAddr: conn.LocalAddr(),
|
||||
authInfo: authInfo,
|
||||
peer: peer,
|
||||
framer: framer,
|
||||
readerDone: make(chan struct{}),
|
||||
writerDone: make(chan struct{}),
|
||||
loopyWriterDone: make(chan struct{}),
|
||||
maxStreams: config.MaxStreams,
|
||||
inTapHandle: config.InTapHandle,
|
||||
fc: &trInFlow{limit: uint32(icwz)},
|
||||
@@ -263,12 +261,25 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
idle: time.Now(),
|
||||
kep: kep,
|
||||
initialWindowSize: iwz,
|
||||
czData: new(channelzData),
|
||||
bufferPool: newBufferPool(),
|
||||
}
|
||||
var czSecurity credentials.ChannelzSecurityValue
|
||||
if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
|
||||
czSecurity = au.GetSecurityValue()
|
||||
}
|
||||
t.channelz = channelz.RegisterSocket(
|
||||
&channelz.Socket{
|
||||
SocketType: channelz.SocketTypeNormal,
|
||||
Parent: config.ChannelzParent,
|
||||
SocketMetrics: channelz.SocketMetrics{},
|
||||
EphemeralMetrics: t.socketMetrics,
|
||||
LocalAddr: t.peer.LocalAddr,
|
||||
RemoteAddr: t.peer.Addr,
|
||||
SocketOptions: channelz.GetSocketOption(t.conn),
|
||||
Security: czSecurity,
|
||||
},
|
||||
)
|
||||
t.logger = prefixLoggerForServerTransport(t)
|
||||
// Add peer information to the http2server context.
|
||||
t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
||||
|
||||
t.controlBuf = newControlBuffer(t.done)
|
||||
if dynamicWindow {
|
||||
@@ -277,18 +288,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
for _, sh := range t.stats {
|
||||
t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
})
|
||||
connBegin := &stats.ConnBegin{}
|
||||
sh.HandleConn(t.ctx, connBegin)
|
||||
}
|
||||
t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
|
||||
t.framer.writer.Flush()
|
||||
@@ -331,10 +330,27 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
t.handleSettings(sf)
|
||||
|
||||
go func() {
|
||||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
|
||||
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
|
||||
t.loopy.run()
|
||||
close(t.writerDone)
|
||||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
|
||||
err := t.loopy.run()
|
||||
close(t.loopyWriterDone)
|
||||
if !isIOError(err) {
|
||||
// Close the connection if a non-I/O error occurs (for I/O errors
|
||||
// the reader will also encounter the error and close). Wait 1
|
||||
// second before closing the connection, or when the reader is done
|
||||
// (i.e. the client already closed the connection or a connection
|
||||
// error occurred). This avoids the potential problem where there
|
||||
// is unread data on the receive side of the connection, which, if
|
||||
// closed, would lead to a TCP RST instead of FIN, and the client
|
||||
// encountering errors. For more info:
|
||||
// https://github.com/grpc/grpc-go/issues/5358
|
||||
timer := time.NewTimer(time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-t.readerDone:
|
||||
case <-timer.C:
|
||||
}
|
||||
t.conn.Close()
|
||||
}
|
||||
}()
|
||||
go t.keepalive()
|
||||
return t, nil
|
||||
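The comment in the new writer goroutine above explains why the server delays closing the connection after a non-I/O loopy error: it waits until the reader is done or a grace period elapses so unread data does not turn the close into a TCP RST (grpc/grpc-go#5358). A rough standalone sketch of that delayed-close pattern, with made-up names and an illustrative grace period:

```go
package main

import (
	"net"
	"time"
)

// closeAfterDrain waits until the read loop finishes (readerDone) or a grace
// period elapses before closing the connection, so unread data on the socket
// does not cause a TCP RST instead of a clean FIN.
func closeAfterDrain(conn net.Conn, readerDone <-chan struct{}, grace time.Duration) {
	timer := time.NewTimer(grace)
	defer timer.Stop()
	select {
	case <-readerDone: // peer already closed or the reader hit an error
	case <-timer.C: // grace period elapsed
	}
	conn.Close()
}

func main() {
	c1, c2 := net.Pipe()
	defer c2.Close()
	readerDone := make(chan struct{})
	close(readerDone) // pretend the reader already finished
	closeAfterDrain(c1, readerDone, time.Second)
}
```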
@@ -342,7 +358,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
|
||||
// operateHeaders takes action on the decoded headers. Returns an error if fatal
|
||||
// error encountered and transport needs to close, otherwise returns nil.
|
||||
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
|
||||
func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
|
||||
// Acquire max stream ID lock for entire duration
|
||||
t.maxStreamMu.Lock()
|
||||
defer t.maxStreamMu.Unlock()
|
||||
@@ -369,10 +385,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
|
||||
buf := newRecvBuffer()
|
||||
s := &Stream{
|
||||
id: streamID,
|
||||
st: t,
|
||||
buf: buf,
|
||||
fc: &inFlow{limit: uint32(t.initialWindowSize)},
|
||||
id: streamID,
|
||||
st: t,
|
||||
buf: buf,
|
||||
fc: &inFlow{limit: uint32(t.initialWindowSize)},
|
||||
headerWireLength: int(frame.Header().Length),
|
||||
}
|
||||
var (
|
||||
// if false, content-type was missing or invalid
|
||||
@@ -511,9 +528,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
s.state = streamReadDone
|
||||
}
|
||||
if timeoutSet {
|
||||
s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout)
|
||||
s.ctx, s.cancel = context.WithTimeout(ctx, timeout)
|
||||
} else {
|
||||
s.ctx, s.cancel = context.WithCancel(t.ctx)
|
||||
s.ctx, s.cancel = context.WithCancel(ctx)
|
||||
}
|
||||
|
||||
// Attach the received metadata to the context.
|
||||
@@ -586,24 +603,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
}
|
||||
t.mu.Unlock()
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.streamsStarted, 1)
|
||||
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
|
||||
t.channelz.SocketMetrics.StreamsStarted.Add(1)
|
||||
t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
|
||||
}
|
||||
s.requestRead = func(n int) {
|
||||
t.adjustWindow(s, uint32(n))
|
||||
}
|
||||
for _, sh := range t.stats {
|
||||
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: s.recvCompress,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: mdata.Copy(),
|
||||
}
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.ctxDone = s.ctx.Done()
|
||||
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
|
||||
s.trReader = &transportReader{
|
||||
@@ -629,8 +634,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
// HandleStreams receives incoming streams using the given handler. This is
|
||||
// typically run in a separate goroutine.
|
||||
// traceCtx attaches trace to ctx and returns the new context.
|
||||
func (t *http2Server) HandleStreams(handle func(*Stream)) {
|
||||
defer close(t.readerDone)
|
||||
func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
|
||||
defer func() {
|
||||
close(t.readerDone)
|
||||
<-t.loopyWriterDone
|
||||
}()
|
||||
for {
|
||||
t.controlBuf.throttle()
|
||||
frame, err := t.framer.fr.ReadFrame()
|
||||
@@ -655,18 +663,20 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
t.Close(err)
|
||||
return
|
||||
}
|
||||
t.Close(err)
|
||||
return
|
||||
}
|
||||
switch frame := frame.(type) {
|
||||
case *http2.MetaHeadersFrame:
|
||||
if err := t.operateHeaders(frame, handle); err != nil {
|
||||
t.Close(err)
|
||||
break
|
||||
if err := t.operateHeaders(ctx, frame, handle); err != nil {
|
||||
// Any error processing client headers, e.g. invalid stream ID,
|
||||
// is considered a protocol violation.
|
||||
t.controlBuf.put(&goAway{
|
||||
code: http2.ErrCodeProtocol,
|
||||
debugData: []byte(err.Error()),
|
||||
closeConn: err,
|
||||
})
|
||||
continue
|
||||
}
|
||||
case *http2.DataFrame:
|
||||
t.handleData(frame)
|
||||
@@ -849,7 +859,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
|
||||
}
|
||||
return nil
|
||||
})
|
||||
t.controlBuf.executeAndPut(func(any) bool {
|
||||
t.controlBuf.executeAndPut(func() bool {
|
||||
for _, f := range updateFuncs {
|
||||
f()
|
||||
}
|
||||
@@ -979,7 +989,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
}
|
||||
}
|
||||
if err := t.writeHeaderLocked(s); err != nil {
|
||||
return status.Convert(err).Err()
|
||||
switch e := err.(type) {
|
||||
case ConnectionError:
|
||||
return status.Error(codes.Unavailable, e.Desc)
|
||||
default:
|
||||
return status.Convert(err).Err()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -998,12 +1013,13 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
|
||||
}
|
||||
headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
|
||||
success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
|
||||
hf := &headerFrame{
|
||||
streamID: s.id,
|
||||
hf: headerFields,
|
||||
endStream: false,
|
||||
onWrite: t.setResetPingStrikes,
|
||||
})
|
||||
}
|
||||
success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf)
|
||||
if !success {
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1192,12 +1208,12 @@ func (t *http2Server) keepalive() {
|
||||
continue
|
||||
}
|
||||
if outstandingPing && kpTimeoutLeft <= 0 {
|
||||
t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
|
||||
t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout))
|
||||
return
|
||||
}
|
||||
if !outstandingPing {
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
|
||||
}
|
||||
t.controlBuf.put(p)
|
||||
kpTimeoutLeft = t.kp.Timeout
|
||||
@@ -1237,15 +1253,11 @@ func (t *http2Server) Close(err error) {
|
||||
if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
|
||||
t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
|
||||
}
|
||||
channelz.RemoveEntry(t.channelzID)
|
||||
channelz.RemoveEntry(t.channelz.ID)
|
||||
// Cancel all active streams.
|
||||
for _, s := range streams {
|
||||
s.cancel()
|
||||
}
|
||||
for _, sh := range t.stats {
|
||||
connEnd := &stats.ConnEnd{}
|
||||
sh.HandleConn(t.ctx, connEnd)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteStream deletes the stream s from transport's active streams.
|
||||
@@ -1262,9 +1274,9 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
|
||||
|
||||
if channelz.IsOn() {
|
||||
if eosReceived {
|
||||
atomic.AddInt64(&t.czData.streamsSucceeded, 1)
|
||||
t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
|
||||
} else {
|
||||
atomic.AddInt64(&t.czData.streamsFailed, 1)
|
||||
t.channelz.SocketMetrics.StreamsFailed.Add(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1311,10 +1323,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo
|
||||
})
|
||||
}
|
||||
|
||||
func (t *http2Server) RemoteAddr() net.Addr {
|
||||
return t.remoteAddr
|
||||
}
|
||||
|
||||
func (t *http2Server) Drain(debugData string) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
@@ -1351,6 +1359,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
||||
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
|
||||
return false, err
|
||||
}
|
||||
t.framer.writer.Flush()
|
||||
if retErr != nil {
|
||||
return false, retErr
|
||||
}
|
||||
@@ -1371,7 +1380,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
go func() {
|
||||
timer := time.NewTimer(time.Minute)
|
||||
timer := time.NewTimer(5 * time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-t.drainEvent.Done():
|
||||
@@ -1384,38 +1393,21 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
|
||||
s := channelz.SocketInternalMetric{
|
||||
StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
|
||||
StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
|
||||
StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
|
||||
MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
|
||||
MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
|
||||
KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
|
||||
LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
|
||||
LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
|
||||
LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
|
||||
LocalFlowControlWindow: int64(t.fc.getSize()),
|
||||
SocketOptions: channelz.GetSocketOption(t.conn),
|
||||
LocalAddr: t.localAddr,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
// RemoteName :
|
||||
func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
|
||||
return &channelz.EphemeralSocketMetrics{
|
||||
LocalFlowControlWindow: int64(t.fc.getSize()),
|
||||
RemoteFlowControlWindow: t.getOutFlowWindow(),
|
||||
}
|
||||
if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
|
||||
s.Security = au.GetSecurityValue()
|
||||
}
|
||||
s.RemoteFlowControlWindow = t.getOutFlowWindow()
|
||||
return &s
|
||||
}
|
||||
|
||||
func (t *http2Server) IncrMsgSent() {
|
||||
atomic.AddInt64(&t.czData.msgSent, 1)
|
||||
atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
|
||||
t.channelz.SocketMetrics.MessagesSent.Add(1)
|
||||
t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
|
||||
}
|
||||
|
||||
func (t *http2Server) IncrMsgRecv() {
|
||||
atomic.AddInt64(&t.czData.msgRecv, 1)
|
||||
atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
|
||||
t.channelz.SocketMetrics.MessagesReceived.Add(1)
|
||||
t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
|
||||
}
|
||||
|
||||
func (t *http2Server) getOutFlowWindow() int64 {
|
||||
@@ -1433,10 +1425,12 @@ func (t *http2Server) getOutFlowWindow() int64 {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *http2Server) getPeer() *peer.Peer {
|
||||
// Peer returns the peer of the transport.
|
||||
func (t *http2Server) Peer() *peer.Peer {
|
||||
return &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
AuthInfo: t.authInfo, // Can be nil
|
||||
Addr: t.peer.Addr,
|
||||
LocalAddr: t.peer.LocalAddr,
|
||||
AuthInfo: t.peer.AuthInfo, // Can be nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1446,7 +1440,7 @@ func getJitter(v time.Duration) time.Duration {
|
||||
}
|
||||
// Generate a jitter between +/- 10% of the value.
|
||||
r := int64(v / 10)
|
||||
j := grpcrand.Int63n(2*r) - r
|
||||
j := rand.Int63n(2*r) - r
|
||||
return time.Duration(j)
|
||||
}
|
||||
|
||||
@@ -1461,6 +1455,6 @@ func GetConnection(ctx context.Context) net.Conn {
|
||||
// SetConnection adds the connection to the context to be able to get
|
||||
// information about the destination ip and port for an incoming RPC. This also
|
||||
// allows any unary or streaming interceptors to see the connection.
|
||||
func setConnection(ctx context.Context, conn net.Conn) context.Context {
|
||||
func SetConnection(ctx context.Context, conn net.Conn) context.Context {
|
||||
return context.WithValue(ctx, connectionKey{}, conn)
|
||||
}
|
||||
|
||||
3
vendor/google.golang.org/grpc/internal/transport/http_util.go
generated
vendored
3
vendor/google.golang.org/grpc/internal/transport/http_util.go
generated
vendored
@@ -418,10 +418,9 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
return f
}

func getWriteBufferPool(writeBufferSize int) *sync.Pool {
func getWriteBufferPool(size int) *sync.Pool {
writeBufferMutex.Lock()
defer writeBufferMutex.Unlock()
size := writeBufferSize * 2
pool, ok := writeBufferPoolMap[size]
if ok {
return pool
14
vendor/google.golang.org/grpc/internal/transport/proxy.go
generated
vendored
14
vendor/google.golang.org/grpc/internal/transport/proxy.go
generated
vendored
@@ -28,6 +28,8 @@ import (
"net/http"
"net/http/httputil"
"net/url"

"google.golang.org/grpc/internal"
)

const proxyAuthHeaderKey = "Proxy-Authorization"
@@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
// is necessary, dials, does the HTTP CONNECT handshake, and returns the
// connection.
func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) {
newAddr := addr
proxyURL, err := mapAddress(addr)
if err != nil {
@@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn,
newAddr = proxyURL.Host
}

conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr)
if err != nil {
return
return nil, err
}
if proxyURL != nil {
if proxyURL == nil {
// proxy is disabled if proxyURL is nil.
conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
return conn, err
}
return
return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
}

func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
63
vendor/google.golang.org/grpc/internal/transport/transport.go
generated
vendored
63
vendor/google.golang.org/grpc/internal/transport/transport.go
generated
vendored
@@ -28,6 +28,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
@@ -37,6 +38,7 @@ import (
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -265,7 +267,8 @@ type Stream struct {
|
||||
// headerValid indicates whether a valid header was received. Only
|
||||
// meaningful after headerChan is closed (always call waitOnHeader() before
|
||||
// reading its value). Not valid on server side.
|
||||
headerValid bool
|
||||
headerValid bool
|
||||
headerWireLength int // Only set on server side.
|
||||
|
||||
// hdrMu protects header and trailer metadata on the server-side.
|
||||
hdrMu sync.Mutex
|
||||
@@ -301,7 +304,7 @@ func (s *Stream) isHeaderSent() bool {
|
||||
}
|
||||
|
||||
// updateHeaderSent updates headerSent and returns true
|
||||
// if it was alreay set. It is valid only on server-side.
|
||||
// if it was already set. It is valid only on server-side.
|
||||
func (s *Stream) updateHeaderSent() bool {
|
||||
return atomic.SwapUint32(&s.headerSent, 1) == 1
|
||||
}
|
||||
@@ -360,8 +363,12 @@ func (s *Stream) SendCompress() string {
|
||||
|
||||
// ClientAdvertisedCompressors returns the compressor names advertised by the
|
||||
// client via grpc-accept-encoding header.
|
||||
func (s *Stream) ClientAdvertisedCompressors() string {
|
||||
return s.clientAdvertisedCompressors
|
||||
func (s *Stream) ClientAdvertisedCompressors() []string {
|
||||
values := strings.Split(s.clientAdvertisedCompressors, ",")
|
||||
for i, v := range values {
|
||||
values[i] = strings.TrimSpace(v)
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed when it receives the final status
|
||||
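The hunk above changes ClientAdvertisedCompressors to return the grpc-accept-encoding values as a slice with surrounding whitespace trimmed, instead of the raw header string. A minimal standalone sketch of the same parsing (the helper name is made up):

```go
package main

import (
	"fmt"
	"strings"
)

// splitCompressors mirrors the new behavior: split the comma-separated
// grpc-accept-encoding value and trim whitespace from each compressor name.
func splitCompressors(header string) []string {
	values := strings.Split(header, ",")
	for i, v := range values {
		values[i] = strings.TrimSpace(v)
	}
	return values
}

func main() {
	fmt.Println(splitCompressors("gzip, deflate , snappy")) // [gzip deflate snappy]
}
```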
@@ -425,6 +432,12 @@ func (s *Stream) Context() context.Context {
|
||||
return s.ctx
|
||||
}
|
||||
|
||||
// SetContext sets the context of the stream. This will be deleted once the
|
||||
// stats handler callouts all move to gRPC layer.
|
||||
func (s *Stream) SetContext(ctx context.Context) {
|
||||
s.ctx = ctx
|
||||
}
|
||||
|
||||
// Method returns the method for the stream.
|
||||
func (s *Stream) Method() string {
|
||||
return s.method
|
||||
@@ -437,6 +450,12 @@ func (s *Stream) Status() *status.Status {
|
||||
return s.status
|
||||
}
|
||||
|
||||
// HeaderWireLength returns the size of the headers of the stream as received
|
||||
// from the wire. Valid only on the server.
|
||||
func (s *Stream) HeaderWireLength() int {
|
||||
return s.headerWireLength
|
||||
}
|
||||
|
||||
// SetHeader sets the header metadata. This can be called multiple times.
|
||||
// Server side only.
|
||||
// This should not be called in parallel to other data writes.
|
||||
@@ -552,7 +571,7 @@ type ServerConfig struct {
|
||||
WriteBufferSize int
|
||||
ReadBufferSize int
|
||||
SharedWriteBuffer bool
|
||||
ChannelzParentID *channelz.Identifier
|
||||
ChannelzParent *channelz.Server
|
||||
MaxHeaderListSize *uint32
|
||||
HeaderTableSize *uint32
|
||||
}
|
||||
@@ -587,8 +606,8 @@ type ConnectOptions struct {
|
||||
ReadBufferSize int
|
||||
// SharedWriteBuffer indicates whether connections should reuse write buffer
|
||||
SharedWriteBuffer bool
|
||||
// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
|
||||
ChannelzParentID *channelz.Identifier
|
||||
// ChannelzParent sets the addrConn id which initiated the creation of this client transport.
|
||||
ChannelzParent *channelz.SubChannel
|
||||
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
|
||||
MaxHeaderListSize *uint32
|
||||
// UseProxy specifies if a proxy should be used.
|
||||
@@ -698,7 +717,7 @@ type ClientTransport interface {
|
||||
// Write methods for a given Stream will be called serially.
|
||||
type ServerTransport interface {
|
||||
// HandleStreams receives incoming streams using the given handler.
|
||||
HandleStreams(func(*Stream))
|
||||
HandleStreams(context.Context, func(*Stream))
|
||||
|
||||
// WriteHeader sends the header metadata for the given stream.
|
||||
// WriteHeader may not be called on all streams.
|
||||
@@ -717,8 +736,8 @@ type ServerTransport interface {
|
||||
// handlers will be terminated asynchronously.
|
||||
Close(err error)
|
||||
|
||||
// RemoteAddr returns the remote network address.
|
||||
RemoteAddr() net.Addr
|
||||
// Peer returns the peer of the server transport.
|
||||
Peer() *peer.Peer
|
||||
|
||||
// Drain notifies the client this ServerTransport stops accepting new RPCs.
|
||||
Drain(debugData string)
|
||||
@@ -801,30 +820,6 @@ const (
|
||||
GoAwayTooManyPings GoAwayReason = 2
|
||||
)
|
||||
|
||||
// channelzData is used to store channelz related data for http2Client and http2Server.
|
||||
// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic
|
||||
// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
|
||||
// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
|
||||
type channelzData struct {
|
||||
kpCount int64
|
||||
// The number of streams that have started, including already finished ones.
|
||||
streamsStarted int64
|
||||
// Client side: The number of streams that have ended successfully by receiving
|
||||
// EoS bit set frame from server.
|
||||
// Server side: The number of streams that have ended successfully by sending
|
||||
// frame with EoS bit set.
|
||||
streamsSucceeded int64
|
||||
streamsFailed int64
|
||||
// lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
|
||||
// instead of time.Time since it's more costly to atomically update time.Time variable than int64
|
||||
// variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
|
||||
lastStreamCreatedTime int64
|
||||
msgSent int64
|
||||
msgRecv int64
|
||||
lastMsgSentTime int64
|
||||
lastMsgRecvTime int64
|
||||
}
|
||||
|
||||
// ContextErr converts the error from context package into a status error.
|
||||
func ContextErr(err error) error {
|
||||
switch err {
|
||||
|
||||
40
vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
generated
vendored
40
vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
generated
vendored
@@ -1,40 +0,0 @@
|
||||
/*
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc/attributes"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// handshakeClusterNameKey is the type used as the key to store cluster name in
|
||||
// the Attributes field of resolver.Address.
|
||||
type handshakeClusterNameKey struct{}
|
||||
|
||||
// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
|
||||
// is updated with the cluster name.
|
||||
func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
|
||||
addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
|
||||
return addr
|
||||
}
|
||||
|
||||
// GetXDSHandshakeClusterName returns cluster name stored in attr.
|
||||
func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
|
||||
v := attr.Value(handshakeClusterNameKey{})
|
||||
name, ok := v.(string)
|
||||
return name, ok
|
||||
}
|
||||
31
vendor/google.golang.org/grpc/metadata/metadata.go
generated
vendored
31
vendor/google.golang.org/grpc/metadata/metadata.go
generated
vendored
@@ -25,8 +25,14 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/internal"
|
||||
)
|
||||
|
||||
func init() {
|
||||
internal.FromOutgoingContextRaw = fromOutgoingContextRaw
|
||||
}
|
||||
|
||||
// DecodeKeyValue returns k, v, nil.
|
||||
//
|
||||
// Deprecated: use k and v directly instead.
|
||||
@@ -153,14 +159,16 @@ func Join(mds ...MD) MD {
|
||||
type mdIncomingKey struct{}
|
||||
type mdOutgoingKey struct{}
|
||||
|
||||
// NewIncomingContext creates a new context with incoming md attached.
|
||||
// NewIncomingContext creates a new context with incoming md attached. md must
|
||||
// not be modified after calling this function.
|
||||
func NewIncomingContext(ctx context.Context, md MD) context.Context {
|
||||
return context.WithValue(ctx, mdIncomingKey{}, md)
|
||||
}
|
||||
|
||||
// NewOutgoingContext creates a new context with outgoing md attached. If used
|
||||
// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
|
||||
// overwrite any previously-appended metadata.
|
||||
// overwrite any previously-appended metadata. md must not be modified after
|
||||
// calling this function.
|
||||
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
|
||||
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
|
||||
}
|
||||
@@ -203,7 +211,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
|
||||
}
|
||||
|
||||
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
|
||||
// key from the incoming metadata if it exists. Key must be lower-case.
|
||||
// key from the incoming metadata if it exists. Keys are matched in a case insensitive
|
||||
// manner.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
@@ -219,33 +228,29 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
|
||||
return copyOf(v)
|
||||
}
|
||||
for k, v := range md {
|
||||
// We need to manually convert all keys to lower case, because MD is a
|
||||
// map, and there's no guarantee that the MD attached to the context is
|
||||
// created using our helper functions.
|
||||
if strings.ToLower(k) == key {
|
||||
// Case insenitive comparison: MD is a map, and there's no guarantee
|
||||
// that the MD attached to the context is created using our helper
|
||||
// functions.
|
||||
if strings.EqualFold(k, key) {
|
||||
return copyOf(v)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// the returned slice must not be modified in place
|
||||
func copyOf(v []string) []string {
|
||||
vals := make([]string, len(v))
|
||||
copy(vals, v)
|
||||
return vals
|
||||
}
|
||||
|
||||
// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
|
||||
// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
|
||||
//
|
||||
// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
|
||||
// is a map, there's no guarantee it's created using our helper functions) and
|
||||
// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
|
||||
// lowercase).
|
||||
//
|
||||
// This is intended for gRPC-internal use ONLY. Users should use
|
||||
// FromOutgoingContext instead.
|
||||
func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
|
||||
func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
|
||||
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
|
||||
if !ok {
|
||||
return nil, nil, false
|
||||
|
||||
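With the change above, metadata.ValueFromIncomingContext matches keys case-insensitively via strings.EqualFold, so it also finds entries whose map keys were not lower-cased by the gRPC helpers. A hedged usage sketch as a unary server interceptor (the header name is illustrative; ValueFromIncomingContext is still experimental API):

```go
package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// requireRequestID rejects RPCs that do not carry an x-request-id metadata
// value; after this change the lookup also matches keys like X-Request-Id.
func requireRequestID(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	if len(metadata.ValueFromIncomingContext(ctx, "x-request-id")) == 0 {
		return nil, status.Error(codes.InvalidArgument, "missing x-request-id")
	}
	return handler(ctx, req)
}

func main() {
	_ = grpc.NewServer(grpc.UnaryInterceptor(requireRequestID))
}
```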
32
vendor/google.golang.org/grpc/peer/peer.go
generated
vendored
32
vendor/google.golang.org/grpc/peer/peer.go
generated
vendored
@@ -22,7 +22,9 @@ package peer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
@@ -32,11 +34,41 @@ import (
|
||||
type Peer struct {
|
||||
// Addr is the peer address.
|
||||
Addr net.Addr
|
||||
// LocalAddr is the local address.
|
||||
LocalAddr net.Addr
|
||||
// AuthInfo is the authentication information of the transport.
|
||||
// It is nil if there is no transport security being used.
|
||||
AuthInfo credentials.AuthInfo
|
||||
}
|
||||
|
||||
// String ensures the Peer types implements the Stringer interface in order to
|
||||
// allow to print a context with a peerKey value effectively.
|
||||
func (p *Peer) String() string {
|
||||
if p == nil {
|
||||
return "Peer<nil>"
|
||||
}
|
||||
sb := &strings.Builder{}
|
||||
sb.WriteString("Peer{")
|
||||
if p.Addr != nil {
|
||||
fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
|
||||
} else {
|
||||
fmt.Fprintf(sb, "Addr: <nil>, ")
|
||||
}
|
||||
if p.LocalAddr != nil {
|
||||
fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
|
||||
} else {
|
||||
fmt.Fprintf(sb, "LocalAddr: <nil>, ")
|
||||
}
|
||||
if p.AuthInfo != nil {
|
||||
fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
|
||||
} else {
|
||||
fmt.Fprintf(sb, "AuthInfo: <nil>")
|
||||
}
|
||||
sb.WriteString("}")
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
type peerKey struct{}
|
||||
|
||||
// NewContext creates a new context with peer information attached.
|
||||
|
||||
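The peer.Peer struct above gains a LocalAddr field and a String method, so a peer stored in a context now prints usefully with %v. A small usage sketch with made-up addresses:

```go
package main

import (
	"context"
	"fmt"
	"net"

	"google.golang.org/grpc/peer"
)

func main() {
	// Attach a peer to a context the way the server transport does, then print it;
	// the new String method renders Addr, LocalAddr and AuthInfo.
	p := &peer.Peer{
		Addr:      &net.TCPAddr{IP: net.IPv4(10, 0, 0, 2), Port: 443},
		LocalAddr: &net.TCPAddr{IP: net.IPv4(10, 0, 0, 1), Port: 51000},
	}
	ctx := peer.NewContext(context.Background(), p)
	if got, ok := peer.FromContext(ctx); ok {
		fmt.Println(got) // Peer{Addr: '10.0.0.2:443', LocalAddr: '10.0.0.1:51000', AuthInfo: <nil>}
	}
}
```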
103
vendor/google.golang.org/grpc/picker_wrapper.go
generated
vendored
103
vendor/google.golang.org/grpc/picker_wrapper.go
generated
vendored
@@ -20,8 +20,9 @@ package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
@@ -32,40 +33,43 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// pickerGeneration stores a picker and a channel used to signal that a picker
|
||||
// newer than this one is available.
|
||||
type pickerGeneration struct {
|
||||
// picker is the picker produced by the LB policy. May be nil if a picker
|
||||
// has never been produced.
|
||||
picker balancer.Picker
|
||||
// blockingCh is closed when the picker has been invalidated because there
|
||||
// is a new one available.
|
||||
blockingCh chan struct{}
|
||||
}
|
||||
|
||||
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
|
||||
// actions and unblock when there's a picker update.
|
||||
type pickerWrapper struct {
|
||||
mu sync.Mutex
|
||||
done bool
|
||||
idle bool
|
||||
blockingCh chan struct{}
|
||||
picker balancer.Picker
|
||||
// If pickerGen holds a nil pointer, the pickerWrapper is closed.
|
||||
pickerGen atomic.Pointer[pickerGeneration]
|
||||
statsHandlers []stats.Handler // to record blocking picker calls
|
||||
}
|
||||
|
||||
func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
|
||||
return &pickerWrapper{
|
||||
blockingCh: make(chan struct{}),
|
||||
pw := &pickerWrapper{
|
||||
statsHandlers: statsHandlers,
|
||||
}
|
||||
pw.pickerGen.Store(&pickerGeneration{
|
||||
blockingCh: make(chan struct{}),
|
||||
})
|
||||
return pw
|
||||
}
|
||||
|
||||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
|
||||
// updatePicker is called by UpdateState calls from the LB policy. It
|
||||
// unblocks all blocked pick.
|
||||
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
pw.mu.Lock()
|
||||
if pw.done || pw.idle {
|
||||
// There is a small window where a picker update from the LB policy can
|
||||
// race with the channel going to idle mode. If the picker is idle here,
|
||||
// it is because the channel asked it to do so, and therefore it is sage
|
||||
// to ignore the update from the LB policy.
|
||||
pw.mu.Unlock()
|
||||
return
|
||||
}
|
||||
pw.picker = p
|
||||
// pw.blockingCh should never be nil.
|
||||
close(pw.blockingCh)
|
||||
pw.blockingCh = make(chan struct{})
|
||||
pw.mu.Unlock()
|
||||
old := pw.pickerGen.Swap(&pickerGeneration{
|
||||
picker: p,
|
||||
blockingCh: make(chan struct{}),
|
||||
})
|
||||
close(old.blockingCh)
|
||||
}
|
||||
|
||||
// doneChannelzWrapper performs the following:
|
||||
@@ -102,27 +106,24 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
var lastPickErr error
|
||||
|
||||
for {
|
||||
pw.mu.Lock()
|
||||
if pw.done {
|
||||
pw.mu.Unlock()
|
||||
pg := pw.pickerGen.Load()
|
||||
if pg == nil {
|
||||
return nil, balancer.PickResult{}, ErrClientConnClosing
|
||||
}
|
||||
|
||||
if pw.picker == nil {
|
||||
ch = pw.blockingCh
|
||||
if pg.picker == nil {
|
||||
ch = pg.blockingCh
|
||||
}
|
||||
if ch == pw.blockingCh {
|
||||
if ch == pg.blockingCh {
|
||||
// This could happen when either:
|
||||
// - pw.picker is nil (the previous if condition), or
|
||||
// - has called pick on the current picker.
|
||||
pw.mu.Unlock()
|
||||
// - we have already called pick on the current picker.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
var errStr string
|
||||
if lastPickErr != nil {
|
||||
errStr = "latest balancer error: " + lastPickErr.Error()
|
||||
} else {
|
||||
errStr = ctx.Err().Error()
|
||||
errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error())
|
||||
}
|
||||
switch ctx.Err() {
|
||||
case context.DeadlineExceeded:
|
||||
@@ -149,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
}
|
||||
}
|
||||
|
||||
ch = pw.blockingCh
|
||||
p := pw.picker
|
||||
pw.mu.Unlock()
|
||||
ch = pg.blockingCh
|
||||
p := pg.picker
|
||||
|
||||
pickResult, err := p.Pick(info)
|
||||
if err != nil {
|
||||
@@ -201,32 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
}
|
||||
|
||||
func (pw *pickerWrapper) close() {
|
||||
pw.mu.Lock()
|
||||
defer pw.mu.Unlock()
|
||||
if pw.done {
|
||||
return
|
||||
}
|
||||
pw.done = true
|
||||
close(pw.blockingCh)
|
||||
old := pw.pickerGen.Swap(nil)
|
||||
close(old.blockingCh)
|
||||
}
|
||||
|
||||
func (pw *pickerWrapper) enterIdleMode() {
|
||||
pw.mu.Lock()
|
||||
defer pw.mu.Unlock()
|
||||
if pw.done {
|
||||
return
|
||||
}
|
||||
pw.idle = true
|
||||
}
|
||||
|
||||
func (pw *pickerWrapper) exitIdleMode() {
|
||||
pw.mu.Lock()
|
||||
defer pw.mu.Unlock()
|
||||
if pw.done {
|
||||
return
|
||||
}
|
||||
pw.blockingCh = make(chan struct{})
|
||||
pw.idle = false
|
||||
// reset clears the pickerWrapper and prepares it for being used again when idle
|
||||
// mode is exited.
|
||||
func (pw *pickerWrapper) reset() {
|
||||
old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})})
|
||||
close(old.blockingCh)
|
||||
}
|
||||
|
||||
// dropError is a wrapper error that indicates the LB policy wishes to drop the
|
||||
|
||||
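The picker_wrapper refactor above replaces the mutex-guarded picker/blockingCh pair with an atomic.Pointer to an immutable pickerGeneration: publishing a new generation closes the old generation's channel, which wakes every blocked pick without holding a lock. A generic, hedged sketch of that pattern (names are made up; requires Go 1.19+ for atomic.Pointer):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// generation pairs an immutable value with a channel that is closed once a
// newer generation replaces it, mirroring pickerGeneration above.
type generation[T any] struct {
	value T
	stale chan struct{}
}

type holder[T any] struct {
	gen atomic.Pointer[generation[T]]
}

func newHolder[T any]() *holder[T] {
	h := &holder[T]{}
	h.gen.Store(&generation[T]{stale: make(chan struct{})})
	return h
}

// publish swaps in a new generation and closes the old one's channel so any
// goroutine waiting on it re-reads the latest value.
func (h *holder[T]) publish(v T) {
	old := h.gen.Swap(&generation[T]{value: v, stale: make(chan struct{})})
	close(old.stale)
}

func (h *holder[T]) current() (T, <-chan struct{}) {
	g := h.gen.Load()
	return g.value, g.stale
}

func main() {
	h := newHolder[string]()
	_, stale := h.current()
	h.publish("picker-v2")
	<-stale // unblocked by publish
	v, _ := h.current()
	fmt.Println(v) // picker-v2
}
```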
6
vendor/google.golang.org/grpc/regenerate.sh
generated
vendored
6
vendor/google.golang.org/grpc/regenerate.sh
generated
vendored
@@ -63,7 +63,7 @@ LEGACY_SOURCES=(

# Generates only the new gRPC Service symbols
SOURCES=(
$(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$')
$(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$')
${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
@@ -93,7 +93,7 @@ Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing

for src in ${SOURCES[@]}; do
echo "protoc ${src}"
protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \
protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \
-I"." \
-I${WORKDIR}/grpc-proto \
-I${WORKDIR}/googleapis \
@@ -118,6 +118,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/

# grpc_testing_not_regenerate/*.pb.go are not re-generated,
# see grpc_testing_not_regenerate/README.md for details.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go

cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
60
vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
generated
vendored
Normal file
60
vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package dns implements a dns resolver to be installed as the default resolver
// in grpc.
package dns

import (
"time"

"google.golang.org/grpc/internal/resolver/dns"
"google.golang.org/grpc/resolver"
)

// SetResolvingTimeout sets the maximum duration for DNS resolution requests.
//
// This function affects the global timeout used by all channels using the DNS
// name resolver scheme.
//
// It must be called only at application startup, before any gRPC calls are
// made. Modifying this value after initialization is not thread-safe.
//
// The default value is 30 seconds. Setting the timeout too low may result in
// premature timeouts during resolution, while setting it too high may lead to
// unnecessary delays in service discovery. Choose a value appropriate for your
// specific needs and network environment.
func SetResolvingTimeout(timeout time.Duration) {
dns.ResolvingTimeout = timeout
}

// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
//
// Deprecated: import grpc and use resolver.Get("dns") instead.
func NewBuilder() resolver.Builder {
return dns.NewBuilder()
}

// SetMinResolutionInterval sets the default minimum interval at which DNS
// re-resolutions are allowed. This helps to prevent excessive re-resolution.
//
// It must be called only at application startup, before any gRPC calls are
// made. Modifying this value after initialization is not thread-safe.
func SetMinResolutionInterval(d time.Duration) {
dns.MinResolutionInterval = d
}
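Both knobs in the new resolver/dns package above are process-global and must be set before any gRPC calls are made. A minimal usage sketch (the values are illustrative):

```go
package main

import (
	"time"

	"google.golang.org/grpc/resolver/dns"
)

func init() {
	// Global settings; must run before any channel is created or any RPC is issued.
	dns.SetResolvingTimeout(10 * time.Second)
	dns.SetMinResolutionInterval(5 * time.Second)
}

func main() {}
```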
15
vendor/google.golang.org/grpc/resolver/manual/manual.go
generated
vendored
15
vendor/google.golang.org/grpc/resolver/manual/manual.go
generated
vendored
@@ -78,12 +78,12 @@ func (r *Resolver) InitialState(s resolver.State) {
func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
r.BuildCallback(target, cc, opts)
r.mu.Lock()
defer r.mu.Unlock()
r.CC = cc
if r.lastSeenState != nil {
err := r.CC.UpdateState(*r.lastSeenState)
go r.UpdateStateCallback(err)
}
r.mu.Unlock()
return r, nil
}

@@ -105,15 +105,22 @@ func (r *Resolver) Close() {
// UpdateState calls CC.UpdateState.
func (r *Resolver) UpdateState(s resolver.State) {
r.mu.Lock()
err := r.CC.UpdateState(s)
defer r.mu.Unlock()
var err error
if r.CC == nil {
panic("cannot update state as grpc.Dial with resolver has not been called")
}
err = r.CC.UpdateState(s)
r.lastSeenState = &s
r.mu.Unlock()
r.UpdateStateCallback(err)
}

// ReportError calls CC.ReportError.
func (r *Resolver) ReportError(err error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.CC == nil {
panic("cannot report error as grpc.Dial with resolver has not been called")
}
r.CC.ReportError(err)
r.mu.Unlock()
}
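With the change above, manual.Resolver.UpdateState and ReportError panic if called before gRPC has built the resolver, so initial addresses should go through InitialState. A hedged usage sketch, assuming grpc-go new enough to have grpc.NewClient; the scheme and addresses are made up:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("example")
	// Seed addresses before the channel is created; calling UpdateState at this
	// point would panic because Build has not run yet.
	r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})

	cc, err := grpc.NewClient("example:///svc",
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
	// Once the channel has built the resolver (after its first RPC or Connect),
	// r.UpdateState can push new address lists.
}
```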
113
vendor/google.golang.org/grpc/resolver/map.go
generated
vendored
113
vendor/google.golang.org/grpc/resolver/map.go
generated
vendored
@@ -136,3 +136,116 @@ func (a *AddressMap) Values() []any {
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
type endpointNode struct {
|
||||
addrs map[string]struct{}
|
||||
}
|
||||
|
||||
// Equal returns whether the unordered set of addrs are the same between the
|
||||
// endpoint nodes.
|
||||
func (en *endpointNode) Equal(en2 *endpointNode) bool {
|
||||
if len(en.addrs) != len(en2.addrs) {
|
||||
return false
|
||||
}
|
||||
for addr := range en.addrs {
|
||||
if _, ok := en2.addrs[addr]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toEndpointNode(endpoint Endpoint) endpointNode {
|
||||
en := make(map[string]struct{})
|
||||
for _, addr := range endpoint.Addresses {
|
||||
en[addr.Addr] = struct{}{}
|
||||
}
|
||||
return endpointNode{
|
||||
addrs: en,
|
||||
}
|
||||
}
|
||||
|
||||
// EndpointMap is a map of endpoints to arbitrary values keyed on only the
|
||||
// unordered set of address strings within an endpoint. This map is not thread
|
||||
// safe, thus it is unsafe to access concurrently. Must be created via
|
||||
// NewEndpointMap; do not construct directly.
|
||||
type EndpointMap struct {
|
||||
endpoints map[*endpointNode]any
|
||||
}
|
||||
|
||||
// NewEndpointMap creates a new EndpointMap.
|
||||
func NewEndpointMap() *EndpointMap {
|
||||
return &EndpointMap{
|
||||
endpoints: make(map[*endpointNode]any),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the value for the address in the map, if present.
|
||||
func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) {
|
||||
en := toEndpointNode(e)
|
||||
if endpoint := em.find(en); endpoint != nil {
|
||||
return em.endpoints[endpoint], true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Set updates or adds the value to the address in the map.
|
||||
func (em *EndpointMap) Set(e Endpoint, value any) {
|
||||
en := toEndpointNode(e)
|
||||
if endpoint := em.find(en); endpoint != nil {
|
||||
em.endpoints[endpoint] = value
|
||||
return
|
||||
}
|
||||
em.endpoints[&en] = value
|
||||
}
|
||||
|
||||
// Len returns the number of entries in the map.
|
||||
func (em *EndpointMap) Len() int {
|
||||
return len(em.endpoints)
|
||||
}
|
||||
|
||||
// Keys returns a slice of all current map keys, as endpoints specifying the
|
||||
// addresses present in the endpoint keys, in which uniqueness is determined by
|
||||
// the unordered set of addresses. Thus, endpoint information returned is not
|
||||
// the full endpoint data (drops duplicated addresses and attributes) but can be
|
||||
// used for EndpointMap accesses.
|
||||
func (em *EndpointMap) Keys() []Endpoint {
|
||||
ret := make([]Endpoint, 0, len(em.endpoints))
|
||||
for en := range em.endpoints {
|
||||
var endpoint Endpoint
|
||||
for addr := range en.addrs {
|
||||
endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr})
|
||||
}
|
||||
ret = append(ret, endpoint)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Values returns a slice of all current map values.
|
||||
func (em *EndpointMap) Values() []any {
|
||||
ret := make([]any, 0, len(em.endpoints))
|
||||
for _, val := range em.endpoints {
|
||||
ret = append(ret, val)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// find returns a pointer to the endpoint node in em if the endpoint node is
|
||||
// already present. If not found, nil is returned. The comparisons are done on
|
||||
// the unordered set of addresses within an endpoint.
|
||||
func (em EndpointMap) find(e endpointNode) *endpointNode {
|
||||
for endpoint := range em.endpoints {
|
||||
if e.Equal(endpoint) {
|
||||
return endpoint
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes the specified endpoint from the map.
|
||||
func (em *EndpointMap) Delete(e Endpoint) {
|
||||
en := toEndpointNode(e)
|
||||
if entry := em.find(en); entry != nil {
|
||||
delete(em.endpoints, entry)
|
||||
}
|
||||
}
|
||||
|
||||
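The new EndpointMap above keys entries on the unordered set of address strings inside an endpoint, so two endpoints listing the same addresses in a different order resolve to the same entry. A small usage sketch with made-up addresses:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	em := resolver.NewEndpointMap()
	em.Set(resolver.Endpoint{Addresses: []resolver.Address{
		{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"},
	}}, "backend-a")

	// Lookup compares the unordered address set, so a permuted endpoint still
	// finds the same entry.
	v, ok := em.Get(resolver.Endpoint{Addresses: []resolver.Address{
		{Addr: "10.0.0.2:443"}, {Addr: "10.0.0.1:443"},
	}})
	fmt.Println(v, ok, em.Len()) // backend-a true 1
}
```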
30
vendor/google.golang.org/grpc/resolver/resolver.go
generated
vendored
30
vendor/google.golang.org/grpc/resolver/resolver.go
generated
vendored
@@ -29,6 +29,7 @@ import (
|
||||
|
||||
"google.golang.org/grpc/attributes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
@@ -63,16 +64,18 @@ func Get(scheme string) Builder {
|
||||
}
|
||||
|
||||
// SetDefaultScheme sets the default scheme that will be used. The default
|
||||
// default scheme is "passthrough".
|
||||
// scheme is initially set to "passthrough".
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. The scheme set last overrides
|
||||
// previously set values.
|
||||
func SetDefaultScheme(scheme string) {
|
||||
defaultScheme = scheme
|
||||
internal.UserSetDefaultScheme = true
|
||||
}
|
||||
|
||||
// GetDefaultScheme gets the default scheme that will be used.
|
||||
// GetDefaultScheme gets the default scheme that will be used by grpc.Dial. If
|
||||
// SetDefaultScheme is never called, the default scheme used by grpc.NewClient is "dns" instead.
|
||||
func GetDefaultScheme() string {
|
||||
return defaultScheme
|
||||
}
|
||||
@@ -168,6 +171,9 @@ type BuildOptions struct {
|
||||
// field. In most cases though, it is not appropriate, and this field may
|
||||
// be ignored.
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
// Authority is the effective authority of the clientconn for which the
|
||||
// resolver is built.
|
||||
Authority string
|
||||
}
|
||||
|
||||
// An Endpoint is one network endpoint, or server, which may have multiple
|
||||
@@ -240,11 +246,6 @@ type ClientConn interface {
|
||||
//
|
||||
// Deprecated: Use UpdateState instead.
|
||||
NewAddress(addresses []Address)
|
||||
// NewServiceConfig is called by resolver to notify ClientConn a new
|
||||
// service config. The service config should be provided as a json string.
|
||||
//
|
||||
// Deprecated: Use UpdateState instead.
|
||||
NewServiceConfig(serviceConfig string)
|
||||
// ParseServiceConfig parses the provided service config and returns an
|
||||
// object that provides the parsed config.
|
||||
ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
|
||||
@@ -286,6 +287,11 @@ func (t Target) Endpoint() string {
|
||||
return strings.TrimPrefix(endpoint, "/")
|
||||
}
|
||||
|
||||
// String returns the canonical string representation of Target.
|
||||
func (t Target) String() string {
|
||||
return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint()
|
||||
}
|
||||
|
||||
// Builder creates a resolver that will be used to watch name resolution updates.
|
||||
type Builder interface {
|
||||
// Build creates a new resolver for the given target.
|
||||
@@ -314,3 +320,13 @@ type Resolver interface {
|
||||
// Close closes the resolver.
|
||||
Close()
|
||||
}
|
||||
|
||||
// AuthorityOverrider is implemented by Builders that wish to override the
|
||||
// default authority for the ClientConn.
|
||||
// By default, the authority used is target.Endpoint().
|
||||
type AuthorityOverrider interface {
|
||||
// OverrideAuthority returns the authority to use for a ClientConn with the
|
||||
// given target. The implementation must generate it without blocking,
|
||||
// typically in line, and must keep it unchanged.
|
||||
OverrideAuthority(Target) string
|
||||
}
|
||||
|
||||
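Per the updated doc comments above, SetDefaultScheme must run during initialization and the last call wins; if it is never called, grpc.NewClient defaults to "dns" while grpc.Dial keeps "passthrough". A short sketch of pinning the scheme explicitly:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func init() {
	// Must happen before any channels are created; not thread-safe afterwards.
	resolver.SetDefaultScheme("dns")
}

func main() {
	fmt.Println(resolver.GetDefaultScheme()) // dns
}
```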
247
vendor/google.golang.org/grpc/resolver_conn_wrapper.go
generated
vendored
247
vendor/google.golang.org/grpc/resolver_conn_wrapper.go
generated
vendored
@@ -1,247 +0,0 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// resolverStateUpdater wraps the single method used by ccResolverWrapper to
|
||||
// report a state update from the actual resolver implementation.
|
||||
type resolverStateUpdater interface {
|
||||
updateResolverState(s resolver.State, err error) error
|
||||
}
|
||||
|
||||
// ccResolverWrapper is a wrapper on top of cc for resolvers.
|
||||
// It implements resolver.ClientConn interface.
|
||||
type ccResolverWrapper struct {
|
||||
// The following fields are initialized when the wrapper is created and are
|
||||
// read-only afterwards, and therefore can be accessed without a mutex.
|
||||
cc resolverStateUpdater
|
||||
channelzID *channelz.Identifier
|
||||
ignoreServiceConfig bool
|
||||
opts ccResolverWrapperOpts
|
||||
serializer *grpcsync.CallbackSerializer // To serialize all incoming calls.
|
||||
serializerCancel context.CancelFunc // To close the serializer, accessed only from close().
|
||||
|
||||
// All incoming (resolver --> gRPC) calls are guaranteed to execute in a
|
||||
// mutually exclusive manner as they are scheduled on the serializer.
|
||||
// Fields accessed *only* in these serializer callbacks, can therefore be
|
||||
// accessed without a mutex.
|
||||
curState resolver.State
|
||||
|
||||
// mu guards access to the below fields.
|
||||
mu sync.Mutex
|
||||
closed bool
|
||||
resolver resolver.Resolver // Accessed only from outgoing calls.
|
||||
}
|
||||
|
||||
// ccResolverWrapperOpts wraps the arguments to be passed when creating a new
|
||||
// ccResolverWrapper.
|
||||
type ccResolverWrapperOpts struct {
|
||||
target resolver.Target // User specified dial target to resolve.
|
||||
builder resolver.Builder // Resolver builder to use.
|
||||
bOpts resolver.BuildOptions // Resolver build options to use.
|
||||
channelzID *channelz.Identifier // Channelz identifier for the channel.
|
||||
}
|
||||
|
||||
// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
|
||||
// returns a ccResolverWrapper object which wraps the newly built resolver.
|
||||
func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ccr := &ccResolverWrapper{
|
||||
cc: cc,
|
||||
channelzID: opts.channelzID,
|
||||
ignoreServiceConfig: opts.bOpts.DisableServiceConfig,
|
||||
opts: opts,
|
||||
serializer: grpcsync.NewCallbackSerializer(ctx),
|
||||
serializerCancel: cancel,
|
||||
}
|
||||
|
||||
// Cannot hold the lock at build time because the resolver can send an
|
||||
// update or error inline and these incoming calls grab the lock to schedule
|
||||
// a callback in the serializer.
|
||||
r, err := opts.builder.Build(opts.target, ccr, opts.bOpts)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Any error reported by the resolver at build time that leads to a
|
||||
// re-resolution request from the balancer is dropped by grpc until we
|
||||
// return from this function. So, we don't have to handle pending resolveNow
|
||||
// requests here.
|
||||
ccr.mu.Lock()
|
||||
ccr.resolver = r
|
||||
ccr.mu.Unlock()
|
||||
|
||||
return ccr, nil
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
|
||||
ccr.mu.Lock()
|
||||
defer ccr.mu.Unlock()
|
||||
|
||||
// ccr.resolver field is set only after the call to Build() returns. But in
|
||||
// the process of building, the resolver may send an error update which when
|
||||
// propagated to the balancer may result in a re-resolution request.
|
||||
if ccr.closed || ccr.resolver == nil {
|
||||
return
|
||||
}
|
||||
ccr.resolver.ResolveNow(o)
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) close() {
|
||||
ccr.mu.Lock()
|
||||
if ccr.closed {
|
||||
ccr.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
channelz.Info(logger, ccr.channelzID, "Closing the name resolver")
|
||||
|
||||
// Close the serializer to ensure that no more calls from the resolver are
|
||||
// handled, before actually closing the resolver.
|
||||
ccr.serializerCancel()
|
||||
ccr.closed = true
|
||||
r := ccr.resolver
|
||||
ccr.mu.Unlock()
|
||||
|
||||
// Give enqueued callbacks a chance to finish.
|
||||
<-ccr.serializer.Done()
|
||||
|
||||
// Spawn a goroutine to close the resolver (since it may block trying to
|
||||
// cleanup all allocated resources) and return early.
|
||||
go r.Close()
|
||||
}
|
||||
|
||||
// serializerScheduleLocked is a convenience method to schedule a function to be
|
||||
// run on the serializer while holding ccr.mu.
|
||||
func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) {
|
||||
ccr.mu.Lock()
|
||||
ccr.serializer.Schedule(f)
|
||||
ccr.mu.Unlock()
|
||||
}
|
||||
|
||||
// UpdateState is called by resolver implementations to report new state to gRPC
|
||||
// which includes addresses and service config.
|
||||
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
|
||||
errCh := make(chan error, 1)
|
||||
if s.Endpoints == nil {
|
||||
s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
|
||||
for _, a := range s.Addresses {
|
||||
ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
|
||||
ep.Addresses[0].BalancerAttributes = nil
|
||||
s.Endpoints = append(s.Endpoints, ep)
|
||||
}
|
||||
}
|
||||
ok := ccr.serializer.Schedule(func(context.Context) {
|
||||
ccr.addChannelzTraceEvent(s)
|
||||
ccr.curState = s
|
||||
if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
|
||||
errCh <- balancer.ErrBadResolverState
|
||||
return
|
||||
}
|
||||
errCh <- nil
|
||||
})
|
||||
if !ok {
// The only time Schedule() fails to add the callback to the
// serializer is when the serializer is closed, and this happens only
// when the resolver wrapper is closed.
return nil
}
|
||||
return <-errCh
|
||||
}
|
||||
|
||||
// ReportError is called by resolver implementations to report errors
|
||||
// encountered during name resolution to gRPC.
|
||||
func (ccr *ccResolverWrapper) ReportError(err error) {
|
||||
ccr.serializerScheduleLocked(func(_ context.Context) {
|
||||
channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
|
||||
ccr.cc.updateResolverState(resolver.State{}, err)
|
||||
})
|
||||
}
|
||||
|
||||
// NewAddress is called by the resolver implementation to send addresses to
|
||||
// gRPC.
|
||||
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||
ccr.serializerScheduleLocked(func(_ context.Context) {
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
|
||||
ccr.curState.Addresses = addrs
|
||||
ccr.cc.updateResolverState(ccr.curState, nil)
|
||||
})
|
||||
}
|
||||
|
||||
// NewServiceConfig is called by the resolver implementation to send service
|
||||
// configs to gRPC.
|
||||
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
||||
ccr.serializerScheduleLocked(func(_ context.Context) {
|
||||
channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc)
|
||||
if ccr.ignoreServiceConfig {
|
||||
channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config")
|
||||
return
|
||||
}
|
||||
scpr := parseServiceConfig(sc)
|
||||
if scpr.Err != nil {
|
||||
channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
|
||||
return
|
||||
}
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
|
||||
ccr.curState.ServiceConfig = scpr
|
||||
ccr.cc.updateResolverState(ccr.curState, nil)
|
||||
})
|
||||
}
|
||||
|
||||
// ParseServiceConfig is called by resolver implementations to parse a JSON
|
||||
// representation of the service config.
|
||||
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
|
||||
return parseServiceConfig(scJSON)
|
||||
}
|
||||
|
||||
// addChannelzTraceEvent adds a channelz trace event containing the new
|
||||
// state received from resolver implementations.
|
||||
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
||||
var updates []string
|
||||
var oldSC, newSC *ServiceConfig
|
||||
var oldOK, newOK bool
|
||||
if ccr.curState.ServiceConfig != nil {
|
||||
oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
|
||||
}
|
||||
if s.ServiceConfig != nil {
|
||||
newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
|
||||
}
|
||||
if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
|
||||
updates = append(updates, "service config updated")
|
||||
}
|
||||
if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
|
||||
updates = append(updates, "resolver returned an empty address list")
|
||||
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
|
||||
updates = append(updates, "resolver returned new addresses")
|
||||
}
|
||||
channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
|
||||
}
|
||||
198
vendor/google.golang.org/grpc/resolver_wrapper.go
generated
vendored
Normal file
198
vendor/google.golang.org/grpc/resolver_wrapper.go
generated
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// ccResolverWrapper is a wrapper on top of cc for resolvers.
// It implements resolver.ClientConn interface.
type ccResolverWrapper struct {
// The following fields are initialized when the wrapper is created and are
// read-only afterwards, and therefore can be accessed without a mutex.
cc *ClientConn
ignoreServiceConfig bool
serializer *grpcsync.CallbackSerializer
serializerCancel context.CancelFunc

resolver resolver.Resolver // only accessed within the serializer

// The following fields are protected by mu. Caller must take cc.mu before
// taking mu.
mu sync.Mutex
curState resolver.State
closed bool
}

// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used
// after calling start, which builds the resolver.
func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
ctx, cancel := context.WithCancel(cc.ctx)
return &ccResolverWrapper{
cc: cc,
ignoreServiceConfig: cc.dopts.disableServiceConfig,
serializer: grpcsync.NewCallbackSerializer(ctx),
serializerCancel: cancel,
}
}
|
||||
|
||||
// start builds the name resolver using the resolver.Builder in cc and returns
|
||||
// any error encountered. It must always be the first operation performed on
|
||||
// any newly created ccResolverWrapper, except that close may be called instead.
|
||||
func (ccr *ccResolverWrapper) start() error {
|
||||
errCh := make(chan error)
|
||||
ccr.serializer.Schedule(func(ctx context.Context) {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
opts := resolver.BuildOptions{
|
||||
DisableServiceConfig: ccr.cc.dopts.disableServiceConfig,
|
||||
DialCreds: ccr.cc.dopts.copts.TransportCredentials,
|
||||
CredsBundle: ccr.cc.dopts.copts.CredsBundle,
|
||||
Dialer: ccr.cc.dopts.copts.Dialer,
|
||||
Authority: ccr.cc.authority,
|
||||
}
|
||||
var err error
|
||||
ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
|
||||
errCh <- err
|
||||
})
|
||||
return <-errCh
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
|
||||
ccr.serializer.Schedule(func(ctx context.Context) {
|
||||
if ctx.Err() != nil || ccr.resolver == nil {
|
||||
return
|
||||
}
|
||||
ccr.resolver.ResolveNow(o)
|
||||
})
|
||||
}
|
||||
|
||||
// close initiates async shutdown of the wrapper. To determine that the wrapper
// has finished shutting down, callers should block on ccr.serializer.Done()
// without holding cc.mu.
func (ccr *ccResolverWrapper) close() {
channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver")
ccr.mu.Lock()
ccr.closed = true
ccr.mu.Unlock()

ccr.serializer.Schedule(func(context.Context) {
if ccr.resolver == nil {
return
}
ccr.resolver.Close()
ccr.resolver = nil
})
ccr.serializerCancel()
}
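The wrapper above leans on grpcsync.CallbackSerializer, an internal package, to run resolver callbacks one at a time and to signal when everything scheduled has drained. The following is only a simplified sketch of that idea, with hypothetical names, and makes no claim to match the internal implementation:

package main

import (
    "context"
    "fmt"
)

// callbackSerializer runs scheduled callbacks one at a time on a single
// goroutine. Done is closed after the context is cancelled and every
// previously scheduled callback has finished.
type callbackSerializer struct {
    callbacks chan func(context.Context)
    done      chan struct{}
}

func newCallbackSerializer(ctx context.Context) *callbackSerializer {
    s := &callbackSerializer{
        callbacks: make(chan func(context.Context), 16),
        done:      make(chan struct{}),
    }
    go func() {
        defer close(s.done)
        for {
            select {
            case cb := <-s.callbacks:
                cb(ctx)
            case <-ctx.Done():
                // Drain anything already queued, then exit.
                for {
                    select {
                    case cb := <-s.callbacks:
                        cb(ctx)
                    default:
                        return
                    }
                }
            }
        }
    }()
    return s
}

// Schedule queues f; it returns false once the serializer has shut down.
func (s *callbackSerializer) Schedule(f func(context.Context)) bool {
    select {
    case s.callbacks <- f:
        return true
    case <-s.done:
        return false
    }
}

func (s *callbackSerializer) Done() <-chan struct{} { return s.done }

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    s := newCallbackSerializer(ctx)
    s.Schedule(func(context.Context) { fmt.Println("runs on the serializer goroutine") })
    cancel()
    <-s.Done() // give enqueued callbacks a chance to finish, as close() does above
}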

// UpdateState is called by resolver implementations to report new state to gRPC
// which includes addresses and service config.
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
ccr.cc.mu.Lock()
ccr.mu.Lock()
if ccr.closed {
ccr.mu.Unlock()
ccr.cc.mu.Unlock()
return nil
}
if s.Endpoints == nil {
s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
for _, a := range s.Addresses {
ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
ep.Addresses[0].BalancerAttributes = nil
s.Endpoints = append(s.Endpoints, ep)
}
}
ccr.addChannelzTraceEvent(s)
ccr.curState = s
ccr.mu.Unlock()
return ccr.cc.updateResolverStateAndUnlock(s, nil)
}
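UpdateState is what a resolver.Resolver implementation calls to push addresses into the channel; as the code above shows, gRPC derives Endpoints from Addresses when a resolver only reports the latter. A minimal sketch of a custom resolver reporting endpoints directly — the scheme name and addresses are made up for illustration:

package main

import (
    "google.golang.org/grpc/resolver"
)

type staticBuilder struct{}

func (staticBuilder) Scheme() string { return "static" } // hypothetical scheme

func (staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
    r := &staticResolver{cc: cc}
    r.resolve()
    return r, nil
}

type staticResolver struct {
    cc resolver.ClientConn
}

func (r *staticResolver) resolve() {
    // Report one endpoint per backend.
    state := resolver.State{
        Endpoints: []resolver.Endpoint{
            {Addresses: []resolver.Address{{Addr: "10.0.0.1:50051"}}},
            {Addresses: []resolver.Address{{Addr: "10.0.0.2:50051"}}},
        },
    }
    // UpdateState may return balancer.ErrBadResolverState; a real resolver
    // would typically back off and re-resolve in that case.
    _ = r.cc.UpdateState(state)
}

func (r *staticResolver) ResolveNow(resolver.ResolveNowOptions) { r.resolve() }
func (r *staticResolver) Close()                                {}

func init() { resolver.Register(staticBuilder{}) }

func main() {}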
|
||||
|
||||
// ReportError is called by resolver implementations to report errors
|
||||
// encountered during name resolution to gRPC.
|
||||
func (ccr *ccResolverWrapper) ReportError(err error) {
|
||||
ccr.cc.mu.Lock()
|
||||
ccr.mu.Lock()
|
||||
if ccr.closed {
|
||||
ccr.mu.Unlock()
|
||||
ccr.cc.mu.Unlock()
|
||||
return
|
||||
}
|
||||
ccr.mu.Unlock()
|
||||
channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err)
|
||||
ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err)
|
||||
}
|
||||
|
||||
// NewAddress is called by the resolver implementation to send addresses to
|
||||
// gRPC.
|
||||
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||
ccr.cc.mu.Lock()
|
||||
ccr.mu.Lock()
|
||||
if ccr.closed {
|
||||
ccr.mu.Unlock()
|
||||
ccr.cc.mu.Unlock()
|
||||
return
|
||||
}
|
||||
s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}
|
||||
ccr.addChannelzTraceEvent(s)
|
||||
ccr.curState = s
|
||||
ccr.mu.Unlock()
|
||||
ccr.cc.updateResolverStateAndUnlock(s, nil)
|
||||
}
|
||||
|
||||
// ParseServiceConfig is called by resolver implementations to parse a JSON
|
||||
// representation of the service config.
|
||||
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
|
||||
return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts)
|
||||
}
|
||||
|
||||
// addChannelzTraceEvent adds a channelz trace event containing the new
|
||||
// state received from resolver implementations.
|
||||
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
||||
var updates []string
|
||||
var oldSC, newSC *ServiceConfig
|
||||
var oldOK, newOK bool
|
||||
if ccr.curState.ServiceConfig != nil {
|
||||
oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
|
||||
}
|
||||
if s.ServiceConfig != nil {
|
||||
newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
|
||||
}
|
||||
if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
|
||||
updates = append(updates, "service config updated")
|
||||
}
|
||||
if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
|
||||
updates = append(updates, "resolver returned an empty address list")
|
||||
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
|
||||
updates = append(updates, "resolver returned new addresses")
|
||||
}
|
||||
channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
|
||||
}
|
||||
93
vendor/google.golang.org/grpc/rpc_util.go
generated
vendored
93
vendor/google.golang.org/grpc/rpc_util.go
generated
vendored
@@ -189,6 +189,20 @@ type EmptyCallOption struct{}
|
||||
func (EmptyCallOption) before(*callInfo) error { return nil }
|
||||
func (EmptyCallOption) after(*callInfo, *csAttempt) {}
|
||||
|
||||
// StaticMethod returns a CallOption which specifies that a call is being made
// to a method that is static, which means the method is known at compile time
// and doesn't change at runtime. This can be used as a signal to stats plugins
// that this method is safe to include as a key to a measurement.
func StaticMethod() CallOption {
return StaticMethodCallOption{}
}

// StaticMethodCallOption is a CallOption that specifies that a call comes
// from a static method.
type StaticMethodCallOption struct {
EmptyCallOption
}
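Since StaticMethod is a public CallOption, callers can attach it per RPC or install it channel-wide. A short sketch of the latter; the target address is a placeholder and the per-call stub is hypothetical:

package main

import (
    "log"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // Channel-wide: every RPC on this ClientConn carries StaticMethod, so
    // stats plugins can treat the method name as a low-cardinality key.
    conn, err := grpc.Dial("localhost:50051", // placeholder target
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithDefaultCallOptions(grpc.StaticMethod()),
    )
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()

    // Per call it would look like:
    //   resp, err := client.SomeMethod(ctx, req, grpc.StaticMethod())
    // where client is a generated stub (hypothetical).
}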
|
||||
|
||||
// Header returns a CallOptions that retrieves the header metadata
|
||||
// for a unary RPC.
|
||||
func Header(md *metadata.MD) CallOption {
|
||||
@@ -640,14 +654,18 @@ func encode(c baseCodec, msg any) ([]byte, error) {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// compress returns the input bytes compressed by compressor or cp. If both
|
||||
// compressors are nil, returns nil.
|
||||
// compress returns the input bytes compressed by compressor or cp.
|
||||
// If both compressors are nil, or if the message has zero length, returns nil,
|
||||
// indicating no compression was done.
|
||||
//
|
||||
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
|
||||
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
|
||||
if compressor == nil && cp == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if len(in) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
wrapErr := func(err error) error {
|
||||
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
|
||||
}
|
||||
@@ -726,17 +744,19 @@ type payloadInfo struct {
|
||||
uncompressedBytes []byte
|
||||
}
|
||||
|
||||
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
|
||||
pf, buf, err := p.recvMsg(maxReceiveMessageSize)
|
||||
// recvAndDecompress reads a message from the stream, decompressing it if necessary.
//
// Calling the returned cancel function releases the buffer back to the pool, so the
// caller should call it as soon as the buffer is no longer needed.
|
||||
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor,
|
||||
) (uncompressedBuf []byte, cancel func(), err error) {
|
||||
pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if payInfo != nil {
|
||||
payInfo.compressedLength = len(buf)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
|
||||
return nil, st.Err()
|
||||
return nil, nil, st.Err()
|
||||
}
|
||||
|
||||
var size int
|
||||
@@ -744,21 +764,35 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
||||
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
|
||||
// use this decompressor as the default.
|
||||
if dc != nil {
|
||||
buf, err = dc.Do(bytes.NewReader(buf))
|
||||
size = len(buf)
|
||||
uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf))
|
||||
size = len(uncompressedBuf)
|
||||
} else {
|
||||
buf, size, err = decompress(compressor, buf, maxReceiveMessageSize)
|
||||
uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
|
||||
return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
|
||||
}
|
||||
if size > maxReceiveMessageSize {
|
||||
// TODO: Revisit the error code. Currently keep it consistent with java
|
||||
// implementation.
|
||||
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
|
||||
return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
|
||||
}
|
||||
} else {
|
||||
uncompressedBuf = compressedBuf
|
||||
}
|
||||
|
||||
if payInfo != nil {
|
||||
payInfo.compressedLength = len(compressedBuf)
|
||||
payInfo.uncompressedBytes = uncompressedBuf
|
||||
|
||||
cancel = func() {}
|
||||
} else {
|
||||
cancel = func() {
|
||||
p.recvBufferPool.Put(&compressedBuf)
|
||||
}
|
||||
}
|
||||
return buf, nil
|
||||
|
||||
return uncompressedBuf, cancel, nil
|
||||
}
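The cancel function returned above exists so the receive buffer can go back to the shared pool once the payload is no longer referenced. A generic sketch of that pattern, independent of gRPC internals and with illustrative names only:

package main

import (
    "fmt"
    "sync"
)

var bufPool = sync.Pool{
    New: func() any { b := make([]byte, 0, 4096); return &b },
}

// readMessage returns the decoded bytes plus a cancel func; calling cancel
// returns the backing buffer to the pool, so callers must not retain buf
// after cancelling.
func readMessage(fill func([]byte) []byte) (buf []byte, cancel func()) {
    bp := bufPool.Get().(*[]byte)
    *bp = fill((*bp)[:0])
    return *bp, func() { bufPool.Put(bp) }
}

func main() {
    buf, cancel := readMessage(func(b []byte) []byte { return append(b, "payload"...) })
    defer cancel() // release as soon as buf is no longer needed
    fmt.Printf("%s\n", buf)
}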
|
||||
|
||||
// Using compressor, decompress d, returning data and size.
|
||||
@@ -778,6 +812,9 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
|
||||
// size is used as an estimate to size the buffer, but we
|
||||
// will read more data if available.
|
||||
// +MinRead so ReadFrom will not reallocate if size is correct.
|
||||
//
|
||||
// TODO: If we ensure that the buffer size is the same as the DecompressedSize,
|
||||
// we can also utilize the recv buffer pool here.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
|
||||
bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
return buf.Bytes(), int(bytesRead), err
|
||||
@@ -793,18 +830,15 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
|
||||
// dc takes precedence over compressor.
|
||||
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
|
||||
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
|
||||
buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
|
||||
buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
if err := c.Unmarshal(buf, m); err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
|
||||
}
|
||||
if payInfo != nil {
|
||||
payInfo.uncompressedBytes = buf
|
||||
} else {
|
||||
p.recvBufferPool.Put(&buf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -928,22 +962,9 @@ func setCallInfoCodec(c *callInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
|
||||
// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic
|
||||
// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
|
||||
// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
|
||||
type channelzData struct {
|
||||
callsStarted int64
|
||||
callsFailed int64
|
||||
callsSucceeded int64
|
||||
// lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of
|
||||
// time.Time since it's more costly to atomically update time.Time variable than int64 variable.
|
||||
lastCallStartedTime int64
|
||||
}
|
||||
|
||||
// The SupportPackageIsVersion variables are referenced from generated protocol
|
||||
// buffer files to ensure compatibility with the gRPC version used. The latest
|
||||
// support package version is 7.
|
||||
// support package version is 9.
|
||||
//
|
||||
// Older versions are kept for compatibility.
|
||||
//
|
||||
@@ -954,6 +975,8 @@ const (
|
||||
SupportPackageIsVersion5 = true
|
||||
SupportPackageIsVersion6 = true
|
||||
SupportPackageIsVersion7 = true
|
||||
SupportPackageIsVersion8 = true
|
||||
SupportPackageIsVersion9 = true
|
||||
)
|
||||
|
||||
const grpcUA = "grpc-go/" + Version
|
||||
|
||||
405
vendor/google.golang.org/grpc/server.go
generated
vendored
405
vendor/google.golang.org/grpc/server.go
generated
vendored
@@ -33,8 +33,6 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/trace"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/encoding"
|
||||
@@ -70,9 +68,10 @@ func init() {
|
||||
internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
|
||||
return srv.opts.creds
|
||||
}
|
||||
internal.DrainServerTransports = func(srv *Server, addr string) {
|
||||
srv.drainServerTransports(addr)
|
||||
internal.IsRegisteredMethod = func(srv *Server, method string) bool {
|
||||
return srv.isRegisteredMethod(method)
|
||||
}
|
||||
internal.ServerFromContext = serverFromContext
|
||||
internal.AddGlobalServerOptions = func(opt ...ServerOption) {
|
||||
globalServerOptions = append(globalServerOptions, opt...)
|
||||
}
|
||||
@@ -81,6 +80,7 @@ func init() {
|
||||
}
|
||||
internal.BinaryLogger = binaryLogger
|
||||
internal.JoinServerOptions = newJoinServerOption
|
||||
internal.RecvBufferPool = recvBufferPool
|
||||
}
|
||||
|
||||
var statusOK = status.New(codes.OK, "")
|
||||
@@ -129,17 +129,18 @@ type Server struct {
|
||||
drain bool
|
||||
cv *sync.Cond // signaled when connections close for GracefulStop
|
||||
services map[string]*serviceInfo // service name -> service info
|
||||
events trace.EventLog
|
||||
events traceEventLog
|
||||
|
||||
quit *grpcsync.Event
|
||||
done *grpcsync.Event
|
||||
channelzRemoveOnce sync.Once
|
||||
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
|
||||
serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
|
||||
handlersWG sync.WaitGroup // counts active method handler goroutines
|
||||
|
||||
channelzID *channelz.Identifier
|
||||
czData *channelzData
|
||||
channelz *channelz.Server
|
||||
|
||||
serverWorkerChannel chan func()
|
||||
serverWorkerChannel chan func()
|
||||
serverWorkerChannelClose func()
|
||||
}
|
||||
|
||||
type serverOptions struct {
|
||||
@@ -170,6 +171,7 @@ type serverOptions struct {
|
||||
headerTableSize *uint32
|
||||
numServerWorkers uint32
|
||||
recvBufferPool SharedBufferPool
|
||||
waitForHandlers bool
|
||||
}
|
||||
|
||||
var defaultServerOptions = serverOptions{
|
||||
@@ -246,11 +248,9 @@ func SharedWriteBuffer(val bool) ServerOption {
|
||||
}
|
||||
|
||||
// WriteBufferSize determines how much data can be batched before doing a write
|
||||
// on the wire. The corresponding memory allocation for this buffer will be
|
||||
// twice the size to keep syscalls low. The default value for this buffer is
|
||||
// 32KB. Zero or negative values will disable the write buffer such that each
|
||||
// write will be on underlying connection.
|
||||
// Note: A Send call may not directly translate to a write.
|
||||
// on the wire. The default value for this buffer is 32KB. Zero or negative
|
||||
// values will disable the write buffer such that each write will be on underlying
|
||||
// connection. Note: A Send call may not directly translate to a write.
|
||||
func WriteBufferSize(s int) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.writeBufferSize = s
|
||||
@@ -527,12 +527,22 @@ func ConnectionTimeout(d time.Duration) ServerOption {
|
||||
})
|
||||
}
|
||||
|
||||
// MaxHeaderListSizeServerOption is a ServerOption that sets the max
|
||||
// (uncompressed) size of header list that the server is prepared to accept.
|
||||
type MaxHeaderListSizeServerOption struct {
|
||||
MaxHeaderListSize uint32
|
||||
}
|
||||
|
||||
func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) {
|
||||
so.maxHeaderListSize = &o.MaxHeaderListSize
|
||||
}
|
||||
|
||||
// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
|
||||
// of header list that the server is prepared to accept.
|
||||
func MaxHeaderListSize(s uint32) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.maxHeaderListSize = &s
|
||||
})
|
||||
return MaxHeaderListSizeServerOption{
|
||||
MaxHeaderListSize: s,
|
||||
}
|
||||
}
|
||||
|
||||
// HeaderTableSize returns a ServerOption that sets the size of dynamic
|
||||
@@ -567,6 +577,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForHandlers causes Stop to wait until all outstanding method handlers have
// exited before returning. If false, Stop will return as soon as all
// connections have closed, but method handlers may still be running. By
// default, Stop does not wait for method handlers to return.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WaitForHandlers(w bool) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.waitForHandlers = w
})
}
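Like the other ServerOptions touched in this file, the new option composes at construction time. A hedged example wiring a few of them together; the values are arbitrary and WaitForHandlers is experimental as noted above:

package main

import (
    "log"
    "net"
    "runtime"

    "google.golang.org/grpc"
)

func main() {
    lis, err := net.Listen("tcp", ":50051")
    if err != nil {
        log.Fatalf("listen: %v", err)
    }

    s := grpc.NewServer(
        grpc.WaitForHandlers(true),                      // Stop blocks until handlers return
        grpc.NumStreamWorkers(uint32(runtime.NumCPU())), // reuse worker goroutines for streams
        grpc.MaxHeaderListSize(1<<20),                   // 1 MiB of (uncompressed) headers
        grpc.WriteBufferSize(64*1024),                   // batch up to 64 KiB before writing
    )
    // Register services here, e.g. pb.RegisterGreeterServer(s, &server{}) (hypothetical).

    if err := s.Serve(lis); err != nil {
        log.Fatalf("serve: %v", err)
    }
}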
|
||||
|
||||
// RecvBufferPool returns a ServerOption that configures the server
|
||||
// to use the provided shared buffer pool for parsing incoming messages. Depending
|
||||
// on the application's workload, this could result in reduced memory allocation.
|
||||
@@ -578,11 +603,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
|
||||
// options are used: StatsHandler, EnableTracing, or binary logging. In such
|
||||
// cases, the shared buffer pool will be ignored.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
|
||||
// v1.60.0 or later.
|
||||
func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
|
||||
return recvBufferPool(bufferPool)
|
||||
}
|
||||
|
||||
func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.recvBufferPool = bufferPool
|
||||
})
|
||||
@@ -616,15 +643,14 @@ func (s *Server) serverWorker() {
|
||||
// connections to reduce the time spent overall on runtime.morestack.
|
||||
func (s *Server) initServerWorkers() {
|
||||
s.serverWorkerChannel = make(chan func())
|
||||
s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
|
||||
close(s.serverWorkerChannel)
|
||||
})
|
||||
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
|
||||
go s.serverWorker()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) stopServerWorkers() {
|
||||
close(s.serverWorkerChannel)
|
||||
}
|
||||
|
||||
// NewServer creates a gRPC server which has no service registered and has not
|
||||
// started to accept requests yet.
|
||||
func NewServer(opt ...ServerOption) *Server {
|
||||
@@ -642,22 +668,21 @@ func NewServer(opt ...ServerOption) *Server {
|
||||
services: make(map[string]*serviceInfo),
|
||||
quit: grpcsync.NewEvent(),
|
||||
done: grpcsync.NewEvent(),
|
||||
czData: new(channelzData),
|
||||
channelz: channelz.RegisterServer(""),
|
||||
}
|
||||
chainUnaryServerInterceptors(s)
|
||||
chainStreamServerInterceptors(s)
|
||||
s.cv = sync.NewCond(&s.mu)
|
||||
if EnableTracing {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
|
||||
s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
|
||||
}
|
||||
|
||||
if s.opts.numServerWorkers > 0 {
|
||||
s.initServerWorkers()
|
||||
}
|
||||
|
||||
s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
|
||||
channelz.Info(logger, s.channelzID, "Server created")
|
||||
channelz.Info(logger, s.channelz, "Server created")
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -783,20 +808,13 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped")
|
||||
|
||||
type listenSocket struct {
|
||||
net.Listener
|
||||
channelzID *channelz.Identifier
|
||||
}
|
||||
|
||||
func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
|
||||
return &channelz.SocketInternalMetric{
|
||||
SocketOptions: channelz.GetSocketOption(l.Listener),
|
||||
LocalAddr: l.Listener.Addr(),
|
||||
}
|
||||
channelz *channelz.Socket
|
||||
}
|
||||
|
||||
func (l *listenSocket) Close() error {
|
||||
err := l.Listener.Close()
|
||||
channelz.RemoveEntry(l.channelzID)
|
||||
channelz.Info(logger, l.channelzID, "ListenSocket deleted")
|
||||
channelz.RemoveEntry(l.channelz.ID)
|
||||
channelz.Info(logger, l.channelz, "ListenSocket deleted")
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -806,6 +824,18 @@ func (l *listenSocket) Close() error {
|
||||
// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
|
||||
// this method returns.
|
||||
// Serve will return a non-nil error unless Stop or GracefulStop is called.
|
||||
//
|
||||
// Note: All supported releases of Go (as of December 2023) override the OS
|
||||
// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
|
||||
// with OS defaults for keepalive time and interval, callers need to do the
|
||||
// following two things:
|
||||
// - pass a net.Listener created by calling the Listen method on a
|
||||
// net.ListenConfig with the `KeepAlive` field set to a negative value. This
|
||||
// will result in the Go standard library not overriding OS defaults for TCP
|
||||
// keepalive interval and time. But this will also result in the Go standard
|
||||
// library not enabling TCP keepalives by default.
|
||||
// - override the Accept method on the passed in net.Listener and set the
|
||||
// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults.
|
||||
func (s *Server) Serve(lis net.Listener) error {
|
||||
s.mu.Lock()
|
||||
s.printf("serving")
|
||||
@@ -826,7 +856,16 @@ func (s *Server) Serve(lis net.Listener) error {
|
||||
}
|
||||
}()
|
||||
|
||||
ls := &listenSocket{Listener: lis}
|
||||
ls := &listenSocket{
|
||||
Listener: lis,
|
||||
channelz: channelz.RegisterSocket(&channelz.Socket{
|
||||
SocketType: channelz.SocketTypeListen,
|
||||
Parent: s.channelz,
|
||||
RefName: lis.Addr().String(),
|
||||
LocalAddr: lis.Addr(),
|
||||
SocketOptions: channelz.GetSocketOption(lis)},
|
||||
),
|
||||
}
|
||||
s.lis[ls] = true
|
||||
|
||||
defer func() {
|
||||
@@ -838,14 +877,8 @@ func (s *Server) Serve(lis net.Listener) error {
|
||||
s.mu.Unlock()
|
||||
}()
|
||||
|
||||
var err error
|
||||
ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
|
||||
if err != nil {
|
||||
s.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
s.mu.Unlock()
|
||||
channelz.Info(logger, ls.channelzID, "ListenSocket created")
|
||||
channelz.Info(logger, ls.channelz, "ListenSocket created")
|
||||
|
||||
var tempDelay time.Duration // how long to sleep on accept failure
|
||||
for {
|
||||
@@ -913,24 +946,21 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
|
||||
return
|
||||
}
|
||||
|
||||
if cc, ok := rawConn.(interface {
|
||||
PassServerTransport(transport.ServerTransport)
|
||||
}); ok {
|
||||
cc.PassServerTransport(st)
|
||||
}
|
||||
|
||||
if !s.addConn(lisAddr, st) {
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
s.serveStreams(st)
|
||||
s.serveStreams(context.Background(), st, rawConn)
|
||||
s.removeConn(lisAddr, st)
|
||||
}()
|
||||
}
|
||||
|
||||
func (s *Server) drainServerTransports(addr string) {
|
||||
s.mu.Lock()
|
||||
conns := s.conns[addr]
|
||||
for st := range conns {
|
||||
st.Drain("")
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// newHTTP2Transport sets up a http/2 transport (using the
|
||||
// gRPC http2 server transport in transport/http2_server.go).
|
||||
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
|
||||
@@ -947,7 +977,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
|
||||
WriteBufferSize: s.opts.writeBufferSize,
|
||||
ReadBufferSize: s.opts.readBufferSize,
|
||||
SharedWriteBuffer: s.opts.sharedWriteBuffer,
|
||||
ChannelzParentID: s.channelzID,
|
||||
ChannelzParent: s.channelz,
|
||||
MaxHeaderListSize: s.opts.maxHeaderListSize,
|
||||
HeaderTableSize: s.opts.headerTableSize,
|
||||
}
|
||||
@@ -961,7 +991,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
|
||||
if err != credentials.ErrConnDispatched {
|
||||
// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
|
||||
if err != io.EOF {
|
||||
channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||
channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||
}
|
||||
c.Close()
|
||||
}
|
||||
@@ -971,18 +1001,31 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
|
||||
return st
|
||||
}
|
||||
|
||||
func (s *Server) serveStreams(st transport.ServerTransport) {
|
||||
defer st.Close(errors.New("finished serving streams for the server transport"))
|
||||
var wg sync.WaitGroup
|
||||
func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
|
||||
ctx = transport.SetConnection(ctx, rawConn)
|
||||
ctx = peer.NewContext(ctx, st.Peer())
|
||||
for _, sh := range s.opts.statsHandlers {
|
||||
ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: st.Peer().Addr,
|
||||
LocalAddr: st.Peer().LocalAddr,
|
||||
})
|
||||
sh.HandleConn(ctx, &stats.ConnBegin{})
|
||||
}
|
||||
|
||||
defer func() {
|
||||
st.Close(errors.New("finished serving streams for the server transport"))
|
||||
for _, sh := range s.opts.statsHandlers {
|
||||
sh.HandleConn(ctx, &stats.ConnEnd{})
|
||||
}
|
||||
}()
|
||||
|
||||
streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
|
||||
st.HandleStreams(func(stream *transport.Stream) {
|
||||
wg.Add(1)
|
||||
|
||||
st.HandleStreams(ctx, func(stream *transport.Stream) {
|
||||
s.handlersWG.Add(1)
|
||||
streamQuota.acquire()
|
||||
f := func() {
|
||||
defer streamQuota.release()
|
||||
defer wg.Done()
|
||||
defer s.handlersWG.Done()
|
||||
s.handleStream(st, stream)
|
||||
}
|
||||
|
||||
@@ -996,7 +1039,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
|
||||
}
|
||||
go f()
|
||||
})
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
var _ http.Handler = (*Server)(nil)
|
||||
@@ -1040,7 +1082,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
defer s.removeConn(listenerAddressForServeHTTP, st)
|
||||
s.serveStreams(st)
|
||||
s.serveStreams(r.Context(), st, nil)
|
||||
}
|
||||
|
||||
func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
|
||||
@@ -1081,37 +1123,28 @@ func (s *Server) removeConn(addr string, st transport.ServerTransport) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
|
||||
return &channelz.ServerInternalMetric{
|
||||
CallsStarted: atomic.LoadInt64(&s.czData.callsStarted),
|
||||
CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded),
|
||||
CallsFailed: atomic.LoadInt64(&s.czData.callsFailed),
|
||||
LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) incrCallsStarted() {
|
||||
atomic.AddInt64(&s.czData.callsStarted, 1)
|
||||
atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
|
||||
s.channelz.ServerMetrics.CallsStarted.Add(1)
|
||||
s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func (s *Server) incrCallsSucceeded() {
|
||||
atomic.AddInt64(&s.czData.callsSucceeded, 1)
|
||||
s.channelz.ServerMetrics.CallsSucceeded.Add(1)
|
||||
}
|
||||
|
||||
func (s *Server) incrCallsFailed() {
|
||||
atomic.AddInt64(&s.czData.callsFailed, 1)
|
||||
s.channelz.ServerMetrics.CallsFailed.Add(1)
|
||||
}
|
||||
|
||||
func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
|
||||
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
|
||||
if err != nil {
|
||||
channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
|
||||
channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
|
||||
return err
|
||||
}
|
||||
compData, err := compress(data, cp, comp)
|
||||
if err != nil {
|
||||
channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err)
|
||||
channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
|
||||
return err
|
||||
}
|
||||
hdr, payload := msgHeader(data, compData)
|
||||
@@ -1302,10 +1335,11 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
|
||||
if len(shs) != 0 || len(binlogs) != 0 {
|
||||
payInfo = &payloadInfo{}
|
||||
}
|
||||
d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
|
||||
|
||||
d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
|
||||
if err != nil {
|
||||
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
|
||||
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1313,6 +1347,8 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
|
||||
t.IncrMsgRecv()
|
||||
}
|
||||
df := func(v any) error {
|
||||
defer cancel()
|
||||
|
||||
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
|
||||
}
|
||||
@@ -1354,7 +1390,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
if e := t.WriteStatus(stream, appStatus); e != nil {
|
||||
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
}
|
||||
if len(binlogs) != 0 {
|
||||
if h, _ := stream.Header(); h.Len() > 0 {
|
||||
@@ -1394,7 +1430,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
|
||||
}
|
||||
if sts, ok := status.FromError(err); ok {
|
||||
if e := t.WriteStatus(stream, sts); e != nil {
|
||||
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
}
|
||||
} else {
|
||||
switch st := err.(type) {
|
||||
@@ -1689,15 +1725,16 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
|
||||
|
||||
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
|
||||
ctx := stream.Context()
|
||||
ctx = contextWithServer(ctx, s)
|
||||
var ti *traceInfo
|
||||
if EnableTracing {
|
||||
tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
|
||||
ctx = trace.NewContext(ctx, tr)
|
||||
tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
|
||||
ctx = newTraceContext(ctx, tr)
|
||||
ti = &traceInfo{
|
||||
tr: tr,
|
||||
firstLine: firstLine{
|
||||
client: false,
|
||||
remoteAddr: t.RemoteAddr(),
|
||||
remoteAddr: t.Peer().Addr,
|
||||
},
|
||||
}
|
||||
if dl, ok := ctx.Deadline(); ok {
|
||||
@@ -1721,7 +1758,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
|
||||
ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
|
||||
ti.tr.SetError()
|
||||
}
|
||||
channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
|
||||
channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
|
||||
}
|
||||
if ti != nil {
|
||||
ti.tr.Finish()
|
||||
@@ -1731,6 +1768,22 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
|
||||
service := sm[:pos]
|
||||
method := sm[pos+1:]
|
||||
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
for _, sh := range s.opts.statsHandlers {
|
||||
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
|
||||
sh.HandleRPC(ctx, &stats.InHeader{
|
||||
FullMethod: stream.Method(),
|
||||
RemoteAddr: t.Peer().Addr,
|
||||
LocalAddr: t.Peer().LocalAddr,
|
||||
Compression: stream.RecvCompress(),
|
||||
WireLength: stream.HeaderWireLength(),
|
||||
Header: md,
|
||||
})
|
||||
}
|
||||
// To have calls in stream callouts work. Will delete once all stats handler
|
||||
// calls come from the gRPC layer.
|
||||
stream.SetContext(ctx)
|
||||
|
||||
srv, knownService := s.services[service]
|
||||
if knownService {
|
||||
if md, ok := srv.methods[method]; ok {
|
||||
@@ -1762,7 +1815,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
|
||||
ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
|
||||
ti.tr.SetError()
|
||||
}
|
||||
channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
|
||||
channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
|
||||
}
|
||||
if ti != nil {
|
||||
ti.tr.Finish()
|
||||
@@ -1820,62 +1873,71 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream
|
||||
// pending RPCs on the client side will get notified by connection
|
||||
// errors.
|
||||
func (s *Server) Stop() {
|
||||
s.quit.Fire()
|
||||
|
||||
defer func() {
|
||||
s.serveWG.Wait()
|
||||
s.done.Fire()
|
||||
}()
|
||||
|
||||
s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
|
||||
|
||||
s.mu.Lock()
|
||||
listeners := s.lis
|
||||
s.lis = nil
|
||||
conns := s.conns
|
||||
s.conns = nil
|
||||
// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
|
||||
s.cv.Broadcast()
|
||||
s.mu.Unlock()
|
||||
|
||||
for lis := range listeners {
|
||||
lis.Close()
|
||||
}
|
||||
for _, cs := range conns {
|
||||
for st := range cs {
|
||||
st.Close(errors.New("Server.Stop called"))
|
||||
}
|
||||
}
|
||||
if s.opts.numServerWorkers > 0 {
|
||||
s.stopServerWorkers()
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
if s.events != nil {
|
||||
s.events.Finish()
|
||||
s.events = nil
|
||||
}
|
||||
s.mu.Unlock()
|
||||
s.stop(false)
|
||||
}
|
||||
|
||||
// GracefulStop stops the gRPC server gracefully. It stops the server from
|
||||
// accepting new connections and RPCs and blocks until all the pending RPCs are
|
||||
// finished.
|
||||
func (s *Server) GracefulStop() {
|
||||
s.stop(true)
|
||||
}
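GracefulStop and Stop now share the stop helper below. A common calling pattern is to attempt a graceful drain first and fall back to a hard Stop after a deadline; a sketch, with an arbitrary timeout:

package main

import (
    "log"
    "net"
    "time"

    "google.golang.org/grpc"
)

func main() {
    lis, err := net.Listen("tcp", ":50051")
    if err != nil {
        log.Fatalf("listen: %v", err)
    }
    s := grpc.NewServer()
    go func() {
        if err := s.Serve(lis); err != nil {
            log.Printf("serve: %v", err)
        }
    }()

    // ... on shutdown signal:
    done := make(chan struct{})
    go func() {
        s.GracefulStop() // waits for in-flight RPCs to finish
        close(done)
    }()
    select {
    case <-done:
    case <-time.After(10 * time.Second):
        s.Stop() // force-close remaining connections
    }
}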
|
||||
|
||||
func (s *Server) stop(graceful bool) {
|
||||
s.quit.Fire()
|
||||
defer s.done.Fire()
|
||||
|
||||
s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
|
||||
s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) })
|
||||
s.mu.Lock()
|
||||
if s.conns == nil {
|
||||
s.mu.Unlock()
|
||||
return
|
||||
s.closeListenersLocked()
|
||||
// Wait for serving threads to be ready to exit. Only then can we be sure no
|
||||
// new conns will be created.
|
||||
s.mu.Unlock()
|
||||
s.serveWG.Wait()
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if graceful {
|
||||
s.drainAllServerTransportsLocked()
|
||||
} else {
|
||||
s.closeServerTransportsLocked()
|
||||
}
|
||||
|
||||
for lis := range s.lis {
|
||||
lis.Close()
|
||||
for len(s.conns) != 0 {
|
||||
s.cv.Wait()
|
||||
}
|
||||
s.lis = nil
|
||||
s.conns = nil
|
||||
|
||||
if s.opts.numServerWorkers > 0 {
|
||||
// Closing the channel (only once, via grpcsync.OnceFunc) after all the
|
||||
// connections have been closed above ensures that there are no
|
||||
// goroutines executing the callback passed to st.HandleStreams (where
|
||||
// the channel is written to).
|
||||
s.serverWorkerChannelClose()
|
||||
}
|
||||
|
||||
if graceful || s.opts.waitForHandlers {
|
||||
s.handlersWG.Wait()
|
||||
}
|
||||
|
||||
if s.events != nil {
|
||||
s.events.Finish()
|
||||
s.events = nil
|
||||
}
|
||||
}
|
||||
|
||||
// s.mu must be held by the caller.
|
||||
func (s *Server) closeServerTransportsLocked() {
|
||||
for _, conns := range s.conns {
|
||||
for st := range conns {
|
||||
st.Close(errors.New("Server.Stop called"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// s.mu must be held by the caller.
|
||||
func (s *Server) drainAllServerTransportsLocked() {
|
||||
if !s.drain {
|
||||
for _, conns := range s.conns {
|
||||
for st := range conns {
|
||||
@@ -1884,22 +1946,14 @@ func (s *Server) GracefulStop() {
|
||||
}
|
||||
s.drain = true
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for serving threads to be ready to exit. Only then can we be sure no
|
||||
// new conns will be created.
|
||||
s.mu.Unlock()
|
||||
s.serveWG.Wait()
|
||||
s.mu.Lock()
|
||||
|
||||
for len(s.conns) != 0 {
|
||||
s.cv.Wait()
|
||||
// s.mu must be held by the caller.
|
||||
func (s *Server) closeListenersLocked() {
|
||||
for lis := range s.lis {
|
||||
lis.Close()
|
||||
}
|
||||
s.conns = nil
|
||||
if s.events != nil {
|
||||
s.events.Finish()
|
||||
s.events = nil
|
||||
}
|
||||
s.mu.Unlock()
|
||||
s.lis = nil
|
||||
}
|
||||
|
||||
// contentSubtype must be lowercase
|
||||
@@ -1913,11 +1967,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
|
||||
}
|
||||
codec := encoding.GetCodec(contentSubtype)
|
||||
if codec == nil {
|
||||
logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
|
||||
return encoding.GetCodec(proto.Name)
|
||||
}
|
||||
return codec
|
||||
}
|
||||
|
||||
type serverKey struct{}
|
||||
|
||||
// serverFromContext gets the Server from the context.
|
||||
func serverFromContext(ctx context.Context) *Server {
|
||||
s, _ := ctx.Value(serverKey{}).(*Server)
|
||||
return s
|
||||
}
|
||||
|
||||
// contextWithServer sets the Server in the context.
|
||||
func contextWithServer(ctx context.Context, server *Server) context.Context {
|
||||
return context.WithValue(ctx, serverKey{}, server)
|
||||
}
|
||||
|
||||
// isRegisteredMethod returns whether the passed in method is registered as a
|
||||
// method on the server. /service/method and service/method will match if the
|
||||
// service and method are registered on the server.
|
||||
func (s *Server) isRegisteredMethod(serviceMethod string) bool {
|
||||
if serviceMethod != "" && serviceMethod[0] == '/' {
|
||||
serviceMethod = serviceMethod[1:]
|
||||
}
|
||||
pos := strings.LastIndex(serviceMethod, "/")
|
||||
if pos == -1 { // Invalid method name syntax.
|
||||
return false
|
||||
}
|
||||
service := serviceMethod[:pos]
|
||||
method := serviceMethod[pos+1:]
|
||||
srv, knownService := s.services[service]
|
||||
if knownService {
|
||||
if _, ok := srv.methods[method]; ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := srv.streams[method]; ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SetHeader sets the header metadata to be sent from the server to the client.
|
||||
// The context provided must be the context passed to the server's handler.
|
||||
//
|
||||
@@ -2019,7 +2112,7 @@ func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
|
||||
return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
|
||||
}
|
||||
|
||||
return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil
|
||||
return stream.ClientAdvertisedCompressors(), nil
|
||||
}
|
||||
|
||||
// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
|
||||
@@ -2049,17 +2142,9 @@ func Method(ctx context.Context) (string, bool) {
|
||||
return s.Method(), true
|
||||
}
|
||||
|
||||
type channelzServer struct {
|
||||
s *Server
|
||||
}
|
||||
|
||||
func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
|
||||
return c.s.channelzMetric()
|
||||
}
|
||||
|
||||
// validateSendCompressor returns an error when given compressor name cannot be
|
||||
// handled by the server or the client based on the advertised compressors.
|
||||
func validateSendCompressor(name, clientCompressors string) error {
|
||||
func validateSendCompressor(name string, clientCompressors []string) error {
|
||||
if name == encoding.Identity {
|
||||
return nil
|
||||
}
|
||||
@@ -2068,7 +2153,7 @@ func validateSendCompressor(name, clientCompressors string) error {
|
||||
return fmt.Errorf("compressor not registered %q", name)
|
||||
}
|
||||
|
||||
for _, c := range strings.Split(clientCompressors, ",") {
|
||||
for _, c := range clientCompressors {
|
||||
if c == name {
|
||||
return nil // found match
|
||||
}
|
||||
|
||||
69
vendor/google.golang.org/grpc/service_config.go
generated
vendored
69
vendor/google.golang.org/grpc/service_config.go
generated
vendored
@@ -25,8 +25,11 @@ import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/balancer/pickfirst"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/balancer/gracefulswitch"
|
||||
internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
@@ -41,11 +44,6 @@ const maxInt = int(^uint(0) >> 1)
|
||||
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
||||
type MethodConfig = internalserviceconfig.MethodConfig
|
||||
|
||||
type lbConfig struct {
|
||||
name string
|
||||
cfg serviceconfig.LoadBalancingConfig
|
||||
}
|
||||
|
||||
// ServiceConfig is provided by the service provider and contains parameters for how
|
||||
// clients that connect to the service should behave.
|
||||
//
|
||||
@@ -55,14 +53,9 @@ type lbConfig struct {
|
||||
type ServiceConfig struct {
|
||||
serviceconfig.Config
|
||||
|
||||
// LB is the load balancer the service providers recommends. This is
|
||||
// deprecated; lbConfigs is preferred. If lbConfig and LB are both present,
|
||||
// lbConfig will be used.
|
||||
LB *string
|
||||
|
||||
// lbConfig is the service config's load balancing configuration. If
|
||||
// lbConfig and LB are both present, lbConfig will be used.
|
||||
lbConfig *lbConfig
|
||||
lbConfig serviceconfig.LoadBalancingConfig
|
||||
|
||||
// Methods contains a map for the methods in this service. If there is an
|
||||
// exact match for a method (i.e. /service/method) in the map, use the
|
||||
@@ -164,38 +157,55 @@ type jsonMC struct {
|
||||
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
|
||||
type jsonSC struct {
|
||||
LoadBalancingPolicy *string
|
||||
LoadBalancingConfig *internalserviceconfig.BalancerConfig
|
||||
LoadBalancingConfig *json.RawMessage
|
||||
MethodConfig *[]jsonMC
|
||||
RetryThrottling *retryThrottlingPolicy
|
||||
HealthCheckConfig *healthCheckConfig
|
||||
}
|
||||
|
||||
func init() {
|
||||
internal.ParseServiceConfig = parseServiceConfig
|
||||
internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult {
|
||||
return parseServiceConfig(js, defaultMaxCallAttempts)
|
||||
}
|
||||
}
|
||||
func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
|
||||
if len(js) == 0 {
|
||||
return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
|
||||
}
|
||||
var rsc jsonSC
|
||||
err := json.Unmarshal([]byte(js), &rsc)
|
||||
if err != nil {
|
||||
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||||
logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
sc := ServiceConfig{
|
||||
LB: rsc.LoadBalancingPolicy,
|
||||
Methods: make(map[string]MethodConfig),
|
||||
retryThrottling: rsc.RetryThrottling,
|
||||
healthCheckConfig: rsc.HealthCheckConfig,
|
||||
rawJSONString: js,
|
||||
}
|
||||
if c := rsc.LoadBalancingConfig; c != nil {
|
||||
sc.lbConfig = &lbConfig{
|
||||
name: c.Name,
|
||||
cfg: c.Config,
|
||||
c := rsc.LoadBalancingConfig
|
||||
if c == nil {
|
||||
name := pickfirst.Name
|
||||
if rsc.LoadBalancingPolicy != nil {
|
||||
name = *rsc.LoadBalancingPolicy
|
||||
}
|
||||
if balancer.Get(name) == nil {
|
||||
name = pickfirst.Name
|
||||
}
|
||||
cfg := []map[string]any{{name: struct{}{}}}
|
||||
strCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)}
|
||||
}
|
||||
r := json.RawMessage(strCfg)
|
||||
c = &r
|
||||
}
|
||||
cfg, err := gracefulswitch.ParseConfig(*c)
|
||||
if err != nil {
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
sc.lbConfig = cfg
|
||||
|
||||
if rsc.MethodConfig == nil {
|
||||
return &serviceconfig.ParseResult{Config: &sc}
|
||||
@@ -211,8 +221,8 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
WaitForReady: m.WaitForReady,
|
||||
Timeout: (*time.Duration)(m.Timeout),
|
||||
}
|
||||
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
|
||||
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||||
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil {
|
||||
logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
if m.MaxRequestMessageBytes != nil {
|
||||
@@ -232,13 +242,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
for i, n := range *m.Name {
|
||||
path, err := n.generatePath()
|
||||
if err != nil {
|
||||
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
|
||||
logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
|
||||
if _, ok := paths[path]; ok {
|
||||
err = errDuplicatedName
|
||||
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
|
||||
logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
paths[path] = struct{}{}
|
||||
@@ -257,7 +267,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
return &serviceconfig.ParseResult{Config: &sc}
|
||||
}
|
||||
|
||||
func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) {
|
||||
func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
|
||||
if jrp == nil {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -271,17 +281,16 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if jrp.MaxAttempts < maxAttempts {
|
||||
maxAttempts = jrp.MaxAttempts
|
||||
}
|
||||
rp := &internalserviceconfig.RetryPolicy{
|
||||
MaxAttempts: jrp.MaxAttempts,
|
||||
MaxAttempts: maxAttempts,
|
||||
InitialBackoff: time.Duration(jrp.InitialBackoff),
|
||||
MaxBackoff: time.Duration(jrp.MaxBackoff),
|
||||
BackoffMultiplier: jrp.BackoffMultiplier,
|
||||
RetryableStatusCodes: make(map[codes.Code]bool),
|
||||
}
|
||||
if rp.MaxAttempts > 5 {
|
||||
// TODO(retry): Make the max maxAttempts configurable.
|
||||
rp.MaxAttempts = 5
|
||||
}
|
||||
for _, code := range jrp.RetryableStatusCodes {
|
||||
rp.RetryableStatusCodes[code] = true
|
||||
}
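The cap applied above means a JSON retryPolicy may ask for more attempts, but the effective value is bounded by the per-channel maximum (and by 5 under the default settings). A hedged example of supplying such a config on the client; the service name and target are placeholders:

package main

import (
    "log"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

const serviceConfig = `{
  "methodConfig": [{
    "name": [{"service": "example.Greeter"}],
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
    conn, err := grpc.Dial("localhost:50051", // placeholder target
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithDefaultServiceConfig(serviceConfig),
    )
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
}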
|
||||
|
||||
10
vendor/google.golang.org/grpc/stats/stats.go
generated
vendored
10
vendor/google.golang.org/grpc/stats/stats.go
generated
vendored
@@ -73,9 +73,12 @@ func (*PickerUpdated) isRPCStats() {}
type InPayload struct {
// Client is true if this InPayload is from client side.
Client bool
// Payload is the payload with original type.
// Payload is the payload with original type. This may be modified after
// the call to HandleRPC which provides the InPayload returns and must be
// copied if needed later.
Payload any
// Data is the serialized message payload.
// Deprecated: Data will be removed in the next release.
Data []byte

// Length is the size of the uncompressed payload data. Does not include any
@@ -143,9 +146,12 @@ func (s *InTrailer) isRPCStats() {}
type OutPayload struct {
// Client is true if this OutPayload is from client side.
Client bool
// Payload is the payload with original type.
// Payload is the payload with original type. This may be modified after
// the call to HandleRPC which provides the OutPayload returns and must be
// copied if needed later.
Payload any
// Data is the serialized message payload.
// Deprecated: Data will be removed in the next release.
Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).

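The Payload doc change above means a stats handler must not retain Payload without copying it. A minimal sketch of a handler that respects this (assumed usage; lengthHandler is hypothetical) records only the message sizes:

```go
package example

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// lengthHandler is a hypothetical stats.Handler that records only payload
// sizes. Because InPayload.Payload and OutPayload.Payload may be modified
// after HandleRPC returns, it deliberately does not retain the Payload field.
type lengthHandler struct{}

func (lengthHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (lengthHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (lengthHandler) HandleConn(context.Context, stats.ConnStats)                       {}

func (lengthHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch p := s.(type) {
	case *stats.InPayload:
		log.Printf("received %d bytes", p.Length) // copies the int, not the payload
	case *stats.OutPayload:
		log.Printf("sent %d bytes", p.Length)
	}
}
```

Such a handler would typically be registered with grpc.WithStatsHandler on the client or grpc.StatsHandler on the server.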
18
vendor/google.golang.org/grpc/stream.go
generated
vendored
18
vendor/google.golang.org/grpc/stream.go
generated
vendored
@@ -23,11 +23,11 @@ import (
"errors"
"io"
"math"
"math/rand"
"strconv"
"sync"
"time"

"golang.org/x/net/trace"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
@@ -35,7 +35,6 @@ import (
"google.golang.org/grpc/internal/balancerload"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
iresolver "google.golang.org/grpc/internal/resolver"
@@ -48,6 +47,8 @@ import (
"google.golang.org/grpc/status"
)

var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))

// StreamHandler defines the handler called by gRPC server to complete the
// execution of a streaming RPC.
//
@@ -184,7 +185,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// when the RPC completes.
opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)

if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
// validate md
if err := imetadata.Validate(md); err != nil {
return nil, status.Error(codes.Internal, err.Error())
@@ -429,7 +430,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
var trInfo *traceInfo
if EnableTracing {
trInfo = &traceInfo{
tr: trace.New("grpc.Sent."+methodFamily(method), method),
tr: newTrace("grpc.Sent."+methodFamily(method), method),
firstLine: firstLine{
client: true,
},
@@ -438,7 +439,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
trInfo.firstLine.deadline = time.Until(deadline)
}
trInfo.tr.LazyLog(&trInfo.firstLine, false)
ctx = trace.NewContext(ctx, trInfo.tr)
ctx = newTraceContext(ctx, trInfo.tr)
}

if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
@@ -515,6 +516,7 @@ func (a *csAttempt) newStream() error {
return toRPCErr(nse.Err)
}
a.s = s
a.ctx = s.Context()
a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
return nil
}
@@ -654,13 +656,13 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
if len(sps) == 1 {
var e error
if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0])
cs.retryThrottler.throttle() // This counts as a failure for throttling.
return false, err
}
hasPushback = true
} else if len(sps) > 1 {
channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps)
cs.retryThrottler.throttle() // This counts as a failure for throttling.
return false, err
}
@@ -697,7 +699,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
if max := float64(rp.MaxBackoff); cur > max {
cur = max
}
dur = time.Duration(grpcrand.Int63n(int64(cur)))
dur = time.Duration(rand.Int63n(int64(cur)))
cs.numRetriesSincePushback++
}


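The retry path above switches from internal/grpcrand to math/rand for jitter. A rough sketch of the backoff computation it mirrors (assumed behavior, simplified from the context lines above; the function name is illustrative, not gRPC API):

```go
package example

import (
	"math/rand"
	"time"
)

// retryBackoff grows the delay by multiplier per retry, caps it at
// maxBackoff, and draws the actual wait uniformly from [0, cur).
func retryBackoff(initial, maxBackoff time.Duration, multiplier float64, retries int) time.Duration {
	cur := float64(initial)
	for i := 0; i < retries; i++ {
		cur *= multiplier
	}
	if max := float64(maxBackoff); cur > max {
		cur = max
	}
	if cur < 1 {
		return 0
	}
	return time.Duration(rand.Int63n(int64(cur)))
}
```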
152
vendor/google.golang.org/grpc/stream_interfaces.go
generated
vendored
Normal file
152
vendor/google.golang.org/grpc/stream_interfaces.go
generated
vendored
Normal file
@@ -0,0 +1,152 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package grpc

// ServerStreamingClient represents the client side of a server-streaming (one
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
type ServerStreamingClient[Res any] interface {
Recv() (*Res, error)
ClientStream
}

// ServerStreamingServer represents the server side of a server-streaming (one
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
type ServerStreamingServer[Res any] interface {
Send(*Res) error
ServerStream
}

// ClientStreamingClient represents the client side of a client-streaming (many
// requests, one response) RPC. It is generic over both the type of the request
// message stream and the type of the unary response message. It is used in
// generated code.
type ClientStreamingClient[Req any, Res any] interface {
Send(*Req) error
CloseAndRecv() (*Res, error)
ClientStream
}

// ClientStreamingServer represents the server side of a client-streaming (many
// requests, one response) RPC. It is generic over both the type of the request
// message stream and the type of the unary response message. It is used in
// generated code.
type ClientStreamingServer[Req any, Res any] interface {
Recv() (*Req, error)
SendAndClose(*Res) error
ServerStream
}

// BidiStreamingClient represents the client side of a bidirectional-streaming
// (many requests, many responses) RPC. It is generic over both the type of the
// request message stream and the type of the response message stream. It is
// used in generated code.
type BidiStreamingClient[Req any, Res any] interface {
Send(*Req) error
Recv() (*Res, error)
ClientStream
}

// BidiStreamingServer represents the server side of a bidirectional-streaming
// (many requests, many responses) RPC. It is generic over both the type of the
// request message stream and the type of the response message stream. It is
// used in generated code.
type BidiStreamingServer[Req any, Res any] interface {
Recv() (*Req, error)
Send(*Res) error
ServerStream
}

// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient,
// and BidiStreamingClient interfaces. It is used in generated code.
type GenericClientStream[Req any, Res any] struct {
ClientStream
}

var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil)
var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)

// Send pushes one message into the stream of requests to be consumed by the
// server. The type of message which can be sent is determined by the Req type
// parameter of the GenericClientStream receiver.
func (x *GenericClientStream[Req, Res]) Send(m *Req) error {
return x.ClientStream.SendMsg(m)
}

// Recv reads one message from the stream of responses generated by the server.
// The type of the message returned is determined by the Res type parameter
// of the GenericClientStream receiver.
func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) {
m := new(Res)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

// CloseAndRecv closes the sending side of the stream, then receives the unary
// response from the server. The type of message which it returns is determined
// by the Res type parameter of the GenericClientStream receiver.
func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(Res)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer,
// and BidiStreamingServer interfaces. It is used in generated code.
type GenericServerStream[Req any, Res any] struct {
ServerStream
}

var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil)
var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)

// Send pushes one message into the stream of responses to be consumed by the
// client. The type of message which can be sent is determined by the Res
// type parameter of the serverStreamServer receiver.
func (x *GenericServerStream[Req, Res]) Send(m *Res) error {
return x.ServerStream.SendMsg(m)
}

// SendAndClose pushes the unary response to the client. The type of message
// which can be sent is determined by the Res type parameter of the
// clientStreamServer receiver.
func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error {
return x.ServerStream.SendMsg(m)
}

// Recv reads one message from the stream of requests generated by the client.
// The type of the message returned is determined by the Req type parameter
// of the clientStreamServer receiver.
func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) {
m := new(Req)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

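These generic interfaces let handler and helper code be written once against the stream shape rather than against per-method generated stream types. A minimal sketch (not from this commit; Reply is a placeholder message type):

```go
package example

import "google.golang.org/grpc"

// Reply stands in for a generated protobuf response type.
type Reply struct{ Msg string }

// sendAll streams every message to the client through the generic
// server-streaming interface added in the file above.
func sendAll(stream grpc.ServerStreamingServer[Reply], msgs []string) error {
	for _, m := range msgs {
		if err := stream.Send(&Reply{Msg: m}); err != nil {
			return err
		}
	}
	return nil
}
```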
26
vendor/google.golang.org/grpc/trace.go
generated
vendored
26
vendor/google.golang.org/grpc/trace.go
generated
vendored
@@ -26,8 +26,6 @@ import (
"strings"
"sync"
"time"

"golang.org/x/net/trace"
)

// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
@@ -44,9 +42,31 @@ func methodFamily(m string) string {
return m
}

// traceEventLog mirrors golang.org/x/net/trace.EventLog.
//
// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
type traceEventLog interface {
Printf(format string, a ...any)
Errorf(format string, a ...any)
Finish()
}

// traceLog mirrors golang.org/x/net/trace.Trace.
//
// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
type traceLog interface {
LazyLog(x fmt.Stringer, sensitive bool)
LazyPrintf(format string, a ...any)
SetError()
SetRecycler(f func(any))
SetTraceInfo(traceID, spanID uint64)
SetMaxEvents(m int)
Finish()
}

// traceInfo contains tracing information for an RPC.
type traceInfo struct {
tr trace.Trace
tr traceLog
firstLine firstLine
}


52
vendor/google.golang.org/grpc/trace_notrace.go
generated
vendored
Normal file
52
vendor/google.golang.org/grpc/trace_notrace.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
//go:build grpcnotrace

/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package grpc

// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in
// turn enables binaries using gRPC-Go for dead code elimination, which can
// yield 10-15% improvements in binary size when tracing is not needed.

import (
"context"
"fmt"
)

type notrace struct{}

func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {}
func (notrace) LazyPrintf(format string, a ...any) {}
func (notrace) SetError() {}
func (notrace) SetRecycler(f func(any)) {}
func (notrace) SetTraceInfo(traceID, spanID uint64) {}
func (notrace) SetMaxEvents(m int) {}
func (notrace) Finish() {}

func newTrace(family, title string) traceLog {
return notrace{}
}

func newTraceContext(ctx context.Context, tr traceLog) context.Context {
return ctx
}

func newTraceEventLog(family, title string) traceEventLog {
return nil
}
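A minimal sketch of how a binary would opt in (assumed usage, based on the build constraint above): build any gRPC-importing program with the grpcnotrace tag so these stubs are selected and golang.org/x/net/trace is never linked.

```go
// Build with: go build -tags grpcnotrace .
// With the tag set, the notrace stubs above are compiled in place of the
// x/net/trace-backed implementation, so the tracing dependency can be
// eliminated as dead code.
package main

import "google.golang.org/grpc"

func main() {
	s := grpc.NewServer()
	defer s.Stop()
}
```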
@@ -1,6 +1,8 @@
//go:build !grpcnotrace

/*
*
* Copyright 2018 gRPC authors.
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,22 +18,22 @@
*
*/

package channelz
package grpc

import (
"syscall"
"context"

t "golang.org/x/net/trace"
)

// GetSocketOption gets the socket option info of the conn.
func GetSocketOption(socket any) *SocketOptionData {
c, ok := socket.(syscall.Conn)
if !ok {
return nil
}
data := &SocketOptionData{}
if rawConn, err := c.SyscallConn(); err == nil {
rawConn.Control(data.Getsockopt)
return data
}
return nil
func newTrace(family, title string) traceLog {
return t.New(family, title)
}

func newTraceContext(ctx context.Context, tr traceLog) context.Context {
return t.NewContext(ctx, tr)
}

func newTraceEventLog(family, title string) traceEventLog {
return t.NewEventLog(family, title)
}
2
vendor/google.golang.org/grpc/version.go
generated
vendored
2
vendor/google.golang.org/grpc/version.go
generated
vendored
@@ -19,4 +19,4 @@
package grpc

// Version is the current grpc version.
const Version = "1.59.0"
const Version = "1.65.0"

212
vendor/google.golang.org/grpc/vet.sh
generated
vendored
212
vendor/google.golang.org/grpc/vet.sh
generated
vendored
@@ -1,212 +0,0 @@
#!/bin/bash

set -ex # Exit on error; debugging enabled.
set -o pipefail # Fail a pipe if any sub-command fails.

# not makes sure the command passed to it does not exit with a return code of 0.
not() {
# This is required instead of the earlier (! $COMMAND) because subshells and
# pipefail don't work the same on Darwin as in Linux.
! "$@"
}

die() {
echo "$@" >&2
exit 1
}

fail_on_output() {
tee /dev/stderr | not read
}

# Check to make sure it's safe to modify the user's git repo.
git status --porcelain | fail_on_output

# Undo any edits made by this script.
cleanup() {
git reset --hard HEAD
}
trap cleanup EXIT

PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}"
go version

if [[ "$1" = "-install" ]]; then
# Install the pinned versions as defined in module tools.
pushd ./test/tools
go install \
golang.org/x/lint/golint \
golang.org/x/tools/cmd/goimports \
honnef.co/go/tools/cmd/staticcheck \
github.com/client9/misspell/cmd/misspell
popd
if [[ -z "${VET_SKIP_PROTO}" ]]; then
if [[ "${GITHUB_ACTIONS}" = "true" ]]; then
PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files.
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
pushd /home/runner/go
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
unzip ${PROTOC_FILENAME}
bin/protoc --version
popd
elif not which protoc > /dev/null; then
die "Please install protoc into your path"
fi
fi
exit 0
elif [[ "$#" -ne 0 ]]; then
die "Unknown argument(s): $*"
fi

# - Check that generated proto files are up to date.
if [[ -z "${VET_SKIP_PROTO}" ]]; then
make proto && git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
fi

if [[ -n "${VET_ONLY_PROTO}" ]]; then
exit 0
fi

# - Ensure all source files contain a copyright message.
# (Done in two parts because Darwin "git grep" has broken support for compound
# exclusion matches.)
(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output

# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
not grep 'func Test[^(]' *_test.go
not grep 'func Test[^(]' test/*.go

# - Do not import x/net/context.
not git grep -l 'x/net/context' -- "*.go"

# - Do not import math/rand for real library code. Use internal/grpcrand for
# thread safety.
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'

# - Do not use "interface{}"; use "any" instead.
git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc'

# - Do not call grpclog directly. Use grpclog.Component instead.
git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'

# - Ensure all ptypes proto packages are renamed when importing.
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"

# - Ensure all usages of grpc_testing package are renamed when importing.
not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"

# - Ensure all xds proto imports are renamed to *pb or *grpc.
git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'

misspell -error .

# - gofmt, goimports, golint (with exceptions for generated code), go vet,
# go mod tidy.
# Perform these checks on each module inside gRPC.
for MOD_FILE in $(find . -name 'go.mod'); do
MOD_DIR=$(dirname ${MOD_FILE})
pushd ${MOD_DIR}
go vet -all ./... | fail_on_output
gofmt -s -d -l . 2>&1 | fail_on_output
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"

go mod tidy -compat=1.19
git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
popd
done

# - Collection of static analysis checks
#
# TODO(dfawley): don't use deprecated functions in examples or first-party
# plugins.
# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs.
SC_OUT="$(mktemp)"
staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true
# Error if anything other than deprecation warnings are printed.
not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
# Only ignore the following deprecated types/fields/functions.
not grep -Fv '.CredsBundle
.HeaderMap
.Metadata is deprecated: use Attributes
.NewAddress
.NewServiceConfig
.Type is deprecated: use Attributes
BuildVersion is deprecated
balancer.ErrTransientFailure
balancer.Picker
extDesc.Filename is deprecated
github.com/golang/protobuf/jsonpb is deprecated
grpc.CallCustomCodec
grpc.Code
grpc.Compressor
grpc.CustomCodec
grpc.Decompressor
grpc.MaxMsgSize
grpc.MethodConfig
grpc.NewGZIPCompressor
grpc.NewGZIPDecompressor
grpc.RPCCompressor
grpc.RPCDecompressor
grpc.ServiceConfig
grpc.WithCompressor
grpc.WithDecompressor
grpc.WithDialer
grpc.WithMaxMsgSize
grpc.WithServiceConfig
grpc.WithTimeout
http.CloseNotifier
info.SecurityVersion
proto is deprecated
proto.InternalMessageInfo is deprecated
proto.EnumName is deprecated
proto.ErrInternalBadWireType is deprecated
proto.FileDescriptor is deprecated
proto.Marshaler is deprecated
proto.MessageType is deprecated
proto.RegisterEnum is deprecated
proto.RegisterFile is deprecated
proto.RegisterType is deprecated
proto.RegisterExtension is deprecated
proto.RegisteredExtension is deprecated
proto.RegisteredExtensions is deprecated
proto.RegisterMapType is deprecated
proto.Unmarshaler is deprecated
Target is deprecated: Use the Target field in the BuildOptions instead.
xxx_messageInfo_
' "${SC_OUT}"

# - special golint on package comments.
lint_package_comment_per_package() {
# Number of files in this go package.
fileCount=$(go list -f '{{len .GoFiles}}' $1)
if [ ${fileCount} -eq 0 ]; then
return 0
fi
# Number of package errors generated by golint.
lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment")
# golint complains about every file that's missing the package comment. If the
# number of files for this package is greater than the number of errors, there's
# at least one file with package comment, good. Otherwise, fail.
if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
echo "Package $1 (with ${fileCount} files) is missing package comment"
return 1
fi
}
lint_package_comment() {
set +ex

count=0
for i in $(go list ./...); do
lint_package_comment_per_package "$i"
((count += $?))
done

set -ex
return $count
}
lint_package_comment

echo SUCCESS